from lndmanage.lib.network_info import NetworkAnalysis
from lndmanage.lib.node import LndNode
from lndmanage import settings

import logging.config
logging.config.dictConfig(settings.logger_config)
logger = logging.getLogger(__name__)

if __name__ == '__main__':
    node = LndNode()
    network_analysis = NetworkAnalysis(node)
    network_analysis.print_node_overview(node.pub_key)

    logger.info('-------- Nodes with highest capacity: --------')
    for n in network_analysis.get_sorted_nodes_by_property():
        logger.info(n)

    logger.info('-------- Nodes with highest degree: --------')
    for n in network_analysis.get_sorted_nodes_by_property(key='degree'):
        logger.info(n)

    logger.info('-------- Nodes with highest capacity/channel: --------')
    for n in network_analysis.get_sorted_nodes_by_property(key='capacity_per_channel', min_degree=10):
        logger.info(n)

    logger.info('-------- Nodes with lowest capacity/channel: --------')
    for n in network_analysis.get_sorted_nodes_by_property(key='capacity_per_channel', min_degree=20, decrementing=False):
        logger.info(n)

    logger.info('-------- Nodes with most user nodes: --------')
    for n in network_analysis.get_sorted_nodes_by_property(key='user_nodes', min_degree=20):
        logger.info(n)

    network_analysis.print_find_nodes_giving_most_secondary_hops(node.pub_key)
import time

import adafruit_dotstar
from rainbowio import colorwheel
import board
import touchio

pixel = adafruit_dotstar.DotStar(board.APA102_SCK, board.APA102_MOSI, 1, brightness=.1)
touch = touchio.TouchIn(board.D1)

hue = 0
while True:
    hue = hue + touch.value * 3
    if hue > 255:  # Wrap back around to red
        hue = hue - 255
    pixel[0] = colorwheel(hue)
    time.sleep(.05)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import unittest

import epitran


class TestSorani(unittest.TestCase):
    def setUp(self):
        self.epi = epitran.Epitran(u'fas-Arab')

    def test_faarsi(self):
        tr = self.epi.transliterate('فارسی')
        self.assertEqual(tr, 'fɒrsj')

    def test_rowshan(self):
        tr = self.epi.transliterate('روشن')
        self.assertEqual(tr, 'rvʃn')

    def test_hamaye(self):
        tr = self.epi.transliterate('همهٔ')
        self.assertEqual(tr, 'hmhʔ')

    def test_aafraad(self):
        tr = self.epi.transliterate('افراد')
        self.assertEqual(tr, 'ɒfrɒd')

    def test_bashar(self):
        tr = self.epi.transliterate('بشر')
        self.assertEqual(tr, 'bʃr')

    def test_aazaad(self):
        tr = self.epi.transliterate('آزاد')
        self.assertEqual(tr, 'ɒzɒd')

    def test_donjaa(self):
        tr = self.epi.transliterate('دنیا')
        self.assertEqual(tr, 'dnjɒ')

    def test_miaayand(self):
        tr = self.epi.transliterate('می‌آیند')
        self.assertEqual(tr, 'mj‌ɒjnd')

    def test_heysiyaat(self):
        tr = self.epi.transliterate('حیثیت')
        self.assertEqual(tr, 'hjsjt')
<import_from_stmt>.compat unittest<import_stmt>ucl<import_stmt>sys<class_stmt>DumpTest(unittest.TestCase)<block_start><def_stmt>test_no_args self<block_start><with_stmt>self.assertRaises(TypeError)<block_start>ucl.dump()<block_end><block_end><def_stmt>test_none self<block_start>self.assertEqual(ucl.dump(<none>) <none>)<block_end><def_stmt>test_null self<block_start>data={"a":<none>}<line_sep>valid="a = null;\n"<line_sep>self.assertEqual(ucl.dump(data) valid)<block_end><def_stmt>test_int self<block_start>data={"a":1}<line_sep>valid="a = 1;\n"<line_sep>self.assertEqual(ucl.dump(data) valid)<block_end><def_stmt>test_nested_int self<block_start>data={"a":{"b":1}}<line_sep>valid="a {\n b = 1;\n}\n"<line_sep>self.assertEqual(ucl.dump(data) valid)<block_end><def_stmt>test_int_array self<block_start>data={"a":[1 2 3 4]}<line_sep>valid="a [\n 1,\n 2,\n 3,\n 4,\n]\n"<line_sep>self.assertEqual(ucl.dump(data) valid)<block_end><def_stmt>test_str self<block_start>data={"a":"b"}<line_sep>valid="a = \"b\";\n"<line_sep>self.assertEqual(ucl.dump(data) valid)<block_end>@unittest.skipIf(sys.version_info[0]<g>2 "Python3 uses unicode only")<def_stmt>test_unicode self<block_start>data={unicode("a"):unicode("b")}<line_sep>valid=unicode("a = \"b\";\n")<line_sep>self.assertEqual(ucl.dump(data) valid)<block_end><def_stmt>test_float self<block_start>data={"a":1.1}<line_sep>valid="a = 1.100000;\n"<line_sep>self.assertEqual(ucl.dump(data) valid)<block_end><def_stmt>test_boolean self<block_start>data={"a":<true> "b":<false>}<line_sep>valid=["a = true;\nb = false;\n" "b = false;\na = true;\n"]<line_sep>self.assertIn(ucl.dump(data) valid)<block_end><def_stmt>test_empty_ucl self<block_start>self.assertEqual(ucl.dump({}) "")<block_end><def_stmt>test_json self<block_start>data={"a":1 "b":"bleh;"}<line_sep>valid=['{\n "a": 1,\n "b": "bleh;"\n}' '{\n "b": "bleh;",\n "a": 1\n}']<line_sep>self.assertIn(ucl.dump(data ucl.UCL_EMIT_JSON) valid)<block_end><block_end>
# -*- coding: utf-8 -*-
import re

from hamcrest import *

from amplify.agent.objects.nginx.filters import Filter
from test.base import BaseTestCase

__author__ = "<NAME>"
__copyright__ = "Copyright (C) Nginx, Inc. All rights reserved."
__license__ = ""
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"


class FiltersTestCase(BaseTestCase):
    def test_init(self):
        filtr = Filter(
            filter_rule_id='1',
            metric='http.something',
            data=[
                ['logname', '~', 'foo.txt'],
                ['$request_method', '~', 'post'],
                ['$request_uri', '~', '.*\.gif'],
                ['$status', '!~', '200']
            ]
        )

        assert_that(filtr.filter_rule_id, equal_to('1'))
        assert_that(filtr.metric, equal_to('http.something'))
        assert_that(filtr.filename, equal_to('foo.txt'))

        assert_that('logname', not_(is_in(filtr.data)))
        assert_that(filtr.data['request_method'], equal_to(re.compile("POST")))
        assert_that(filtr.data['request_uri'], equal_to(re.compile(".*\.gif")))
        assert_that(filtr.data['status'], equal_to(re.compile("200")))

        assert_that('logname', not_(is_in(filtr._negated_conditions)))
        assert_that(filtr._negated_conditions['request_method'], equal_to(False))
        assert_that(filtr._negated_conditions['request_uri'], equal_to(False))
        assert_that(filtr._negated_conditions['status'], equal_to(True))

    def test_init_without_filename(self):
        filtr = Filter(
            filter_rule_id='1',
            metric='http.something',
            data=[
                ['$request_method', '~', 'post'],
                ['$request_uri', '~', '*.gif']
            ]
        )
        assert_that(filtr.filename, equal_to(None))

    def test_empty(self):
        filtr = Filter(filter_rule_id='1', metric='http.something', data=[])
        assert_that(filtr.empty, equal_to(True))

    def test_filematch(self):
        filtr = Filter(
            filter_rule_id='1',
            metric='http.something',
            data=[['logname', '~', 'foo.txt']]
        )
        assert_that(filtr.matchfile('foo.txt'), equal_to(True))
        assert_that(filtr.matchfile('foo.log'), equal_to(False))

        filtr = Filter(
            filter_rule_id='1',
            metric='http.something',
            data=[['logname', '!~', 'foo.txt']]
        )
        assert_that(filtr.matchfile('foo.txt'), equal_to(False))
        assert_that(filtr.matchfile('foo.log'), equal_to(True))

        filtr = Filter(
            filter_rule_id='1',
            metric='http.something',
            data=[
                ['$request_method', '~', 'post'],
                ['$request_uri', '~', '.*\.gif'],
                ['$status', '!~', '200']
            ]
        )
        assert_that(filtr.matchfile('foo.txt'), equal_to(True))
        assert_that(filtr.matchfile('foo.log'), equal_to(True))
# SPDX-FileCopyrightText: 2014-2020 <NAME> # # SPDX-License-Identifier: MIT <import_from_future_stmt> division print_function<import_stmt>pytest<import_stmt>pickle<import_from_stmt>collections OrderedDict<import_stmt>numpy<as>np<import_from_stmt>symfit Variable Parameter Fit FitResults Eq Ge CallableNumericalModel Model <import_from_stmt>symfit.distributions BivariateGaussian<import_from_stmt>symfit.core.minimizers BaseMinimizer MINPACK BFGS NelderMead ChainedMinimizer BasinHopping <import_from_stmt>symfit.core.objectives LogLikelihood LeastSquares VectorLeastSquares MinimizeModel <def_stmt>ge_constraint a# Has to be in the global namespace for pickle. <block_start><return>a-1<block_end><class_stmt>TestTestResult()<block_start>@classmethod<def_stmt>setup_class cls<block_start>xdata=np.linspace(1 10 10)<line_sep>ydata=3<times>xdata<power>2<line_sep>cls.a=Parameter('a')<line_sep>cls.b=Parameter('b')<line_sep>x=Variable('x')<line_sep>y=Variable('y')<line_sep>model=Model({y:cls.a<times>x<power>cls.b})<line_sep>fit=Fit(model x=xdata y=ydata)<line_sep>cls.fit_result=fit.execute()<line_sep>fit=Fit(model x=xdata y=ydata minimizer=MINPACK)<line_sep>cls.minpack_result=fit.execute()<line_sep>fit=Fit(model x=xdata objective=LogLikelihood)<line_sep>cls.likelihood_result=fit.execute()<line_sep>fit=Fit(model x=xdata y=ydata minimizer=[BFGS NelderMead])<line_sep>cls.chained_result=fit.execute()<line_sep>z=Variable('z')<line_sep>constraints=[Eq(cls.a cls.b) CallableNumericalModel.as_constraint({z:ge_constraint} connectivity_mapping={z:{cls.a}} constraint_type=Ge model=model)]<line_sep>fit=Fit(model x=xdata y=ydata constraints=constraints)<line_sep>cls.constrained_result=fit.execute()<line_sep>fit=Fit(model x=xdata y=ydata constraints=constraints minimizer=BasinHopping)<line_sep>cls.constrained_basinhopping_result=fit.execute()<block_end><def_stmt>test_params_type self<block_start><assert_stmt>isinstance(self.fit_result.params OrderedDict)<block_end><def_stmt>test_minimizer_output_type self<block_start><assert_stmt>isinstance(self.fit_result.minimizer_output dict)<assert_stmt>isinstance(self.minpack_result.minimizer_output dict)<assert_stmt>isinstance(self.likelihood_result.minimizer_output dict)<block_end><def_stmt>test_fitting self<block_start>""" Test if the fitting worked in the first place. """<assert_stmt>isinstance(self.fit_result FitResults)<assert_stmt>self.fit_result.value(self.a)<eq>pytest.approx(3.0)<assert_stmt>self.fit_result.value(self.b)<eq>pytest.approx(2.0)<assert_stmt>isinstance(self.fit_result.stdev(self.a) float)<assert_stmt>isinstance(self.fit_result.stdev(self.b) float)<assert_stmt>isinstance(self.fit_result.r_squared float)<line_sep># by definition since there's no fuzzyness <assert_stmt>self.fit_result.r_squared<eq>1.0<block_end><def_stmt>test_fitting_2 self<block_start>np.random.seed(43)<line_sep>mean=(0.62 0.71)# x, y mean 0.7, 0.7 cov=[[0.102<power>2 0] [0 0.07<power>2]]<line_sep>data_1=np.random.multivariate_normal(mean cov 10<power>5)<line_sep>mean=(0.33 0.28)# x, y mean 0.3, 0.3 cov=[# rho = 0.25 [0.05<power>2 0.25<times>0.05<times>0.101] [0.25<times>0.05<times>0.101 0.101<power>2]]<line_sep>data_2=np.random.multivariate_normal(mean cov 10<power>5)<line_sep>data=np.vstack((data_1 data_2))<line_sep># Insert them as y,x here as np fucks up cartesian conventions. 
ydata,xedges,yedges=np.histogram2d(data[: 1] data[: 0] bins=200 range=[[0.0 1.0] [0.0 1.0]] density=<true>)<line_sep>xcentres=(xedges[:-1]+xedges[1:])/2<line_sep>ycentres=(yedges[:-1]+yedges[1:])/2<line_sep># Make a valid grid to match ydata xx,yy=np.meshgrid(xcentres ycentres sparse=<false>)<line_sep>x=Variable('x')<line_sep>y=Variable('y')<line_sep>x0_1=Parameter('x0_1' value=0.6 min=0.5 max=0.7)<line_sep>sig_x_1=Parameter('sig_x_1' value=0.1 min=0.0 max=0.2)<line_sep>y0_1=Parameter('y0_1' value=0.7 min=0.6 max=0.8)<line_sep>sig_y_1=Parameter('sig_y_1' value=0.05 min=0.0 max=0.2)<line_sep>rho_1=Parameter('rho_1' value=0.0 min=-0.5 max=0.5)<line_sep>A_1=Parameter('A_1' value=0.5 min=0.3 max=0.7)<line_sep>g_1=A_1<times>BivariateGaussian(x=x y=y mu_x=x0_1 mu_y=y0_1 sig_x=sig_x_1 sig_y=sig_y_1 rho=rho_1)<line_sep>x0_2=Parameter('x0_2' value=0.3 min=0.2 max=0.4)<line_sep>sig_x_2=Parameter('sig_x_2' value=0.05 min=0.0 max=0.2)<line_sep>y0_2=Parameter('y0_2' value=0.3 min=0.2 max=0.4)<line_sep>sig_y_2=Parameter('sig_y_2' value=0.1 min=0.0 max=0.2)<line_sep>rho_2=Parameter('rho_2' value=0.26 min=0.0 max=0.8)<line_sep>A_2=Parameter('A_2' value=0.5 min=0.3 max=0.7)<line_sep>g_2=A_2<times>BivariateGaussian(x=x y=y mu_x=x0_2 mu_y=y0_2 sig_x=sig_x_2 sig_y=sig_y_2 rho=rho_2)<line_sep>model=g_1+g_2<line_sep>fit=Fit(model xx yy ydata)<line_sep>fit_result=fit.execute()<assert_stmt>fit_result.r_squared<g>0.95<for_stmt>param fit.model.params<block_start><try_stmt><block_start><assert_stmt>fit_result.stdev(param)<power>2<eq>pytest.approx(fit_result.variance(param))<block_end><except_stmt>AssertionError<block_start><assert_stmt>fit_result.variance(param)<le>0.0<assert_stmt>np.isnan(fit_result.stdev(param))<block_end><block_end># Covariance matrix should be symmetric <for_stmt>param_1 fit.model.params<block_start><for_stmt>param_2 fit.model.params<block_start><assert_stmt>fit_result.covariance(param_1 param_2)<eq>pytest.approx(fit_result.covariance(param_2 param_1) rel=1e-3)<block_end><block_end><block_end><def_stmt>test_minimizer_included self<block_start>""""The minimizer used should be included in the results."""<assert_stmt>isinstance(self.constrained_result.minimizer BaseMinimizer)<assert_stmt>isinstance(self.constrained_basinhopping_result.minimizer BaseMinimizer)<assert_stmt>isinstance(self.likelihood_result.minimizer BaseMinimizer)<assert_stmt>isinstance(self.fit_result.minimizer BaseMinimizer)<assert_stmt>isinstance(self.chained_result.minimizer ChainedMinimizer)<for_stmt>minimizer,cls zip(self.chained_result.minimizer.minimizers [BFGS NelderMead])<block_start><assert_stmt>isinstance(minimizer cls)<block_end><block_end><def_stmt>test_objective_included self<block_start>""""The objective used should be included in the results."""<assert_stmt>isinstance(self.fit_result.objective LeastSquares)<assert_stmt>isinstance(self.minpack_result.objective VectorLeastSquares)<assert_stmt>isinstance(self.likelihood_result.objective LogLikelihood)<assert_stmt>isinstance(self.constrained_result.objective LeastSquares)<assert_stmt>isinstance(self.constrained_basinhopping_result.objective LeastSquares)<block_end><def_stmt>test_constraints_included self<block_start>""" Test if the constraints have been properly fed to the results object so we can easily print their compliance. """<line_sep># For a constrained fit we expect a list of MinimizeModel objectives. 
<for_stmt>constrained_result [self.constrained_result self.constrained_basinhopping_result]<block_start><assert_stmt>isinstance(constrained_result.constraints list)<for_stmt>constraint self.constrained_result.constraints<block_start><assert_stmt>isinstance(constraint MinimizeModel)<block_end><block_end><block_end><def_stmt>test_message_included self<block_start>"""Status message should be included."""<assert_stmt>isinstance(self.fit_result.status_message str)<assert_stmt>isinstance(self.minpack_result.status_message str)<assert_stmt>isinstance(self.likelihood_result.status_message str)<assert_stmt>isinstance(self.constrained_result.status_message str)<assert_stmt>isinstance(self.constrained_basinhopping_result.status_message str)<block_end><def_stmt>test_pickle self<block_start><for_stmt>fit_result [self.fit_result self.chained_result self.constrained_basinhopping_result self.constrained_result self.likelihood_result]<block_start>dumped=pickle.dumps(fit_result)<line_sep>new_result=pickle.loads(dumped)<assert_stmt>sorted(fit_result.__dict__.keys())<eq>sorted(new_result.__dict__.keys())<for_stmt>k,v1 fit_result.__dict__.items()<block_start>v2=new_result.__dict__[k]<if_stmt>k<eq>'minimizer'<block_start><assert_stmt>type(v1)<eq>type(v2)<block_end><elif_stmt>k<ne>'minimizer_output'# Ignore minimizer_output <block_start><if_stmt>isinstance(v1 np.ndarray)<block_start><assert_stmt>v1<eq>pytest.approx(v2 nan_ok=<true>)<block_end><block_end><block_end><block_end><block_end><def_stmt>test_gof_presence self<block_start>""" Test if the expected goodness of fit estimators are present. """<assert_stmt>hasattr(self.fit_result 'objective_value')<assert_stmt>hasattr(self.fit_result 'r_squared')<assert_stmt>hasattr(self.fit_result 'chi_squared')<assert_stmt><not>hasattr(self.fit_result 'log_likelihood')<assert_stmt><not>hasattr(self.fit_result 'likelihood')<assert_stmt>hasattr(self.minpack_result 'objective_value')<assert_stmt>hasattr(self.minpack_result 'r_squared')<assert_stmt>hasattr(self.minpack_result 'chi_squared')<assert_stmt><not>hasattr(self.minpack_result 'log_likelihood')<assert_stmt><not>hasattr(self.minpack_result 'likelihood')<assert_stmt>hasattr(self.likelihood_result 'objective_value')<assert_stmt><not>hasattr(self.likelihood_result 'r_squared')<assert_stmt><not>hasattr(self.likelihood_result 'chi_squared')<assert_stmt>hasattr(self.likelihood_result 'log_likelihood')<assert_stmt>hasattr(self.likelihood_result 'likelihood')<block_end><block_end>
from unittest import TestCase

from pact.http_proxy import app
from fastapi.testclient import TestClient

client = TestClient(app)


class HttpProxyTestCase(TestCase):
    def test_ping(self):
        res = client.get('/ping')
        self.assertEqual(res.status_code, 200)
        assert res.json() == {"ping": "pong"}

    def test_handle_http_error(self):
        res = client.get('/something_does_not_exist')
        self.assertEqual(res.status_code, 404)
        json_res = res.json()
        json_res['code'] = 404
        json_res['name'] = 'Not Found'

    def test_setup(self):
        payload = {'anyPayload': 'really'}
        res = client.post('/setup', json=payload)
        self.assertEqual(res.status_code, 201)
        json_res = res.json()
        assert json_res == payload

    def setup_state(self, payload):
        setup_res = client.post('/setup', json=payload)
        self.assertEqual(setup_res.status_code, 201)

    def test_home_should_return_expected_response(self):
        message = {
            'event': 'ObjectCreated:Put',
            'bucket': 'bucket_name',
            'key': 'path_to_file_in_s3.pdf',
            'documentType': 'application/pdf'
        }
        data = {'messageHandlers': {'A document created successfully': message}}
        self.setup_state(data)

        payload = {'providerStates': [{'name': 'A document created successfully'}]}
        res = client.post('/', json=payload)
        self.assertEqual(res.json(), {'contents': message})

    def test_home_raise_runtime_error_if_no_matched(self):
        data = {
            'messageHandlers': {
                'A document created successfully': {'event': 'ObjectCreated:Put'}
            }
        }
        self.setup_state(data)

        payload = {'providerStates': [{'name': 'New state to raise RuntimeError'}]}
        res = client.post('/', json=payload)
        self.assertEqual(res.status_code, 500)
        assert res.json() == {'detail': 'No matched handler.'}
# Adapted from https://github.com/krrish94/nerf-pytorch <import_stmt>torch<import_from_stmt>einops repeat<def_stmt>meshgrid_xy tensor1:torch.Tensor tensor2:torch.Tensor<arrow>(torch.Tensor torch.Tensor)<block_start>"""Mimick np.meshgrid(..., indexing="xy") in pytorch. torch.meshgrid only allows "ij" indexing. (If you're unsure what this means, safely skip trying to understand this, and run a tiny example!) Args: tensor1 (torch.Tensor): Tensor whose elements define the first dimension of the returned meshgrid. tensor2 (torch.Tensor): Tensor whose elements define the second dimension of the returned meshgrid. """<line_sep># TESTED ii,jj=torch.meshgrid(tensor1 tensor2)<line_sep><return>ii.transpose(-1 -2) jj.transpose(-1 -2)<block_end><def_stmt>cumprod_exclusive tensor:torch.Tensor<arrow>torch.Tensor<block_start>r"""Mimick functionality of tf.math.cumprod(..., exclusive=True), as it isn't available in PyTorch. Args: tensor (torch.Tensor): Tensor whose cumprod (cumulative product, see `torch.cumprod`) along dim=-1 is to be computed. Returns: cumprod (torch.Tensor): cumprod of Tensor along dim=-1, mimiciking the functionality of tf.math.cumprod(..., exclusive=True) (see `tf.math.cumprod` for details). """<line_sep># TESTED # Only works for the last dimension (dim=-1) dim=-1<line_sep># Compute regular cumprod first (this is equivalent to `tf.math.cumprod(..., exclusive=False)`). cumprod=torch.cumprod(tensor dim)<line_sep># "Roll" the elements along dimension 'dim' by 1 element. cumprod=torch.roll(cumprod 1 dim)<line_sep># Replace the first element by "1" as this is what tf.cumprod(..., exclusive=True) does. cumprod[<ellipsis> 0]=1.0<line_sep><return>cumprod<block_end><def_stmt>get_ray_bundle_batch height:int width:int focal_length tform_cam2world:torch.Tensor<block_start>r"""Compute the bundle of rays passing through all pixels of a batch of image (one ray per pixel). Args: height (int): Height of an image (number of pixels). width (int): Width of an image (number of pixels). focal_length (float or torch.Tensor): Focal length (number of pixels, i.e., calibrated intrinsics). tform_cam2world (torch.Tensor): A 6-DoF rigid-body transform (shape: :math:`(B, 4, 4)`) that transforms a 3D point from the camera frame to the "world" frame for the current example. Returns: ray_origins (torch.Tensor): A tensor of shape :math:`(B, width, height, 3)` denoting the centers of each ray. `ray_origins[B][i][j]` denotes the origin of the ray passing through pixel at batch index `B`, row index `j`, and column index `i`. ray_directions (torch.Tensor): A tensor of shape :math:`(B, width, height, 3)` denoting the direction of each ray (a unit vector). `ray_directions[B][i][j]` denotes the direction of the ray passing through the pixel at batch index `B`, row index `j`, and column index `i`. 
"""<line_sep>x=torch.arange(width dtype=tform_cam2world.dtype device=tform_cam2world.device).to(tform_cam2world)<line_sep>y=torch.arange(height dtype=tform_cam2world.dtype device=tform_cam2world.device)<line_sep>ii,jj=meshgrid_xy(x y)<if_stmt>type(focal_length)<in>[tuple list]# if given two values, assume they are fx and fy <block_start>fx,fy=focal_length<block_end><else_stmt># otherwise assume fx and fy share the same magnitude, but opposing polarity <block_start>fx,fy=focal_length -focal_length<block_end># construct unit direction vectors # shape [height, width, 3] directions=torch.stack([(ii-width<times>0.5)/fx (jj-height<times>0.5)/fy -torch.ones_like(ii)] dim=-1)<line_sep>B=tform_cam2world.shape[0]<line_sep># shape [B x height x width, 1, 3] directions=directions.view(1 -1 1 3).repeat(B 1 1 1).view(-1 1 3)<line_sep># shape [B x height x width, 4, 4] tform_cam2world=tform_cam2world.unsqueeze(1).repeat(1 height<times>width 1 1).view(-1 4 4)<line_sep>ray_directions=torch.sum(directions<times>tform_cam2world[: :3 :3] dim=-1).view(B height width 3)<line_sep>ray_origins=tform_cam2world[: :3 -1].view(B height width 3)<line_sep><return>ray_origins ray_directions<block_end><def_stmt>get_sample_points tform_cam2world F H W samples_per_ray=32 near=0 far=1 use_viewdirs=<true> perturb=<false> mask=<none><block_start>B=tform_cam2world.shape[0]<line_sep>ray_origins,ray_directions=get_ray_bundle_batch(H W F tform_cam2world)# [B, H, W, 3] ro=ray_origins.view((B -1 3))<line_sep>rd=ray_directions.view((B -1 3))<if_stmt>mask<is><not><none><block_start><if_stmt>len(mask.shape)<eq>1# same mask for each image in batch, mask is shape [n_patch_pixels] <block_start>ro=ro[: mask :]<line_sep>rd=rd[: mask :]<block_end><elif_stmt>len(mask.shape)<eq>2# different mask for each image in batch, mask is shape [B, n_patch_pixels] <block_start>mask=repeat(mask 'b n_patch_pixels -> b n_patch_pixels 3')<line_sep># ro is shape [B, n_pixels, 3], gather along pixel dimension ro=torch.gather(ro dim=1 index=mask)<line_sep>rd=torch.gather(rd dim=1 index=mask)<block_end><block_end>near=near<times>torch.ones_like(rd[<ellipsis> :1])<line_sep>far=far<times>torch.ones_like(rd[<ellipsis> :1])<line_sep>num_rays=ro.shape[1]<line_sep>t_vals=torch.linspace(0.0 1.0 samples_per_ray dtype=ro.dtype device=ro.device)<line_sep>z_vals=near<times>(1.0-t_vals)+far<times>t_vals<if_stmt>perturb# Get intervals between samples. <block_start>mids=0.5<times>(z_vals[<ellipsis> 1:]+z_vals[<ellipsis> :-1])<line_sep>upper=torch.cat((mids z_vals[<ellipsis> -1:]) dim=-1)<line_sep>lower=torch.cat((z_vals[<ellipsis> :1] mids) dim=-1)<line_sep># Stratified samples in those intervals. 
t_rand=torch.rand(z_vals.shape dtype=ro.dtype device=ro.device)<line_sep>z_vals=lower+(upper-lower)<times>t_rand<block_end># pts -> (B, H*W, N_samples, 3) # pts are in world coordinates pts=ro[<ellipsis> <none> :]+rd[<ellipsis> <none> :]<times>z_vals[<ellipsis> : <none>]<if_stmt>use_viewdirs<block_start>viewdirs=rd<line_sep>viewdirs=viewdirs/viewdirs.norm(p=2 dim=-1).unsqueeze(-1)<line_sep>viewdirs=viewdirs.view((B -1 1 3))<line_sep># input_dirs -> (B, H*W, N_samples, 3) viewdirs=viewdirs.expand(pts.shape)<block_end><else_stmt><block_start>viewdirs=<none><block_end><return>pts viewdirs z_vals rd ro<block_end><def_stmt>volume_render_radiance_field rgb occupancy depth_values ray_directions radiance_field_noise_std=0.0 alpha_activation='relu' activate_rgb=<true> density_bias=0 <block_start>one_e_10=torch.tensor([1e10] dtype=ray_directions.dtype device=ray_directions.device)<line_sep>dists=torch.cat((depth_values[<ellipsis> 1:]-depth_values[<ellipsis> :-1] one_e_10.expand(depth_values[<ellipsis> :1].shape) ) dim=-1 )<line_sep>dists=dists<times>ray_directions[<ellipsis> <none> :].norm(p=2 dim=-1)<line_sep>noise=0.0<if_stmt>radiance_field_noise_std<g>0.0<block_start>noise=(torch.randn(occupancy.shape dtype=occupancy.dtype device=occupancy.device )<times>radiance_field_noise_std)<block_end><if_stmt>alpha_activation<eq>'relu'<block_start>sigma_a=torch.nn.functional.relu(occupancy+noise)<block_end><elif_stmt>alpha_activation<eq>'softplus'# Deformable NeRF uses softplus instead of ReLU https://arxiv.org/pdf/2011.12948.pdf <block_start>sigma_a=torch.nn.functional.softplus(occupancy+noise+density_bias)<block_end>alpha=1.0-torch.exp(-sigma_a<times>dists)<line_sep>weights=alpha<times>cumprod_exclusive(1.0-alpha+1e-10)<if_stmt>activate_rgb<block_start>rgb=torch.sigmoid(rgb)<line_sep># widened sigmoid from https://github.com/google/mipnerf/blob/main/internal/models.py#L123 rgb_padding=0.001<line_sep>rgb=rgb<times>(1+2<times>rgb_padding)-rgb_padding<block_end>rgb_map=weights[<ellipsis> <none>]<times>rgb<line_sep>rgb_map=rgb_map.sum(dim=-2)<line_sep>depth_map=weights<times>depth_values<line_sep>depth_map=depth_map.sum(dim=-1)<line_sep>acc_map=weights.sum(dim=-1)<line_sep>disp_map=1.0/torch.max(1e-10<times>torch.ones_like(depth_map) depth_map/acc_map)<line_sep># occupancy prior from Neural Volumes # https://github.com/facebookresearch/neuralvolumes/blob/master/models/neurvol1.py#L130 occupancy_prior=torch.mean(torch.log(0.1+alpha.view(alpha.size(0) -1))+torch.log(0.1+1.0-alpha.view(alpha.size(0) -1))--2.20727)<line_sep><return>rgb_map disp_map acc_map weights depth_map occupancy_prior<block_end><def_stmt>sample_pdf_2 bins weights num_samples det=<false><block_start>"""sample_pdf function from another concurrent pytorch implementation by yenchenlin (https://github.com/yenchenlin/nerf-pytorch). 
"""<line_sep>weights=weights+1e-5<line_sep>pdf=weights/torch.sum(weights dim=-1 keepdim=<true>)<line_sep>cdf=torch.cumsum(pdf dim=-1)<line_sep>cdf=torch.cat([torch.zeros_like(cdf[<ellipsis> :1]) cdf] dim=-1)# (batchsize, len(bins)) # Take uniform samples <if_stmt>det<block_start>u=torch.linspace(0.0 1.0 steps=num_samples dtype=weights.dtype device=weights.device)<line_sep>u=u.expand(list(cdf.shape[:-1])+[num_samples])<block_end><else_stmt><block_start>u=torch.rand(list(cdf.shape[:-1])+[num_samples] dtype=weights.dtype device=weights.device )<block_end># Invert CDF u=u.contiguous()<line_sep>cdf=cdf.contiguous()<line_sep>inds=torch.searchsorted(cdf u right=<true>)<line_sep>below=torch.max(torch.zeros_like(inds-1) inds-1)<line_sep>above=torch.min((cdf.shape[-1]-1)<times>torch.ones_like(inds) inds)<line_sep>inds_g=torch.stack((below above) dim=-1)# (batchsize, num_samples, 2) matched_shape=(inds_g.shape[0] inds_g.shape[1] cdf.shape[-1])<line_sep>cdf_g=torch.gather(cdf.unsqueeze(1).expand(matched_shape) 2 inds_g)<line_sep>bins_g=torch.gather(bins.unsqueeze(1).expand(matched_shape) 2 inds_g)<line_sep>denom=cdf_g[<ellipsis> 1]-cdf_g[<ellipsis> 0]<line_sep>denom=torch.where(denom<l>1e-5 torch.ones_like(denom) denom)<line_sep>t=(u-cdf_g[<ellipsis> 0])/denom<line_sep>samples=bins_g[<ellipsis> 0]+t<times>(bins_g[<ellipsis> 1]-bins_g[<ellipsis> 0])<line_sep><return>samples<block_end>
<import_stmt>sys<import_stmt>logging<import_stmt>os<import_stmt>time<import_stmt>calendar<line_sep>LOGGER=logging.getLogger(__name__)<line_sep>logging.basicConfig(stream=sys.stdout level=logging.DEBUG)<def_stmt>main <block_start>home_dir=os.path.expanduser('~')<line_sep>check_that_cache_dir_is_removed(home_dir)<line_sep>check_that_global_tmp_dir_is_empty()<line_sep>check_vim_info_does_not_exists(home_dir)<line_sep>check_bash_history(home_dir)<line_sep>check_if_any_files_in_subfolder_with_mask_was_last_modified_before_the_boottime(home_dir "history" recursive=<false>)<line_sep>check_if_any_files_in_subfolder_with_mask_was_last_modified_before_the_boottime("/var/lib/cloud/instances/")<line_sep><return>0<block_end><def_stmt>check_that_cache_dir_is_removed home_dir<block_start>cache_dir_path=os.path.join(home_dir ".cache")<if_stmt>os.path.exists(cache_dir_path)<block_start>content_of_cache_dir=[f<for>f os.listdir(cache_dir_path)]<line_sep>LOGGER.info("Contents of cache directory: %s" content_of_cache_dir)<if_stmt>len(content_of_cache_dir)<g>1<block_start><raise>ValueError("cache dir includes more than 1 file (not only motd)")<block_end><if_stmt><not>content_of_cache_dir[0].startswith("pip")<block_start><raise>ValueError("cache dir include file that it probably should not have: {}".format(content_of_cache_dir[0]))<block_end><block_end><block_end><def_stmt>check_that_global_tmp_dir_is_empty <block_start>global_tmp_dir_path="/tmp/"<line_sep>global_tmp_dir_content=[f<for>f os.listdir(global_tmp_dir_path)]<for_stmt>f global_tmp_dir_content<block_start><if_stmt><not>f.startswith(".")<and>"system"<not><in>f.lower()<and>"dkms"<not><in>f.lower()<and>"ccNPSUr9.s"<not><in>f<and>"hsperfdata"<not><in>f<block_start><raise>ValueError("/tmp folder includes file that probably should not be there: {}".format(f))<block_end><block_end><block_end><def_stmt>check_vim_info_does_not_exists home_dir<block_start>viminfo_path=os.path.join(home_dir ".viminfo")<if_stmt>os.path.exists(viminfo_path)<block_start><raise>ValueError("{} still exists".format(viminfo_path))<block_end><block_end><def_stmt>check_bash_history home_dir<block_start>bash_history_path=os.path.join(home_dir ".bash_history")<if_stmt>os.path.exists(bash_history_path)<block_start><with_stmt>open(bash_history_path "r")<as>bash_history_file<block_start><if_stmt>bash_history_file.read()<block_start><raise>ValueError("{} contains history".format(bash_history_path))<block_end><block_end><block_end><block_end><def_stmt>check_if_any_files_in_subfolder_with_mask_was_last_modified_before_the_boottime folder mask=<none> recursive=<true><block_start>uptime_seconds=0<if_stmt>recursive# Recursive travel and get all files under given folder <block_start>all_files=[os.path.join(dp f)<for>dp,dn,filenames os.walk(folder)<for>f filenames]<block_end><else_stmt><block_start>all_files=[f<for>f os.listdir(folder)<if>os.path.isfile(os.path.join(folder f))]<block_end># Get the bootime <with_stmt>open('/proc/uptime' 'r')<as>uptime_process<block_start>uptime_seconds=int(round(float(uptime_process.readline().split()[0])))<block_end>current_time_seconds=int(calendar.timegm(time.gmtime()))<line_sep>boot_time_seconds=current_time_seconds-uptime_seconds<line_sep># Filter the files need to be checked <if_stmt>mask<is><not><none><block_start>all_files=[f<for>f all_files<if>mask<in>f]<block_end><for_stmt>f all_files<block_start>last_modified_time_seconds=int(round(os.path.getmtime(f)))<if_stmt>last_modified_time_seconds<l>boot_time_seconds<block_start><raise>ValueError("Looks like {} was 
modified before the current boot".format(f))<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><try_stmt><block_start>sys.exit(main())<block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end><block_end>
# local <import_stmt>ivy<line_sep># global <import_from_stmt>typing Callable Type List Iterable Optional<import_from_stmt>types ModuleType<def_stmt>_wrap_function function_name:str<arrow>Callable<block_start>"""Wraps the function called `function_name`. Parameters ---------- function_name the name of the function e.g. "abs", "mean" etc. Returns ------- new_function the wrapped function. Examples -------- >>> ivy.set_backend("torch") >>> from ivy.array.wrapping import _wrap_function >>> absolute = _wrap_function("abs") >>> x = ivy.array([-1]) >>> print(absolute(x)) ivy.array([1]) """<def_stmt>new_function self *args **kwargs<block_start>"""Add the data of the current array from which the instance function is invoked as the first arg parameter or kwarg parameter. Return the new function with the name function_name and the new args variable or kwargs as the new inputs. """<line_sep>function=ivy.__dict__[function_name]<line_sep># gives us the position and name of the array argument data_idx=function.array_spec[0]<if_stmt>len(args)<g>data_idx[0][0]<block_start>args=ivy.copy_nest(args to_mutable=<true>)<line_sep>data_idx=[data_idx[0][0]]+[0<if>idx<is>int<else>idx<for>idx data_idx[1:]]<line_sep>ivy.insert_into_nest_at_index(args data_idx self._data)<block_end><else_stmt><block_start>kwargs=ivy.copy_nest(kwargs to_mutable=<true>)<line_sep>data_idx=[data_idx[0][1]]+[0<if>idx<is>int<else>idx<for>idx data_idx[1:]]<line_sep>ivy.insert_into_nest_at_index(kwargs data_idx self._data)<block_end><return>function(*args **kwargs)<block_end><return>new_function<block_end><def_stmt>add_ivy_array_instance_methods cls:Type[ivy.Array] modules:List[ModuleType] to_ignore:Optional[Iterable]=()<block_start>"""Loop over all ivy modules such as activations, general, etc. and add the module functions to ivy arrays as instance methods using _wrap_function. Parameters ---------- cls the class we want to add the instance methods to. modules the modules to loop over: activations, general etc. to_ignore any items we don't want to add an instance method for. Examples -------- As shown, `add_ivy_array_instance_methods` adds all the appropriate functions from the activations module as instance methods to our toy `ArrayExample` class: >>> from ivy.functional.ivy import activations >>> class ArrayExample: \ pass >>> ivy.add_ivy_array_instance_methods(ArrayExample, [activations]) >>> print(hasattr(ArrayExample, "relu"), hasattr(ArrayExample, "softmax")) True True """<for_stmt>module modules<block_start><for_stmt>key,value module.__dict__.items()# we skip the cases where the function is protected, the instance # method has already been added manually and a few other cases <block_start><if_stmt>(key.startswith("_")<or>key[0].isupper()<or><not>callable(value)<or>key<in>cls.__dict__<or>hasattr(cls key)<or>key<in>to_ignore<or>key<not><in>ivy.__dict__)<block_start><continue><block_end><try_stmt><block_start>setattr(cls key _wrap_function(key))<block_end><except_stmt>AttributeError<block_start><pass><block_end><block_end><block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>os<def_stmt>import_dir name fromlist=()<block_start>PACKAGE_EXT='.sublime-package'<line_sep>dirname=os.path.basename(os.path.dirname(os.path.realpath(__file__)))<if_stmt>dirname.endswith(PACKAGE_EXT)<block_start>dirname=dirname[:-len(PACKAGE_EXT)]<block_end><return>__import__('{0}.{1}'.format(dirname name) fromlist=fromlist)<block_end><try_stmt><block_start>imp=import_dir('hayaku_dict_driver' ('parse_dict_json' ))<line_sep>get_css_dict,merge_dict,merge_aliases=imp.get_css_dict imp.merge_dict imp.merge_aliases<block_end><except_stmt>ImportError<block_start><import_from_stmt>hayaku_dict_driver get_css_dict merge_dict merge_aliases<block_end>hayaku_extra_dicts_cache={}<line_sep>hayaku_extra_aliases_cache={}<line_sep>hayaku_dict_cache={}<line_sep>hayaku_aliases_cache={}<def_stmt>get_merged_dict options<block_start><global>hayaku_extra_dicts_cache<line_sep><global>hayaku_extra_aliases_cache<line_sep><global>hayaku_dict_cache<line_sep><global>hayaku_aliases_cache<line_sep>settings=options.get('settings')<line_sep>cache_key='CSS'<line_sep>preprocessor=options.get('CSS_preprocessor')<if_stmt>preprocessor<block_start>cache_key=preprocessor<block_end>result_dict,result_aliases=get_css_dict(preprocessor=preprocessor)<line_sep>new_dict={}<line_sep>new_aliases={}<line_sep>extra_scopes=['user' 'syntax' 'project']+settings.get('hayaku_extra_scopes' [])<for_stmt>scope extra_scopes<block_start>dict_name='hayaku_'+scope+'_dict'<line_sep>alias_name='hayaku_'+scope+'_aliases'<line_sep>new_dict[dict_name]=settings.get(dict_name {})<line_sep>new_aliases[alias_name]=settings.get(alias_name {})<line_sep># TODO: use a function for those two if-else noodles <if_stmt>'CSS'<in>new_dict[dict_name]<block_start><if_stmt>preprocessor<in>new_dict[dict_name]<block_start>new_dict[dict_name]=merge_dict(new_dict[dict_name].get('CSS') new_dict[dict_name].get(preprocessor))<block_end><else_stmt><block_start>new_dict[dict_name]=new_dict[dict_name].get('CSS')<block_end><block_end><elif_stmt>preprocessor<in>new_dict[dict_name]<block_start>new_dict[dict_name]=new_dict[dict_name].get(preprocessor)<block_end><if_stmt>'CSS'<in>new_aliases[alias_name]<block_start><if_stmt>preprocessor<in>new_aliases[alias_name]<block_start>new_aliases[alias_name]=merge_dict(new_aliases[alias_name].get('CSS') new_aliases[alias_name].get(preprocessor))<block_end><else_stmt><block_start>new_aliases[alias_name]=new_aliases[alias_name].get('CSS')<block_end><block_end><elif_stmt>preprocessor<in>new_aliases[alias_name]<block_start>new_aliases[alias_name]=new_aliases[alias_name].get(preprocessor)<block_end><block_end><if_stmt>new_dict<ne>hayaku_extra_dicts_cache.get(cache_key)<block_start>hayaku_extra_dicts_cache[cache_key]=new_dict<for_stmt>dict_scope dict(hayaku_extra_dicts_cache.get(cache_key))<block_start>result_dict=merge_dict(result_dict hayaku_extra_dicts_cache.get(cache_key).get(dict_scope))<block_end>hayaku_dict_cache[cache_key]=result_dict<block_end><elif_stmt>cache_key<in>hayaku_dict_cache<block_start>result_dict=hayaku_dict_cache[cache_key]<block_end><if_stmt>new_aliases<ne>hayaku_extra_aliases_cache.get(cache_key)<block_start>hayaku_extra_aliases_cache[cache_key]=new_aliases<for_stmt>aliases_scope dict(hayaku_extra_aliases_cache.get(cache_key))<block_start>result_aliases=merge_aliases(result_aliases 
hayaku_extra_aliases_cache.get(cache_key).get(aliases_scope))<block_end>hayaku_aliases_cache[cache_key]=result_aliases<block_end><elif_stmt>cache_key<in>hayaku_aliases_cache<block_start>result_aliases=hayaku_aliases_cache[cache_key]<block_end><return>result_dict result_aliases<block_end>
import FWCore.ParameterSet.Config as cms
import RecoLocalCalo.HcalRecProducers.hfsimplereco_cfi as _mod

hfQIE10Reco = _mod.hfsimplereco.clone(
    digiLabel="simHcalUnsuppressedDigis:HFQIE10DigiCollection",
    Subdetector='HFQIE10',
    firstSample=2,
    samplesToAdd=1
)
import os

from drivers import IPHONE_UA
from selenium import webdriver
from selenium.webdriver.common.desired_capabilities import DesiredCapabilities


def get(driver_path):
    if not os.path.exists(driver_path):
        raise FileNotFoundError(
            "Could not find phantomjs executable at %s. "
            "Download it for your platform at http://phantomjs.org/download.html" % driver_path)
    dcap = dict(DesiredCapabilities.PHANTOMJS)
    dcap["phantomjs.page.settings.userAgent"] = IPHONE_UA
    driver = webdriver.PhantomJS(desired_capabilities=dcap, executable_path=driver_path)
    driver.set_window_size(1024, 3000)
    return driver
from .group_sampler import GroupSampler

__all__ = ['GroupSampler']
# coding=utf-8 # Copyright 2020 HuggingFace Datasets Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # Lint as: python3 <import_stmt>datasets<line_sep>_DESCRIPTION="""\ GermaNER is a freely available statistical German Named Entity Tagger based on conditional random fields(CRF). The tagger is trained and evaluated on the NoSta-D Named Entity dataset, which was used in the GermEval 2014 for named entity recognition. The tagger comes close to the performance of the best (proprietary) system in the competition with 77% F-measure (this is the latest result; the one reported in the paper is 76%) test set performance on the four standard NER classes (PERson, LOCation, ORGanisation and OTHer). We describe a range of features and their influence on German NER classification and provide a comparative evaluation and some analysis of the results. The software components, the training data and all data used for feature generation are distributed under permissive licenses, thus this tagger can be used in academic and commercial settings without restrictions or fees. The tagger is available as a command-line tool and as an Apache UIMA component. """<line_sep>_HOMEPAGE_URL="https://github.com/tudarmstadt-lt/GermaNER"<line_sep>_URL="https://raw.githubusercontent.com/tudarmstadt-lt/GermaNER/a206b554feca263d740302449fff0776c66d0040/data/v0.9.1/full_train.tsv"<line_sep>_CITATION="""\ @inproceedings{Benikova2015GermaNERFO, title={GermaNER: Free Open German Named Entity Recognition Tool}, author={<NAME> and <NAME> and <NAME> and <NAME>}, booktitle={GSCL}, year={2015} } """<class_stmt>GermaNER(datasets.GeneratorBasedBuilder)<block_start>VERSION=datasets.Version("0.9.1")<def_stmt>_info self<block_start><return>datasets.DatasetInfo(description=_DESCRIPTION features=datasets.Features({"id":datasets.Value("string") "tokens":datasets.Sequence(datasets.Value("string")) "ner_tags":datasets.Sequence(datasets.features.ClassLabel(names=["B-LOC" "B-ORG" "B-OTH" "B-PER" "I-LOC" "I-ORG" "I-OTH" "I-PER" "O" ])) } ) supervised_keys=<none> homepage=_HOMEPAGE_URL citation=_CITATION )<block_end><def_stmt>_split_generators self dl_manager<block_start>path=dl_manager.download_and_extract(_URL)<line_sep><return>[datasets.SplitGenerator(name=datasets.Split.TRAIN gen_kwargs={"datapath":path} )]<block_end><def_stmt>_generate_examples self datapath<block_start>sentence_counter=0<with_stmt>open(datapath encoding="utf-8")<as>f<block_start>current_words=[]<line_sep>current_labels=[]<for_stmt>row f<block_start>row=row.rstrip()<line_sep>row_split=row.split()<if_stmt>len(row_split)<eq>2<block_start>token,label=row_split<line_sep>current_words.append(token)<line_sep>current_labels.append(label)<block_end><else_stmt><block_start><if_stmt><not>current_words<block_start><continue><block_end><assert_stmt>len(current_words)<eq>len(current_labels) "word len doesnt match label length"<line_sep>sentence=(sentence_counter {"id":str(sentence_counter) "tokens":current_words "ner_tags":current_labels } 
)<line_sep>sentence_counter<augadd>1<line_sep>current_words=[]<line_sep>current_labels=[]<line_sep><yield>sentence<block_end><block_end># if something remains: <if_stmt>current_words<block_start>sentence=(sentence_counter {"id":str(sentence_counter) "tokens":current_words "ner_tags":current_labels } )<line_sep><yield>sentence<block_end><block_end><block_end><block_end>
# AUTO-GENERATED by tools/checkspecs.py - DO NOT EDIT <import_from_stmt>..preprocess Qwarp<def_stmt>test_Qwarp_inputs <block_start>input_map=dict(Qfinal=dict(argstr="-Qfinal" ) Qonly=dict(argstr="-Qonly" ) allineate=dict(argstr="-allineate" ) allineate_opts=dict(argstr="-allineate_opts %s" requires=["allineate"] ) allsave=dict(argstr="-allsave" xor=["nopadWARP" "duplo" "plusminus"] ) args=dict(argstr="%s" ) ballopt=dict(argstr="-ballopt" xor=["workhard" "boxopt"] ) base_file=dict(argstr="-base %s" copyfile=<false> extensions=<none> mandatory=<true> ) baxopt=dict(argstr="-boxopt" xor=["workhard" "ballopt"] ) blur=dict(argstr="-blur %s" ) duplo=dict(argstr="-duplo" xor=["gridlist" "maxlev" "inilev" "iniwarp" "plusminus" "allsave"] ) emask=dict(argstr="-emask %s" copyfile=<false> extensions=<none> ) environ=dict(nohash=<true> usedefault=<true> ) expad=dict(argstr="-expad %d" xor=["nopadWARP"] ) gridlist=dict(argstr="-gridlist %s" copyfile=<false> extensions=<none> xor=["duplo" "plusminus"] ) hel=dict(argstr="-hel" xor=["nmi" "mi" "lpc" "lpa" "pear"] ) in_file=dict(argstr="-source %s" copyfile=<false> extensions=<none> mandatory=<true> ) inilev=dict(argstr="-inilev %d" xor=["duplo"] ) iniwarp=dict(argstr="-iniwarp %s" xor=["duplo"] ) iwarp=dict(argstr="-iwarp" xor=["plusminus"] ) lpa=dict(argstr="-lpa" xor=["nmi" "mi" "lpc" "hel" "pear"] ) lpc=dict(argstr="-lpc" position=-2 xor=["nmi" "mi" "hel" "lpa" "pear"] ) maxlev=dict(argstr="-maxlev %d" position=-1 xor=["duplo"] ) mi=dict(argstr="-mi" xor=["mi" "hel" "lpc" "lpa" "pear"] ) minpatch=dict(argstr="-minpatch %d" ) nmi=dict(argstr="-nmi" xor=["nmi" "hel" "lpc" "lpa" "pear"] ) noXdis=dict(argstr="-noXdis" ) noYdis=dict(argstr="-noYdis" ) noZdis=dict(argstr="-noZdis" ) noneg=dict(argstr="-noneg" ) nopad=dict(argstr="-nopad" ) nopadWARP=dict(argstr="-nopadWARP" xor=["allsave" "expad"] ) nopenalty=dict(argstr="-nopenalty" ) nowarp=dict(argstr="-nowarp" ) noweight=dict(argstr="-noweight" ) num_threads=dict(nohash=<true> usedefault=<true> ) out_file=dict(argstr="-prefix %s" extensions=<none> name_source=["in_file"] name_template="ppp_%s" ) out_weight_file=dict(argstr="-wtprefix %s" extensions=<none> ) outputtype=dict() overwrite=dict(argstr="-overwrite" ) pblur=dict(argstr="-pblur %s" ) pear=dict(argstr="-pear" ) penfac=dict(argstr="-penfac %f" ) plusminus=dict(argstr="-plusminus" xor=["duplo" "allsave" "iwarp"] ) quiet=dict(argstr="-quiet" xor=["verb"] ) resample=dict(argstr="-resample" ) verb=dict(argstr="-verb" xor=["quiet"] ) wball=dict(argstr="-wball %s" xor=["wmask"] ) weight=dict(argstr="-weight %s" extensions=<none> ) wmask=dict(argstr="-wpass %s %f" xor=["wball"] ) workhard=dict(argstr="-workhard" xor=["boxopt" "ballopt"] ) )<line_sep>inputs=Qwarp.input_spec()<for_stmt>key,metadata list(input_map.items())<block_start><for_stmt>metakey,value list(metadata.items())<block_start><assert_stmt>getattr(inputs.traits()[key] metakey)<eq>value<block_end><block_end><block_end><def_stmt>test_Qwarp_outputs <block_start>output_map=dict(base_warp=dict(extensions=<none> ) source_warp=dict(extensions=<none> ) warped_base=dict(extensions=<none> ) warped_source=dict(extensions=<none> ) weights=dict(extensions=<none> ) )<line_sep>outputs=Qwarp.output_spec()<for_stmt>key,metadata list(output_map.items())<block_start><for_stmt>metakey,value list(metadata.items())<block_start><assert_stmt>getattr(outputs.traits()[key] metakey)<eq>value<block_end><block_end><block_end>
<import_stmt>os<import_stmt>math<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_stmt>skimage.transform<as>trans<import_stmt>cv2<import_stmt>torch<import_from_stmt>data dataset_info<import_from_stmt>data.base_dataset BaseDataset<import_stmt>util.util<as>util<line_sep>dataset_info=dataset_info()<class_stmt>AllFaceDataset(BaseDataset)<block_start>@staticmethod<def_stmt>modify_commandline_options parser is_train<block_start>parser.add_argument('--no_pairing_check' action='store_true' help='If specified, skip sanity check of correct label-image file pairing')<line_sep><return>parser<block_end><def_stmt>cv2_loader self img_str<block_start>img_array=np.frombuffer(img_str dtype=np.uint8)<line_sep><return>cv2.imdecode(img_array cv2.IMREAD_COLOR)<block_end><def_stmt>fill_list self tmp_list<block_start>length=len(tmp_list)<if_stmt>length%self.opt.batchSize<ne>0<block_start>end=math.ceil(length/self.opt.batchSize)<times>self.opt.batchSize<line_sep>tmp_list=tmp_list+tmp_list[-1<times>(end-length):]<block_end><return>tmp_list<block_end><def_stmt>initialize self opt<block_start>self.opt=opt<line_sep>dataset_num=dataset_info.get_dataset(opt)<line_sep>self.prefix=[dataset_info.prefix[num]<for>num dataset_num]<line_sep>file_list=[dataset_info.file_list[num]<for>num dataset_num]<line_sep>land_mark_list=[dataset_info.land_mark_list[num]<for>num dataset_num]<line_sep>self.params_dir=[dataset_info.params_dir[num]<for>num dataset_num]<line_sep>self.folder_level=[dataset_info.folder_level[num]<for>num dataset_num]<line_sep>self.num_datasets=len(file_list)<assert_stmt>len(land_mark_list)<eq>self.num_datasets 'num of landmk dir should be the num of datasets'<assert_stmt>len(self.params_dir)<eq>self.num_datasets 'num of params_dir should be the num of datasets'<line_sep>self.dataset_lists=[]<line_sep>self.landmark_paths=[]<line_sep>self.sizes=[]<for_stmt>n range(self.num_datasets)<block_start><with_stmt>open(file_list[n])<as>f<block_start>img_lists=f.readlines()<block_end>img_lists=self.fill_list(img_lists)<line_sep>self.sizes.append(len(img_lists))<line_sep>self.dataset_lists.append(sorted(img_lists))<with_stmt>open(land_mark_list[n])<as>f<block_start>landmarks=f.readlines()<line_sep>landmarks=self.fill_list(landmarks)<line_sep>self.landmark_paths.append(sorted(landmarks))<block_end><block_end>self.dataset_size=min(self.sizes)<line_sep>self.initialized=<false><block_end><def_stmt>get_landmarks self landmark img_list<block_start>landmark_split=landmark.strip().split(' ')<line_sep>filename1_without_ext=os.path.basename(img_list.strip())<line_sep>filename2_without_ext=os.path.basename(landmark_split[0])<assert_stmt>(filename1_without_ext<eq>filename2_without_ext) "The image_path %s and params_path %s don't match."%(img_list landmark_split[0])<line_sep>label=landmark_split[1]<line_sep>landmarks=landmark_split[2:]<line_sep>landmarks=list(map(float landmarks))<line_sep>landmarks_array=np.array(landmarks).reshape(5 2)<line_sep><return>landmarks_array label<block_end><def_stmt>get_param_file self img_list dataset_num<block_start>img_name=os.path.splitext(img_list)[0]<line_sep>name_split=img_name.split("/")<line_sep>folder_level=self.folder_level[dataset_num]<line_sep>param_folder=os.path.join(self.params_dir[dataset_num] "/".join([name_split[i]<for>i range(len(name_split)-folder_level len(name_split))])+".txt")<line_sep># params = np.loadtxt(param_folder) <return>param_folder<block_end><def_stmt>paths_match self path1 
path2<block_start>filename1_without_ext=os.path.splitext(os.path.basename(path1)[-10:])[0]<line_sep>filename2_without_ext=os.path.splitext(os.path.basename(path2)[-10:])[0]<line_sep><return>filename1_without_ext<eq>filename2_without_ext<block_end><def_stmt>affine_align self img landmark=<none> **kwargs<block_start>M=<none><line_sep>h,w,c=img.shape<line_sep>src=np.array([[38.2946 51.6963] [73.5318 51.5014] [56.0252 71.7366] [41.5493 92.3655] [70.7299 92.2041]] dtype=np.float32)<line_sep>src=src<times>290/112<line_sep>src[: 0]<augadd>50<line_sep>src[: 1]<augadd>60<line_sep>src=src/400<times>self.opt.crop_size<line_sep>dst=landmark<line_sep># dst = landmark.astype(np.float32) tform=trans.SimilarityTransform()<line_sep>tform.estimate(dst src)<line_sep>M=tform.params[0:2 :]<line_sep>warped=cv2.warpAffine(img M (self.opt.crop_size self.opt.crop_size) borderValue=0.0)<line_sep><return>warped M<block_end><def_stmt>__getitem__ self index# Label Image <block_start>randnum=np.random.randint(sum(self.sizes))<line_sep>dataset_num=np.random.randint(self.num_datasets)<line_sep>image_path=self.dataset_lists[dataset_num][index].strip()<line_sep>image_path=os.path.join(self.prefix[dataset_num] image_path)<line_sep>img=cv2.imread(image_path)<if_stmt>img<is><none><block_start><raise>Exception('None Image')<block_end>param_path=self.get_param_file(image_path dataset_num)<line_sep># img = cv2.imread(image_path) img=cv2.cvtColor(img cv2.COLOR_BGR2RGB)<line_sep>M=<none><line_sep>landmark_path=self.landmark_paths[dataset_num][index].strip()<line_sep>landmarks,label=self.get_landmarks(landmark_path image_path)<line_sep>wrapped_img,M=self.affine_align(img landmarks)<line_sep>M=torch.from_numpy(M).float()<line_sep>wrapped_img=wrapped_img.transpose(2 0 1)/255.0<line_sep>wrapped_img=torch.from_numpy(wrapped_img).float()<line_sep>input_dict={'image':wrapped_img 'param_path':param_path 'M':M 'path':image_path}<line_sep># Give subclasses a chance to modify the final output self.postprocess(input_dict)<line_sep><return>input_dict<block_end><def_stmt>postprocess self input_dict<block_start><return>input_dict<block_end><def_stmt>__len__ self<block_start><return>self.dataset_size<block_end><block_end>
# _*_ coding: utf-8 _*_
"""
Created by Allen7D on 2020/4/13.
"""
from app import create_app
from tests.utils import get_authorization

__author__ = 'Allen7D'

app = create_app()


def test_create_auth_list():
    with app.test_client() as client:
        rv = client.post('/cms/auth/append', headers={
            'Authorization': get_authorization()
        }, json={
            'group_id': 5,
            'auth_ids': [1, 2, 3]
        })
        json_data = rv.get_json()
        print(json_data)


def test_delete_auth_list():
    with app.test_client() as client:
        rv = client.post('/cms/auth/remove', headers={
            'Authorization': get_authorization()
        }, json={
            'group_id': 5,
            'auth_ids': [1, 2, 3]
        })
        json_data = rv.get_json()
        print(json_data)


test_create_auth_list()
test_delete_auth_list()
<import_from_stmt>kfp.components InputPath OutputPath create_component_from_func<def_stmt>catboost_train_classifier training_data_path:InputPath('CSV') model_path:OutputPath('CatBoostModel') starting_model_path:InputPath('CatBoostModel')=<none> label_column:int=0 loss_function:str='Logloss' num_iterations:int=500 learning_rate:float=<none> depth:int=6 random_seed:int=0 cat_features:list=<none> text_features:list=<none> additional_training_options:dict={} <block_start>'''Train a CatBoost classifier model. Args: training_data_path: Path for the training data in CSV format. model_path: Output path for the trained model in binary CatBoostModel format. starting_model_path: Path for the existing trained model to start from. label_column: Column containing the label data. loss_function: The metric to use in training and also selector of the machine learning problem to solve. Default = 'Logloss' num_iterations: Number of trees to add to the ensemble. learning_rate: Step size shrinkage used in update to prevents overfitting. Default value is selected automatically for binary classification with other parameters set to default. In all other cases default is 0.03. depth: Depth of a tree. All trees are the same depth. Default = 6 random_seed: Random number seed. Default = 0 cat_features: A list of Categorical features (indices or names). text_features: A list of Text features (indices or names). additional_training_options: A dictionary with additional options to pass to CatBoostClassifier Outputs: model: Trained model in binary CatBoostModel format. Annotations: author: <NAME> <<EMAIL>> '''<import_stmt>tempfile<import_from_stmt>pathlib Path<import_from_stmt>catboost CatBoostClassifier Pool<line_sep>column_descriptions={label_column:'Label'}<line_sep>column_description_path=tempfile.NamedTemporaryFile(delete=<false>).name<with_stmt>open(column_description_path 'w')<as>column_description_file<block_start><for_stmt>idx,kind column_descriptions.items()<block_start>column_description_file.write('{}\t{}\n'.format(idx kind))<block_end><block_end>train_data=Pool(training_data_path column_description=column_description_path has_header=<true> delimiter=',' )<line_sep>model=CatBoostClassifier(iterations=num_iterations depth=depth learning_rate=learning_rate loss_function=loss_function random_seed=random_seed verbose=<true> **additional_training_options )<line_sep>model.fit(train_data cat_features=cat_features text_features=text_features init_model=starting_model_path #verbose=False, #plot=True, )<line_sep>Path(model_path).parent.mkdir(parents=<true> exist_ok=<true>)<line_sep>model.save_model(model_path)<block_end><if_stmt>__name__<eq>'__main__'<block_start>catboost_train_classifier_op=create_component_from_func(catboost_train_classifier output_component_file='component.yaml' base_image='python:3.7' packages_to_install=['catboost==0.23'] annotations={"author":"<NAME> <<EMAIL>>" "canonical_location":"https://raw.githubusercontent.com/Ark-kun/pipeline_components/master/components/CatBoost/Train_classifier/from_CSV/component.yaml" } )<block_end>
<import_from_stmt>typing TYPE_CHECKING<import_stmt>pytest<import_from_stmt>double_protocol double<def_stmt>test_double_int <arrow><none><block_start>given=2<line_sep>result=double(given)<assert_stmt>result<eq>given<times>2<if_stmt>TYPE_CHECKING<block_start>reveal_type(given)<line_sep>reveal_type(result)<block_end><block_end><def_stmt>test_double_str <arrow><none><block_start>given='A'<line_sep>result=double(given)<assert_stmt>result<eq>given<times>2<if_stmt>TYPE_CHECKING<block_start>reveal_type(given)<line_sep>reveal_type(result)<block_end><block_end><def_stmt>test_double_fraction <arrow><none><block_start><import_from_stmt>fractions Fraction<line_sep>given=Fraction(2 5)<line_sep>result=double(given)<assert_stmt>result<eq>given<times>2<if_stmt>TYPE_CHECKING<block_start>reveal_type(given)<line_sep>reveal_type(result)<block_end><block_end><def_stmt>test_double_array <arrow><none><block_start><import_from_stmt>array array<line_sep>given=array('d' [1.0 2.0 3.14])<line_sep>result=double(given)<if_stmt>TYPE_CHECKING<block_start>reveal_type(given)<line_sep>reveal_type(result)<block_end><block_end><def_stmt>test_double_nparray <arrow><none><block_start><import_stmt>numpy<as>np# type: ignore given=np.array([[1 2] [3 4]])<line_sep>result=double(given)<line_sep>comparison=result<eq>given<times>2<assert_stmt>comparison.all()<if_stmt>TYPE_CHECKING<block_start>reveal_type(given)<line_sep>reveal_type(result)<block_end><block_end><def_stmt>test_double_none <arrow><none><block_start>given=<none><with_stmt>pytest.raises(TypeError)<block_start>double(given)<block_end><block_end>
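# The tests above exercise double() from a double_protocol module that is not part of this listing; what follows is only a hedged sketch of how that module presumably defines the protocol (the Repeatable/RT names are assumptions, not taken from the real repository). <import_from_stmt>typing Protocol TypeVar<line_sep>T=TypeVar('T')<class_stmt>Repeatable(Protocol)<block_start><def_stmt>__mul__ self:T repeat_count:int<arrow>T<block_start><pass><block_end><block_end>RT=TypeVar('RT' bound=Repeatable)<def_stmt>double x:RT<arrow>RT<block_start><return>x<times>2<block_end>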
"""Extreme Discovery Protocol."""<import_from_future_stmt> absolute_import<import_stmt>dpkt<class_stmt>EDP(dpkt.Packet)<block_start>__hdr__=(('version' 'B' 1) ('reserved' 'B' 0) ('hlen' 'H' 0) ('sum' 'H' 0) ('seq' 'H' 0) ('mid' 'H' 0) ('mac' '6s' b''))<def_stmt>__bytes__ self<block_start><if_stmt><not>self.sum<block_start>self.sum=dpkt.in_cksum(dpkt.Packet.__bytes__(self))<block_end><return>dpkt.Packet.__bytes__(self)<block_end><block_end><class_stmt>TestEDP(object)<block_start>""" Test basic EDP functionality. """<line_sep>@classmethod<def_stmt>setup_class cls<block_start><import_from_stmt>binascii unhexlify<line_sep>cls.buf=unhexlify('01'# version <concat>'00'# reserved <concat>'013c'# hlen <concat>'9e76'# sum <concat>'001b'# seq <concat>'0000'# mid <concat>'080027'# mac <concat>'2d90ed990200240000000000000000000000000f020207000000000000000000000000000000009901010445584f532d32000000000000000'<concat>'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'<concat>'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'<concat>'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'<concat>'00000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000'<concat>'00000000000000000000000000000000099000004')<line_sep>cls.p=EDP(cls.buf)<block_end><def_stmt>test_version self<block_start><assert_stmt>(self.p.version<eq>1)<block_end><def_stmt>test_reserved self<block_start><assert_stmt>(self.p.reserved<eq>0)<block_end><def_stmt>test_hlen self<block_start><assert_stmt>(self.p.hlen<eq>316)<block_end><def_stmt>test_sum self<block_start><assert_stmt>(self.p.sum<eq>40566)<block_end><def_stmt>test_seq self<block_start><assert_stmt>(self.p.seq<eq>27)<block_end><def_stmt>test_mid self<block_start><assert_stmt>(self.p.mid<eq>0)<block_end><def_stmt>test_mac self<block_start><assert_stmt>(self.p.mac<eq>b"\x08\x00'-\x90\xed")<block_end><def_stmt>test_bytes self<block_start><assert_stmt>bytes(self.p)<eq>self.buf<line_sep># force recalculation of the checksum edp=EDP(self.buf)<line_sep>edp.sum=0<assert_stmt>edp.sum<eq>0<assert_stmt>bytes(edp)<eq>self.buf<block_end><block_end>
# Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Sanity checking for grd_helper.py. Run manually before uploading a CL."""<import_stmt>io<import_stmt>os<import_stmt>subprocess<import_stmt>sys<line_sep># Add the parent dir so that we can import from "helper". sys.path.insert(0 os.path.dirname(os.path.dirname(os.path.abspath(__file__))))<import_from_stmt>helper grd_helper<import_from_stmt>helper translation_helper<if_stmt>sys.platform.startswith('win')# Use the |git.bat| in the depot_tools/ on Windows. <block_start>GIT='git.bat'<block_end><else_stmt><block_start>GIT='git'<block_end>here=os.path.dirname(os.path.realpath(__file__))<line_sep>repo_root=os.path.normpath(os.path.join(here '..' '..' '..'))<def_stmt>list_files_in_repository repo_path pattern<block_start>"""Lists all files matching given pattern in the given git repository"""<line_sep># This works because git does its own glob expansion even though there is no # shell to do it. output=subprocess.check_output([GIT 'ls-files' '--' pattern] cwd=repo_path).decode('utf-8')<line_sep><return>output.strip().splitlines()<block_end><def_stmt>read_file_as_text path<block_start><with_stmt>io.open(path mode='r' encoding='utf-8')<as>f<block_start><return>f.read()<block_end><block_end># Sanity checks to ensure that we can parse all grd and grdp files in the repo. # Must not fail. <def_stmt>Run <block_start>grds=list_files_in_repository(repo_root '*.grd')<line_sep>grdps=list_files_in_repository(repo_root '*.grdp')<line_sep>print('Found %d grds, %d grdps in the repo.'%(len(grds) len(grdps)))<line_sep># Make sure we can parse all .grd files in the source tree. Grd files are # parsed via the file path. <for_stmt>grd grds# This file is intentionally missing an include, skip it. <block_start><if_stmt>grd<eq>os.path.join('tools' 'translation' 'testdata' 'internal.grd')<block_start><continue><block_end>path=os.path.join(repo_root grd)<line_sep>grd_helper.GetGrdMessages(path os.path.dirname(path))<block_end># Make sure we can parse all .grdp files in the source tree. # Grdp files are parsed using file contents instead of path. <for_stmt>grdp grdps<block_start>path=os.path.join(repo_root grdp)<line_sep># Parse grdp files using file contents. contents=read_file_as_text(path)<line_sep>grd_helper.GetGrdpMessagesFromString(contents)<block_end>print('Successfully parsed all .grd and .grdp files in the repo.')<line_sep># Additional check for translateable grds. Translateable grds are a subset # of all grds so this checks some files twice, but it exercises the # get_translatable_grds() path and also doesn't need to skip internal.grd. TRANSLATION_EXPECTATIONS_PATH=os.path.join(repo_root 'tools' 'gritsettings' 'translation_expectations.pyl')<line_sep>translateable_grds=translation_helper.get_translatable_grds(repo_root grds TRANSLATION_EXPECTATIONS_PATH)<line_sep>print('Found %d translateable .grd files in translation expectations.'%len(translateable_grds))<for_stmt>grd translateable_grds<block_start>path=os.path.join(repo_root grd.path)<line_sep>grd_helper.GetGrdMessages(path os.path.dirname(path))<block_end>print('Successfully parsed all translateable_grds .grd files in translation '<concat>'expectations.')<line_sep>print('DONE')<block_end><if_stmt>__name__<eq>'__main__'<block_start>Run()<block_end>
<import_from_future_stmt> absolute_import<import_from_stmt>future.utils PY2 PY26<import_from_stmt>subprocess *<if_stmt>PY2<block_start>__future_module__=<true><import_from_stmt>commands getoutput getstatusoutput<block_end><if_stmt>PY26<block_start><import_from_stmt>future.backports.misc check_output<block_end>
<import_stmt>numpy<as>np<import_from_stmt>cyvcf2 VCF Variant Writer<import_stmt>os.path<line_sep>HERE=os.path.dirname(__file__)<line_sep>HEM_PATH=os.path.join(HERE "test-hemi.vcf")<line_sep>VCF_PATH=os.path.join(HERE "test.vcf.gz")<def_stmt>check_var v<block_start>s=[x.split(":")[0]<for>x str(v).split("\t")[9:]]<line_sep>lookup={'0/0':0 '0/1':1 './1':1 '1/.':1 '0/.':0 './0':0 '1/1':3 '.':2 './.':2}<line_sep>expected=np.array([lookup[ss]<for>ss s])<line_sep>obs=v.gt_types<assert_stmt>np.all(expected<eq>obs) zip(expected obs)<block_end><def_stmt>test_hemi <block_start>""" make sure that we are getting the correct gt_types for hemizygous variants """<for_stmt>p (HEM_PATH VCF_PATH)<block_start>vcf=VCF(p)<for_stmt>v vcf<block_start>check_var(v)<block_end><block_end><block_end>
# Source: https://github.com/Qwicen/node <import_stmt>contextlib<import_stmt>gc<import_stmt>glob<import_stmt>hashlib<import_stmt>os<import_stmt>time<import_stmt>numpy<as>np<import_stmt>requests<import_stmt>torch<import_from_stmt>tqdm tqdm<def_stmt>download url filename delete_if_interrupted=<true> chunk_size=4096<block_start>""" saves file from url to filename with a fancy progressbar """<try_stmt><block_start><with_stmt>open(filename "wb")<as>f<block_start>print("Downloading {} > {}".format(url filename))<line_sep>response=requests.get(url stream=<true>)<line_sep>total_length=response.headers.get('content-length')<if_stmt>total_length<is><none># no content length header <block_start>f.write(response.content)<block_end><else_stmt><block_start>total_length=int(total_length)<with_stmt>tqdm(total=total_length)<as>progressbar<block_start><for_stmt>data response.iter_content(chunk_size=chunk_size)<block_start><if_stmt>data# filter-out keep-alive chunks <block_start>f.write(data)<line_sep>progressbar.update(len(data))<block_end><block_end><block_end><block_end><block_end><block_end><except_stmt>Exception<as>e<block_start><if_stmt>delete_if_interrupted<block_start>print("Removing incomplete download {}.".format(filename))<line_sep>os.remove(filename)<block_end><raise>e<block_end><return>filename<block_end><def_stmt>iterate_minibatches *tensors batch_size shuffle=<true> epochs=1 allow_incomplete=<true> callback=<lambda>x:x<block_start>indices=np.arange(len(tensors[0]))<line_sep>upper_bound=int((np.ceil<if>allow_incomplete<else>np.floor)(len(indices)/batch_size))<times>batch_size<line_sep>epoch=0<while_stmt><true><block_start><if_stmt>shuffle<block_start>np.random.shuffle(indices)<block_end><for_stmt>batch_start callback(range(0 upper_bound batch_size))<block_start>batch_ix=indices[batch_start:batch_start+batch_size]<line_sep>batch=[tensor[batch_ix]<for>tensor tensors]<line_sep><yield>batch<if>len(tensors)<g>1<else>batch[0]<block_end>epoch<augadd>1<if_stmt>epoch<ge>epochs<block_start><break><block_end><block_end><block_end><def_stmt>process_in_chunks function *args batch_size out=<none> **kwargs<block_start>""" Computes output by applying batch-parallel function to large data tensor in chunks :param function: a function(*[x[indices, ...] for x in args]) -> out[indices, ...] :param args: one or many tensors, each [num_instances, ...] 
:param batch_size: maximum chunk size processed in one go :param out: memory buffer for out, defaults to torch.zeros of appropriate size and type :returns: function(data), computed in a memory-efficient way """<line_sep>total_size=args[0].shape[0]<line_sep>first_output=function(*[x[0:batch_size]<for>x args])<line_sep>output_shape=(total_size )+tuple(first_output.shape[1:])<if_stmt>out<is><none><block_start>out=torch.zeros(*output_shape dtype=first_output.dtype device=first_output.device layout=first_output.layout **kwargs)<block_end>out[0:batch_size]=first_output<for_stmt>i range(batch_size total_size batch_size)<block_start>batch_ix=slice(i min(i+batch_size total_size))<line_sep>out[batch_ix]=function(*[x[batch_ix]<for>x args])<block_end><return>out<block_end><def_stmt>check_numpy x<block_start>""" Makes sure x is a numpy array """<if_stmt>isinstance(x torch.Tensor)<block_start>x=x.detach().cpu().numpy()<block_end>x=np.asarray(x)<assert_stmt>isinstance(x np.ndarray)<line_sep><return>x<block_end>@contextlib.contextmanager<def_stmt>nop_ctx <block_start><yield><none><block_end><def_stmt>get_latest_file pattern<block_start>list_of_files=glob.glob(pattern)# * means all if need specific format then *.csv <assert_stmt>len(list_of_files)<g>0 "No files found: "+pattern<line_sep><return>max(list_of_files key=os.path.getctime)<block_end><def_stmt>md5sum fname<block_start>""" Computes md5 checksum of a file """<line_sep>hash_md5=hashlib.md5()<with_stmt>open(fname "rb")<as>f<block_start><for_stmt>chunk iter(<lambda>:f.read(4096) b"")<block_start>hash_md5.update(chunk)<block_end><block_end><return>hash_md5.hexdigest()<block_end><def_stmt>free_memory sleep_time=0.1<block_start>""" Black magic function to free torch memory and some jupyter whims """<line_sep>gc.collect()<line_sep>torch.cuda.synchronize()<line_sep>gc.collect()<line_sep>torch.cuda.empty_cache()<line_sep>time.sleep(sleep_time)<block_end><def_stmt>to_float_str element<block_start><try_stmt><block_start><return>str(float(element))<block_end><except_stmt>ValueError<block_start><return>element<block_end><block_end>
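# A small, hypothetical usage sketch for the helpers above; the tensor names and sizes are illustrative only and do not come from the original repository. <import_stmt>torch<line_sep>features=torch.randn(10000 16)<line_sep>targets=torch.randint(0 2 (10000 ))<line_sep># chunked forward pass that never materializes more than batch_size rows at once doubled=process_in_chunks(<lambda>x:x<times>2 features batch_size=1024)<line_sep># one shuffled epoch of minibatches <for_stmt>x_batch,y_batch iterate_minibatches(features targets batch_size=256 shuffle=<true> epochs=1)<block_start>print(x_batch.shape y_batch.shape)<block_end>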
<import_stmt>numpy<as>np<import_stmt>h5py<import_stmt>string<import_from_stmt>util crawl_meta<import_stmt>time<line_sep>CRAWL_DATA=<true><line_sep>AFTER_DECISION=<false><line_sep>CRAWL_REVIEW=<true><line_sep># Get the meta data meta_list=crawl_meta(meta_hdf5=<none> write_meta_name='data_{}.hdf5'.format(time.strftime("%Y%m%d%H%M%S")) crawl_review=CRAWL_REVIEW)<line_sep>num_withdrawn=len([m<for>m meta_list<if>m.withdrawn<or>m.desk_reject])<line_sep>print('Number of submissions: {} (withdrawn/desk reject submissions: {})'.format(len(meta_list) num_withdrawn))<line_sep>
<import_from_stmt>numpy array arange zeros unique searchsorted full nan<import_from_stmt>numpy.linalg norm# type: ignore <import_from_stmt>pyNastran.utils.numpy_utils integer_types<import_from_stmt>pyNastran.bdf.field_writer_8 print_card_8 set_blank_if_default<import_from_stmt>pyNastran.bdf.field_writer_16 print_card_16<import_from_stmt>pyNastran.bdf.bdf_interface.assign_type integer integer_or_blank double_or_blank integer_double_or_blank string_or_blank <import_from_stmt>pyNastran.bdf.cards.elements.bars BAROR<import_from_stmt>pyNastran.bdf.field_writer_8 set_string8_blank_if_default<import_from_stmt>pyNastran.dev.bdf_vectorized.cards.elements.element Element<class_stmt>CBAR(Element)<block_start>""" +-------+-----+-----+-----+-----+-----+-----+-----+------+ | CBAR | EID | PID | GA | GB | X1 | X2 | X3 | OFFT | +-------+-----+-----+-----+-----+-----+-----+-----+------+ | | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B | +-------+-----+-----+-----+-----+-----+-----+-----+------+ or +-------+-----+-----+-----+-----+-----+-----+-----+------+ | CBAR | EID | PID | GA | GB | G0 | | | OFFT | +-------+-----+-----+-----+-----+-----+-----+-----+------+ | | PA | PB | W1A | W2A | W3A | W1B | W2B | W3B | +-------+-----+-----+-----+-----+-----+-----+-----+------+ +-------+-------+-----+-------+-------+--------+-------+-------+-------+ | CBAR | 2 | 39 | 7 | 6 | 105 | | | GGG | +-------+-------+-----+-------+-------+--------+-------+-------+-------+ | | | 513 | 0.0+0 | 0.0+0 | -9. | 0.0+0 | 0.0+0 | -9. | +-------+-------+-----+-------+-------+--------+-------+-------+-------+ """<line_sep>type='CBAR'<def_stmt>__init__ self model<block_start>""" Defines the CBAR object. Parameters ---------- model : BDF the BDF object """<line_sep>Element.__init__(self model)<block_end><def_stmt>allocate self card_count<block_start>ncards=card_count[self.type]<line_sep>self.n=ncards<if_stmt>self.n<block_start><assert_stmt>isinstance(ncards int) ncards<line_sep>float_fmt=self.model.float_fmt<line_sep>#: Element ID self.element_id=zeros(ncards 'int32')<line_sep>#: Property ID self.property_id=zeros(ncards 'int32')<line_sep>self.node_ids=zeros((ncards 2) 'int32')<line_sep>self.is_g0=zeros(ncards 'bool')<line_sep>self.g0=full(ncards nan 'int32')<line_sep>self.x=full((ncards 3) nan float_fmt)<line_sep>self.offt=full(ncards nan '|U3')<line_sep>self.pin_flags=zeros((ncards 2) 'int32')<line_sep>self.wa=zeros((ncards 3) float_fmt)<line_sep>self.wb=zeros((ncards 3) float_fmt)<block_end><block_end><def_stmt>add_card self card comment=''<block_start>i=self.i<if_stmt>0<and>self.model.cbaror.n<g>0<block_start>cbaror=self.model.cbaror<line_sep>pid_default=cbaror.property_id<line_sep>is_g0_default=cbaror.is_g0<line_sep>x1_default=cbaror.x[0]<line_sep>x2_default=cbaror.x[1]<line_sep>x3_default=cbaror.x[2]<line_sep>g0_default=cbaror.g0<line_sep>offt_default=cbaror.offt<block_end><else_stmt><block_start>pid_default=<none><line_sep>is_g0_default=<none><line_sep>x1_default=0.0<line_sep>x2_default=0.0<line_sep>x3_default=0.0<line_sep>g0_default=<none><line_sep>offt_default='GGG'<block_end>eid=integer(card 1 'element_id')<line_sep>self.element_id[i]=eid<if_stmt>pid_default<is><not><none><block_start>self.property_id[i]=integer_or_blank(card 2 'property_id' pid_default)<block_end><else_stmt><block_start>self.property_id[i]=integer_or_blank(card 2 'property_id' eid)<block_end>self.node_ids[i]=[integer(card 3 'GA') integer(card 4 'GB')]<line_sep>#--------------------------------------------------------- # x / g0 
<if_stmt>g0_default<is><not><none><block_start>field5=integer_double_or_blank(card 5 'g0_x1' g0_default)<block_end><else_stmt><block_start>field5=integer_double_or_blank(card 5 'g0_x1' x1_default)<block_end><if_stmt>isinstance(field5 integer_types)<block_start>self.is_g0[i]=<true><line_sep>self.g0[i]=field5<block_end><elif_stmt>isinstance(field5 float)<block_start>self.is_g0[i]=<false><line_sep>x=array([field5 double_or_blank(card 6 'x2' x2_default) double_or_blank(card 7 'x3' x3_default)] dtype='float64')<line_sep>self.x[i :]=x<if_stmt>norm(x)<eq>0.0<block_start>msg='G0 vector defining plane 1 is not defined on CBAR %s.\n'%eid<line_sep>msg<augadd>'G0 = %s\n'%field5<line_sep>msg<augadd>'X = %s\n'%x<line_sep>msg<augadd>'%s'%card<line_sep><raise>RuntimeError(msg)<block_end><block_end><else_stmt><block_start>msg=('field5 on CBAR (G0/X1) is the wrong type...id=%s field5=%s '<concat>'type=%s'%(self.eid field5 type(field5)))<line_sep><raise>RuntimeError(msg)<block_end>#--------------------------------------------------------- # offt # bit doesn't exist on the CBAR offt=string_or_blank(card 8 'offt' offt_default)<line_sep>msg='invalid offt parameter of CBEAM...offt=%s'%offt<assert_stmt>offt[0]<in>['G' 'B' 'O' 'E'] msg<assert_stmt>offt[1]<in>['G' 'B' 'O' 'E'] msg<assert_stmt>offt[2]<in>['G' 'B' 'O' 'E'] msg<line_sep>self.offt[i]=offt<line_sep>self.pin_flags[i :]=[integer_or_blank(card 9 'pa' 0) integer_or_blank(card 10 'pb' 0)]<line_sep>self.wa[i :]=[double_or_blank(card 11 'w1a' 0.0) double_or_blank(card 12 'w2a' 0.0) double_or_blank(card 13 'w3a' 0.0) ]<line_sep>self.wb[i :]=[double_or_blank(card 14 'w1b' 0.0) double_or_blank(card 15 'w2b' 0.0) double_or_blank(card 16 'w3b' 0.0) ]<assert_stmt>len(card)<le>17 'len(CBAR card) = %i\ncard=%s'%(len(card) card)<line_sep>self.i<augadd>1<block_end><def_stmt>build self<block_start><if_stmt>self.n<block_start>i=self.element_id.argsort()<line_sep>self.element_id=self.element_id[i]<line_sep>self.property_id=self.property_id[i]<line_sep>self.node_ids=self.node_ids[i :]<line_sep>self.is_g0=self.is_g0[i]<line_sep>self.g0=self.g0[i]<line_sep>self.x=self.x[i :]<line_sep>self.offt=self.offt[i]<line_sep>self.pin_flags=self.pin_flags[i :]<line_sep>self.wa=self.wa[i :]<line_sep>self.wb=self.wb[i :]<line_sep>unique_eids=unique(self.element_id)<if_stmt>len(unique_eids)<ne>len(self.element_id)<block_start><raise>RuntimeError('There are duplicate CBAR IDs...')<block_end>self._cards=[]<block_end><else_stmt><block_start>self.element_id=array([] dtype='int32')<line_sep>self.property_id=array([] dtype='int32')<block_end><block_end><def_stmt>update self maps<block_start>""" maps = { 'node_id' : nid_map, 'property' : pid_map, } """<if_stmt>self.n<block_start>eid_map=maps['element']<line_sep>nid_map=maps['node']<line_sep>pid_map=maps['property']<for_stmt>i,(eid pid nids) enumerate(zip(self.element_id self.property_id self.node_ids))<block_start>self.element_id[i]=eid_map[eid]<line_sep>self.property_id[i]=pid_map[pid]<line_sep>self.node_ids[i 0]=nid_map[nids[0]]<line_sep>self.node_ids[i 1]=nid_map[nids[1]]<block_end><block_end><block_end>#========================================================================= <def_stmt>get_mass_by_element_id self grid_cid0=<none> total=<false><block_start>""" mass = rho * A * L + nsm """<if_stmt>self.n<eq>0<block_start><return>0.0<block_end><return>[0.0]<if_stmt>grid_cid0<is><none><block_start>grid_cid0=self.model.grid.get_position_by_node_index()<block_end>p1=grid_cid0[self.node_ids[: 0]]<line_sep>p2=grid_cid0[self.node_ids[: 
1]]<line_sep>L=p2-p1<line_sep>i=self.model.properties_bar.get_index(self.property_id)<line_sep>A=self.model.properties_bar.get_Area[i]<line_sep>material_id=self.model.properties_bar.material_id[i]<line_sep>rho,E,J=self.model.Materials.get_rho_E_J(material_id)<line_sep>rho=self.model.Materials.get_rho(self.mid)<line_sep>E=self.model.Materials.get_E(self.mid)<line_sep>J=self.model.Materials.get_J(self.mid)<line_sep>mass=norm(L axis=1)<times>A<times>rho+self.nsm<if_stmt>total<block_start><return>mass.sum()<block_end><else_stmt><block_start><return>mass<block_end><block_end>#========================================================================= <def_stmt>write_card self bdf_file size=8 element_ids=<none><block_start><if_stmt>self.n<block_start><if_stmt>element_ids<is><none><block_start>i=arange(self.n)<block_end><else_stmt><block_start>i=searchsorted(self.element_id self.element_id)<block_end><for_stmt>(eid pid n is_g0 g0 x offt pin wa wb) zip(self.element_id[i] self.property_id[i] self.node_ids[i] self.is_g0[i] self.g0[i] self.x[i] self.offt[i] self.pin_flags[i] self.wa[i] self.wb[i])<block_start>pa=set_blank_if_default(pin[0] 0)<line_sep>pb=set_blank_if_default(pin[1] 0)<line_sep>w1a=set_blank_if_default(wa[0] 0.0)<line_sep>w2a=set_blank_if_default(wa[1] 0.0)<line_sep>w3a=set_blank_if_default(wa[2] 0.0)<line_sep>w1b=set_blank_if_default(wb[0] 0.0)<line_sep>w2b=set_blank_if_default(wb[1] 0.0)<line_sep>w3b=set_blank_if_default(wb[2] 0.0)<line_sep>x1=g0<if>is_g0<else>x[0]<line_sep>x2=0<if>is_g0<else>x[1]<line_sep>x3=0<if>is_g0<else>x[2]<line_sep>offt=set_string8_blank_if_default(offt 'GGG')<line_sep>card=['CBAR' eid pid n[0] n[1] x1 x2 x3 offt pa pb w1a w2a w3a w1b w2b w3b]<if_stmt>size<eq>8<block_start>bdf_file.write(print_card_8(card))<block_end><else_stmt><block_start>bdf_file.write(print_card_16(card))<block_end><block_end><block_end><block_end><def_stmt>slice_by_index self i<block_start>i=self._validate_slice(i)<line_sep>obj=CBAR(self.model)<line_sep>obj.n=len(i)<line_sep>#obj._cards = self._cards[i] #obj._comments = obj._comments[i] #obj.comments = obj.comments[i] obj.element_id=self.element_id[i]<line_sep>obj.property_id=self.property_id[i]<line_sep>obj.node_ids=self.node_ids[i :]<line_sep>obj.is_g0=self.is_g0[i]<line_sep>obj.g0=self.g0[i]<line_sep>obj.x=self.x[i :]<line_sep>obj.offt=self.offt[i]<line_sep>obj.pin_flags=self.pin_flags[i]<line_sep>obj.wa=self.wa[i]<line_sep>obj.wb=self.wb[i]<line_sep><return>obj<block_end>#def get_stiffness_matrix(self, model, node_ids, index0s, fnorm=1.0): #return K, dofs, n_ijv <block_end>
""" doc_graphviz.py Creates the graphviz output used to visualize script dependencies. This file relies on the schemas.yml to create the graphviz plots. """<import_stmt>os<import_stmt>cea.config<import_stmt>cea.schemas<import_from_stmt>jinja2 Template<line_sep>__author__="<NAME>"<line_sep>__copyright__="Copyright 2018, Architecture and Building Systems - ETH Zurich"<line_sep>__credits__=["<NAME>" "<NAME>"]<line_sep>__license__="MIT"<line_sep>__version__="2.14"<line_sep>__maintainer__="<NAME>"<line_sep>__email__="<EMAIL>"<line_sep>__status__="Production"<def_stmt>create_graphviz_files graphviz_data documentation_dir<block_start>""" :param dict graphviz_data: maps script names to a set of (input/output, script, locator_method, folder_name, file_name) :param documentation_dir: folder with the documentation in it ($repo/docs) :return: None """<if_stmt>os.path.exists(os.path.join(documentation_dir "graphviz"))<block_start><for_stmt>fname os.listdir(os.path.join(documentation_dir "graphviz"))<block_start>print("deleting {fname}".format(fname=fname))<line_sep>os.remove(os.path.join(documentation_dir "graphviz" fname))<block_end><block_end><for_stmt>script_name graphviz_data<block_start>print("Creating graph for: {script_name}".format(**locals()))<line_sep># creating new variable to preserve original trace_data used by other methods trace_data=shorten_trace_data_paths(sorted(graphviz_data[script_name]))<line_sep>trace_data=unique_users_creators(trace_data)<line_sep># set of unique scripts scripts=sorted(set([td[1]<for>td trace_data]))<line_sep># set of common dirs for each file accessed by the script(s) db_group=sorted(set(td[3]<for>td trace_data))<line_sep># float containing the node width for the largest file name width=5<line_sep># jinja2 template setup and execution template_path=os.path.join(documentation_dir "templates" "graphviz_template.gv")<line_sep>template=Template(open(template_path 'r').read())<line_sep>digraph=template.render(tracedata=trace_data script_name=script_name scripts=scripts db_group=db_group width=width)<line_sep>digraph=remove_extra_lines(digraph)<with_stmt>open(os.path.join(documentation_dir "graphviz" "{script}.gv".format(script=script_name)) 'w')<as>f<block_start>f.write(digraph)<block_end><block_end><block_end><def_stmt>unique_users_creators trace_data<block_start>""" Make sure that the data does not define the same script as producer _and_ consumer at the same time. Prefer producer. 
:param trace_data: list of tuples of form (0:input/output, 1:script, 2:locator_method, 3:folder_name, 4:file_name) :return: trace_data, filtered """<line_sep>input_lms=set(t[2]<for>t trace_data<if>t[0]<eq>"input")<line_sep>trace_data=[t<for>t trace_data<if>t[0]<eq>"input"<or>t[2]<not><in>input_lms]<line_sep><return>trace_data<block_end><def_stmt>remove_extra_lines digraph<block_start>digraph="\n".join([line<for>line digraph.split('\n')<if>len(line.strip())])<line_sep><return>digraph<block_end><def_stmt>shorten_trace_data_paths trace_data<block_start>""" Shorten the paths in trace_data to max 3 components :param trace_data: :return: """<for_stmt>i,(direction _script method path db) enumerate(trace_data)<block_start>path="/".join(path.rsplit('/')[-3:])# only keep max last 3 components trace_data[i]=(direction _script method path db)<block_end><return>trace_data<block_end><def_stmt>get_list_of_digraphs documentation_dir schema_scripts<block_start>list_of_digraphs=[]<for_stmt>script schema_scripts<block_start>graphviz_file=os.path.join(documentation_dir 'graphviz/%s.gv'%script)<if_stmt>os.path.isfile(graphviz_file)<block_start>underline='-'<times>len(script)<with_stmt>open(graphviz_file)<as>viz<block_start>digraph=viz.read()<block_end>contents=[[script underline digraph]]<line_sep>list_of_digraphs.extend(contents)<block_end><block_end><return>list_of_digraphs<block_end><def_stmt>main _<block_start>schemas=cea.schemas.schemas(plugins=[])<line_sep>schema_scripts=cea.schemas.get_schema_scripts(plugins=[])<line_sep>documentation_dir=os.path.join(os.path.dirname(cea.config.__file__) '..' 'docs')<line_sep>graphviz_data={}<for_stmt>script schema_scripts<block_start>trace_data=set()<for_stmt>locator_method schemas<block_start>file_path=schemas[locator_method]['file_path']<line_sep>file_name=os.path.basename(file_path)<line_sep>folder_name=os.path.dirname(file_path)<if_stmt>script<in>schemas[locator_method]['created_by']<block_start>trace_data.add(('output' script locator_method folder_name file_name))<block_end><if_stmt>script<in>schemas[locator_method]['used_by']<block_start>trace_data.add(('input' script locator_method folder_name file_name))<block_end><block_end>graphviz_data[script]=trace_data<block_end>create_graphviz_files(graphviz_data documentation_dir)<line_sep>list_of_digraphs=get_list_of_digraphs(documentation_dir=documentation_dir schema_scripts=schema_scripts)<line_sep>template_path=os.path.join(documentation_dir "templates" "graphviz_template.rst")<line_sep>template=Template(open(template_path 'r').read())<with_stmt>open(os.path.join(documentation_dir 'script-data-flow.rst') 'w')<as>fp<block_start>fp.write(template.render(list_of_digraphs=list_of_digraphs))<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main(cea.config.Configuration())<block_end>
cpgf._import(<none> "builtin.debug")<line_sep>cpgf._import(<none> "builtin.core")<class_stmt>SAppContext<block_start>device=<none> <line_sep>counter=0 <line_sep>listbox=<none><block_end>Context=SAppContext()<line_sep>GUI_ID_QUIT_BUTTON=101<line_sep>GUI_ID_NEW_WINDOW_BUTTON=102<line_sep>GUI_ID_FILE_OPEN_BUTTON=103<line_sep>GUI_ID_TRANSPARENCY_SCROLL_BAR=104<def_stmt>makeMyEventReceiver receiver<block_start><def_stmt>OnEvent me event<block_start><if_stmt>event.EventType<eq>irr.EET_GUI_EVENT<block_start>id=event.GUIEvent.Caller.getID()<line_sep>env=Context.device.getGUIEnvironment()<if_stmt>event.GUIEvent.EventType<eq>irr.EGET_SCROLL_BAR_CHANGED<block_start><if_stmt>id<eq>GUI_ID_TRANSPARENCY_SCROLL_BAR<block_start>pos=cpgf.cast(event.GUIEvent.Caller irr.IGUIScrollBar).getPos()<line_sep>skin=env.getSkin()<for_stmt>i range(irr.EGDC_COUNT)<block_start>col=skin.getColor(i)<line_sep>col.setAlpha(pos)<line_sep>skin.setColor(i col)<block_end><block_end><block_end><elif_stmt>event.GUIEvent.EventType<eq>irr.EGET_BUTTON_CLICKED<block_start><if_stmt>id<eq>GUI_ID_QUIT_BUTTON<block_start>Context.device.closeDevice()<line_sep><return><true><block_end><elif_stmt>id<eq>GUI_ID_NEW_WINDOW_BUTTON<block_start>Context.listbox.addItem("Window created")<line_sep>Context.counter=Context.counter+30<if_stmt>Context.counter<g>200<block_start>Context.counter=0<line_sep><block_end>window=env.addWindow(irr.rect_s32(100+Context.counter 100+Context.counter 300+Context.counter 200+Context.counter) <false> "Test window")<line_sep>env.addStaticText("Please close me" irr.rect_s32(35 35 140 50) <true> <false> window)<line_sep><return><true><block_end><elif_stmt>id<eq>GUI_ID_FILE_OPEN_BUTTON<block_start>Context.listbox.addItem("File open")<line_sep>env.addFileOpenDialog("Please choose a file.")<line_sep><return><true><line_sep><block_end><block_end><block_end><return><false><line_sep><block_end>receiver.OnEvent=OnEvent<line_sep><block_end><def_stmt>start <block_start>driverType=irr.driverChoiceConsole()<if_stmt>driverType<eq>irr.EDT_COUNT<block_start><return>1<line_sep><block_end>device=irr.createDevice(driverType irr.dimension2d_u32(640 480))<if_stmt>device<eq><none><block_start><return>1<line_sep><block_end>device.setWindowCaption("cpgf Irrlicht Python Binding - User Interface Demo")<line_sep>device.setResizable(<true>)<line_sep>driver=device.getVideoDriver()<line_sep>env=device.getGUIEnvironment()<line_sep>skin=env.getSkin()<line_sep>font=env.getFont("../../media/fonthaettenschweiler.bmp")<if_stmt>font<block_start>skin.setFont(font)<line_sep><block_end>skin.setFont(env.getBuiltInFont() irr.EGDF_TOOLTIP)<line_sep>env.addButton(irr.rect_s32(10 240 110 240+32) <none> GUI_ID_QUIT_BUTTON "Quit" "Exits Program")<line_sep>env.addButton(irr.rect_s32(10 280 110 280+32) <none> GUI_ID_NEW_WINDOW_BUTTON "New Window" "Launches a Window")<line_sep>env.addButton(irr.rect_s32(10 320 110 320+32) <none> GUI_ID_FILE_OPEN_BUTTON "File Open" "Opens a file")<line_sep>env.addStaticText("Transparent Control:" irr.rect_s32(150 20 350 40) <true>)<line_sep>scrollbar=env.addScrollBar(<true> irr.rect_s32(150 45 350 60) <none> GUI_ID_TRANSPARENCY_SCROLL_BAR)<line_sep>scrollbar.setMax(255)<line_sep>scrollbar.setPos(env.getSkin().getColor(irr.EGDC_WINDOW).getAlpha())<line_sep>env.addStaticText("Logging ListBox:" irr.rect_s32(50 110 250 130) <true>)<line_sep>listbox=env.addListBox(irr.rect_s32(50 140 250 210))<line_sep>env.addEditBox("Editable Text" irr.rect_s32(350 80 550 
100))<line_sep>Context.device=device<line_sep>Context.counter=0<line_sep>Context.listbox=listbox<line_sep>MyEventReceiver=cpgf.cloneClass(irr.IEventReceiverWrapper)<line_sep>makeMyEventReceiver(MyEventReceiver)<line_sep>receiver=MyEventReceiver()<line_sep>device.setEventReceiver(receiver)<line_sep>env.addImage(driver.getTexture("../../media/irrlichtlogo2.png") irr.position2d_s32(10 10))<while_stmt>device.run()<and>driver<block_start><if_stmt>device.isWindowActive()<block_start>driver.beginScene(<true> <true> irr.SColor(0 200 200 200))<line_sep>env.drawAll()<line_sep>driver.endScene()<line_sep><block_end><block_end>device.drop()<line_sep><return>0<line_sep><block_end>start()<line_sep>
# -*- coding: utf-8 -*- <import_from_stmt>copy deepcopy<import_stmt>pytest<import_from_stmt>schematics.models Model<import_from_stmt>schematics.types *<import_from_stmt>schematics.types.compound *<import_from_stmt>schematics.exceptions *<import_from_stmt>schematics.undefined Undefined<line_sep>@pytest.mark.parametrize('init' (<true> <false>))<def_stmt>test_import_data init<block_start><class_stmt>M(Model)<block_start>a,b,c,d=IntType() IntType() IntType() IntType()<block_end>m=M({'a':1 'b':<none> 'c':3} init=init)<line_sep>m.import_data({'a':<none> 'b':2})<if_stmt>init<block_start><assert_stmt>m._data<eq>{'a':<none> 'b':2 'c':3 'd':<none>}<block_end><else_stmt><block_start><assert_stmt>m._data<eq>{'a':<none> 'b':2 'c':3}<block_end><block_end>@pytest.mark.parametrize('init' (<true> <false>))<def_stmt>test_import_data_with_error init<block_start><class_stmt>M(Model)<block_start>a,b,c,d=IntType() IntType() IntType(required=<true>) IntType()<block_end>m=M({'a':1 'b':<none> 'c':3} init=init)<with_stmt>pytest.raises(DataError)<block_start>m.import_data({'a':<none> 'b':2 'c':<none> })<block_end><if_stmt>init<block_start><assert_stmt>m._data<eq>{'a':1 'b':<none> 'c':3 'd':<none>}<block_end><else_stmt><block_start><assert_stmt>m._data<eq>{'a':1 'b':<none> 'c':3}<block_end><block_end>@pytest.mark.parametrize('preconvert_source, populate_source' [(<false> <none>) (<true> <true>) (<true> <false>)])@pytest.mark.parametrize('recursive, populate_target, init_to_none, populated_result' [(<false> <true> <true> <true>) (<false> <false> <false> <false>) (<true> <true> <true> <true>) (<true> <false> <true> <true>) (<true> <false> <false> <false>)])<def_stmt>test_complex_import_data recursive preconvert_source populate_source populate_target init_to_none populated_result<block_start><class_stmt>M(Model)<block_start>intfield=IntType(max_value=2)<line_sep>matrixfield=ListType(ListType(IntType))<line_sep>dictfield=DictType(IntType)<line_sep>modelfield=ModelType('M')<block_end>origdict={'intfield':'1' 'dictfield':dict(a=1 b=2) 'modelfield':{'intfield':'2' 'matrixfield':[[0 0 0] [1 1 1] [2 2 2]] 'dictfield':dict(a=11 b=22) 'modelfield':{'intfield':'3' 'dictfield':dict(a=111 b=222)}}}<line_sep>m=M(origdict init=populate_target)<line_sep>sourcedict={'intfield':'101' 'dictfield':dict(c=3) 'modelfield':{'matrixfield':[[9]] 'modelfield':{'intfield':'103' 'dictfield':dict(c=33)}}}<line_sep>sourcedata=deepcopy(sourcedict)<if_stmt>preconvert_source<block_start>sourcedata=M(sourcedata init=populate_source)<block_end>m.import_data(sourcedata recursive=recursive init_values=init_to_none)<assert_stmt>id(m)<ne>id(sourcedata)<if_stmt>preconvert_source<and>populate_source<block_start><assert_stmt>m<eq>M(sourcedict init=<true>)<block_end><elif_stmt>recursive<block_start><assert_stmt>m<eq>M({'intfield':'101' 'dictfield':dict(c=3) 'modelfield':{'intfield':'2' 'matrixfield':[[9]] 'dictfield':dict(a=11 b=22) 'modelfield':{'intfield':'103' 'dictfield':dict(c=33)}}} init=populated_result)<block_end><else_stmt><block_start><assert_stmt>m<eq>M(sourcedict init=populated_result)<block_end><block_end>
<import_stmt>pickle<import_stmt>numpy<as>np<import_from_stmt>neupy algorithms<import_from_stmt>neupy.exceptions NotTrained<import_from_stmt>algorithms.memory.data zero one half_one half_zero<import_from_stmt>base BaseTestCase<import_from_stmt>helpers vectors_for_testing<line_sep>zero_hint=np.array([[0 1 0 0]])<line_sep>one_hint=np.array([[1 0 0 0]])<class_stmt>BAMTestCase(BaseTestCase)<block_start><def_stmt>setUp self<block_start>super(BAMTestCase self).setUp()<line_sep>self.data=np.concatenate([zero one] axis=0)<line_sep>self.hints=np.concatenate([zero_hint one_hint] axis=0)<block_end><def_stmt>test_bam_exceptions self<block_start><with_stmt>self.assertRaises(NotTrained)<block_start>dbnet=algorithms.DiscreteBAM()<line_sep>dbnet.predict(np.array([0 1]))<block_end><with_stmt>self.assertRaises(NotTrained)<block_start>dbnet=algorithms.DiscreteBAM()<line_sep>dbnet.predict_input(np.array([0 1]))<block_end><with_stmt>self.assertRaises(ValueError)<block_start>dbnet=algorithms.DiscreteBAM()<line_sep>dbnet.weight=np.array([[0 1] [1 0]])<line_sep>dbnet.train(np.array([0 1 1]) np.array([0 1]))<block_end><block_end><def_stmt>test_bam_X_validation self<block_start>dbnet=algorithms.DiscreteBAM()<line_sep>dbnet.weight=np.array([[0 1] [1 0]])<with_stmt>self.assertRaises(ValueError)# Invalid discrete input values <block_start>dbnet.train(np.array([-1 1]) np.array([0 1]))<block_end><with_stmt>self.assertRaises(ValueError)<block_start>dbnet.train(np.array([0 1]) np.array([-1 1]))<block_end><with_stmt>self.assertRaises(ValueError)<block_start>dbnet.energy(np.array([-1 1]) np.array([0 1]))<block_end><with_stmt>self.assertRaises(ValueError)<block_start>dbnet.energy(np.array([0 1]) np.array([-1 1]))<block_end><with_stmt>self.assertRaises(ValueError)<block_start>dbnet.predict(np.array([-1 1]))<block_end><block_end><def_stmt>test_discrete_bam_storage self<block_start>network=algorithms.DiscreteBAM(mode='sync')<line_sep>network.train(self.data self.hints)<line_sep>stored_network=pickle.dumps(network)<line_sep>loaded_network=pickle.loads(stored_network)<line_sep>network_prediction=network.predict(self.data)<line_sep>loaded_network_prediction=loaded_network.predict(self.data)<line_sep>np.testing.assert_array_almost_equal(loaded_network_prediction[0] network_prediction[0])<line_sep>np.testing.assert_array_almost_equal(loaded_network_prediction[1] network_prediction[1])<block_end><def_stmt>test_discrete_bam_sync self<block_start>bamnet=algorithms.DiscreteBAM(mode='sync')<line_sep>bamnet.train(self.data self.hints)<line_sep>data_before=self.data.copy()<line_sep>hints_before=self.hints.copy()<line_sep>np.testing.assert_array_almost_equal(bamnet.predict(half_zero)[1] zero_hint)<line_sep>np.testing.assert_array_almost_equal(bamnet.predict_output(half_one)[1] one_hint)<line_sep>np.testing.assert_array_almost_equal(bamnet.predict_input(zero_hint)[0] zero)<line_sep>np.testing.assert_array_almost_equal(bamnet.predict_input(one_hint)[0] one)<line_sep># Test 1d input array prediction np.testing.assert_array_almost_equal(bamnet.predict_input(one_hint.ravel())[0] one)<line_sep># Test 1d output array input prediction np.testing.assert_array_almost_equal(bamnet.predict_output(half_one.ravel())[1] one_hint)<line_sep># Test multiple input values prediction input_matrix=np.vstack([one zero])<line_sep>output_matrix=np.vstack([one_hint zero_hint])<line_sep>output_matrix_before=output_matrix.copy()<line_sep>input_matrix_before=input_matrix.copy()<line_sep>np.testing.assert_array_almost_equal(bamnet.predict_input(output_matrix)[0] 
input_matrix)<line_sep>np.testing.assert_array_almost_equal(bamnet.predict(input_matrix)[1] output_matrix)<line_sep>np.testing.assert_array_equal(self.data data_before)<line_sep>np.testing.assert_array_equal(self.hints hints_before)<line_sep>np.testing.assert_array_equal(output_matrix output_matrix_before)<line_sep>np.testing.assert_array_equal(input_matrix input_matrix_before)<block_end><def_stmt>test_discrete_bam_async self<block_start>bamnet=algorithms.DiscreteBAM(mode='async' n_times=400)<line_sep>data_before=self.data.copy()<line_sep>hints_before=self.hints.copy()<line_sep>bamnet.train(self.data self.hints)<line_sep>input_matrix=np.vstack([one zero])<line_sep>output_matrix=np.vstack([one_hint zero_hint])<line_sep>output_matrix_before=output_matrix.copy()<line_sep>input_matrix_before=input_matrix.copy()<line_sep>np.testing.assert_array_almost_equal(bamnet.predict_input(output_matrix)[0] input_matrix)<line_sep>np.testing.assert_array_almost_equal(bamnet.predict_output(input_matrix)[1] output_matrix)<line_sep>np.testing.assert_array_equal(self.data data_before)<line_sep>np.testing.assert_array_equal(self.hints hints_before)<line_sep>np.testing.assert_array_equal(output_matrix output_matrix_before)<line_sep>np.testing.assert_array_equal(input_matrix input_matrix_before)<block_end><def_stmt>test_bam_argument_in_predict_method self<block_start>dbnet=algorithms.DiscreteBAM(mode='async' n_times=1)<line_sep>dbnet.train(self.data self.hints)<line_sep>self.assertTrue(np.any(one<ne>dbnet.predict_output(half_one)[0]))<line_sep>np.testing.assert_array_almost_equal(one dbnet.predict_output(half_one n_times=100)[0])<block_end><def_stmt>test_bam_energy_function self<block_start>input_vector=np.array([[1 0 0 1 1 0 0]])<line_sep>output_vector=np.array([[1 0]])<line_sep>dbnet=algorithms.DiscreteBAM()<line_sep>dbnet.train(input_vector output_vector)<line_sep>self.assertEqual(-7 dbnet.energy(input_vector output_vector))<line_sep>self.assertEqual(0 dbnet.energy(np.array([[0 0 0 0 0 0 0]]) np.array([[0 0]])))<line_sep>self.assertEqual(-7 dbnet.energy(np.array([[0 1 1 0 0 1 1]]) np.array([[0 1]])))<line_sep># Test 1d array self.assertEqual(-7 dbnet.energy(np.array([0 1 1 0 0 1 1]) np.array([0 1])))<line_sep># Test multiple input values energy calculation np.testing.assert_array_almost_equal(np.array([-7 0]) dbnet.energy(np.array([[0 1 1 0 0 1 1] [0 0 0 0 0 0 0] ]) np.array([[0 1] [0 0] ])))<block_end><def_stmt>test_bam_train_different_inputs self<block_start>self.assertInvalidVectorTrain(algorithms.DiscreteBAM() np.array([1 0 0 1]) np.array([1 0]) is_feature1d=<false>)<block_end><def_stmt>test_bam_predict_different_inputs self<block_start>bamnet=algorithms.DiscreteBAM()<line_sep>data=np.array([[1 0 0 1]])<line_sep>target=np.array([[1 0]])<line_sep>bamnet.train(data target)<line_sep>test_vectors=vectors_for_testing(data.reshape(data.size) is_feature1d=<false>)<for_stmt>test_vector test_vectors<block_start>np.testing.assert_array_almost_equal(bamnet.predict(test_vector)[1] target)<block_end><block_end><block_end>
"""Implementation of magic functions that control various automatic behaviors. """<line_sep>#----------------------------------------------------------------------------- # Copyright (c) 2012 The IPython Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. #----------------------------------------------------------------------------- #----------------------------------------------------------------------------- # Imports #----------------------------------------------------------------------------- # Our own packages <import_from_stmt>IPython.core.magic Bunch Magics magics_class line_magic<import_from_stmt>IPython.testing.skipdoctest skip_doctest<import_from_stmt>logging error<line_sep>#----------------------------------------------------------------------------- # Magic implementation classes #----------------------------------------------------------------------------- @magics_class<class_stmt>AutoMagics(Magics)<block_start>"""Magics that control various autoX behaviors."""<def_stmt>__init__ self shell<block_start>super(AutoMagics self).__init__(shell)<line_sep># namespace for holding state we may need self._magic_state=Bunch()<block_end>@line_magic<def_stmt>automagic self parameter_s=''<block_start>"""Make magic functions callable without having to type the initial %. Without arguments toggles on/off (when off, you must call it as %automagic, of course). With arguments it sets the value, and you can use any of (case insensitive): - on, 1, True: to activate - off, 0, False: to deactivate. Note that magic functions have lowest priority, so if there's a variable whose name collides with that of a magic fn, automagic won't work for that function (you get the variable instead). However, if you delete the variable (del var), the previously shadowed magic function becomes visible to automagic again."""<line_sep>arg=parameter_s.lower()<line_sep>mman=self.shell.magics_manager<if_stmt>arg<in>('on' '1' 'true')<block_start>val=<true><block_end><elif_stmt>arg<in>('off' '0' 'false')<block_start>val=<false><block_end><else_stmt><block_start>val=<not>mman.auto_magic<block_end>mman.auto_magic=val<line_sep>print('\n'+self.shell.magics_manager.auto_status())<block_end>@skip_doctest@line_magic<def_stmt>autocall self parameter_s=''<block_start>"""Make functions callable without having to type parentheses. Usage: %autocall [mode] The mode can be one of: 0->Off, 1->Smart, 2->Full. If not given, the value is toggled on and off (remembering the previous state). In more detail, these values mean: 0 -> fully disabled 1 -> active, but do not apply if there are no arguments on the line. In this mode, you get:: In [1]: callable Out[1]: <built-in function callable> In [2]: callable 'hello' ------> callable('hello') Out[2]: False 2 -> Active always. 
Even if no arguments are present, the callable object is called:: In [2]: float ------> float() Out[2]: 0.0 Note that even with autocall off, you can still use '/' at the start of a line to treat the first argument on the command line as a function and add parentheses to it:: In [8]: /str 43 ------> str(43) Out[8]: '43' # all-random (note for auto-testing) """<if_stmt>parameter_s<block_start>arg=int(parameter_s)<block_end><else_stmt><block_start>arg='toggle'<block_end><if_stmt><not>arg<in>(0 1 2 'toggle')<block_start>error('Valid modes: (0->Off, 1->Smart, 2->Full')<line_sep><return><block_end><if_stmt>arg<in>(0 1 2)<block_start>self.shell.autocall=arg<block_end><else_stmt># toggle <block_start><if_stmt>self.shell.autocall<block_start>self._magic_state.autocall_save=self.shell.autocall<line_sep>self.shell.autocall=0<block_end><else_stmt><block_start><try_stmt><block_start>self.shell.autocall=self._magic_state.autocall_save<block_end><except_stmt>AttributeError<block_start>self.shell.autocall=self._magic_state.autocall_save=1<block_end><block_end><block_end>print("Automatic calling is:" ['OFF' 'Smart' 'Full'][self.shell.autocall])<block_end><block_end>
# Python Standard Library Imports <import_stmt>base64<import_stmt>hashlib<import_stmt>hmac<import_stmt>json<line_sep># HTK Imports <import_from_stmt>htk.utils htk_setting<import_from_stmt>htk.utils.general resolve_method_dynamically<def_stmt>validate_webhook_request request<block_start>"""Validates a 321Forms webhook request Returns a JSON request body if it is valid Otherwise, returns None """<line_sep>webhook_data=json.loads(request.body)<line_sep>company_id=webhook_data.get('company' {}).get('id')<line_sep>headers=request.META<line_sep>expected_signature=headers.get('HTTP_X_ONBOARDING_SIGNATURE' '')<line_sep>hash_key_retriever=resolve_method_dynamically(htk_setting('HTK_321FORMS_WEBHOOK_HASH_KEY_RETRIEVER'))<line_sep>hash_key=hash_key_retriever(company_id)<line_sep>signature=base64.b64encode(hmac.new(bytes(hash_key) request.body digestmod=hashlib.sha1).digest())<line_sep>is_valid=signature<eq>expected_signature<if_stmt>is_valid<block_start>webhook_data=webhook_data<block_end><else_stmt><block_start>webhook_data=<none><block_end><return>webhook_data<block_end><def_stmt>handle_webhook_request webhook_data<block_start>topic=webhook_data.get('topic' <none>)<line_sep>event_handlers=htk_setting('HTK_321FORMS_WEBHOOK_EVENT_HANDLERS')<line_sep>event_handler_method=event_handlers.get(topic)<line_sep>event_handler=resolve_method_dynamically(event_handler_method)<if>event_handler_method<else><none><if_stmt>event_handler<block_start>event_handler(webhook_data)<block_end><else_stmt><block_start><pass><block_end><block_end>
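# A hypothetical way to expose the two helpers above as a Django endpoint; the view name and response codes are assumptions for illustration and are not part of htk itself. <import_from_stmt>django.http HttpResponse HttpResponseForbidden<import_from_stmt>django.views.decorators.csrf csrf_exempt<line_sep>@csrf_exempt<def_stmt>forms321_webhook_view request<block_start>webhook_data=validate_webhook_request(request)<if_stmt>webhook_data<is><none><block_start><return>HttpResponseForbidden()<block_end>handle_webhook_request(webhook_data)<line_sep><return>HttpResponse(status=200)<block_end>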
<import_stmt>socket<import_stmt>os<import_from_stmt>playsound playsound<import_from_stmt>pydub AudioSegment<def_stmt>sendToClient msg<block_start>msg=msg.decode('utf-8')<line_sep>lang=msg[:3]# ITA or ENG msg=msg[3:]# actual message words=msg.split(" ")<if_stmt>len(words)<g>18<block_start>sentences=[]<line_sep>sentence=""<for_stmt>i range(len(words))<block_start>sentence<augadd>words[i]+" "<if_stmt>i%12<eq>0<and>i<ne>0<block_start>sentences.append(sentence)<line_sep>sentence=""<block_end><elif_stmt>i<eq>len(words)-1<block_start>sentences.append(sentence)<block_end><block_end><with_stmt>open('harvard_sentences.txt' 'w')<as>f<block_start>first=<true><for_stmt>i,sentence enumerate(sentences start=1)<block_start><if_stmt>first<block_start>f.write("first line\n1. "+str(sentence)+"\n")<line_sep>first=<false><block_end><else_stmt><block_start>f.write(f"{i}. {str(sentence)}\n")<block_end><block_end><block_end>num_sentences=len(sentences)<block_end><else_stmt><block_start><with_stmt>open('harvard_sentences.txt' 'w')<as>f<block_start>f.write("first line\n1. "+str(msg)+"\n")<block_end>num_sentences=1<block_end>os.system('python synthesize.py '+lang)<line_sep>sounds=0<for_stmt>i range(0 num_sentences)<block_start>sounds<augadd>AudioSegment.from_wav(f"samples/{i+1}.wav")<block_end># increase volume by 10dB sounds<augadd>10<line_sep>sounds.export("backup/final.wav" format="wav")<line_sep>f.close()<with_stmt>open('backup/final.wav' 'rb')<as>f<block_start>audiob=f.read()<block_end>clientsocket.send(audiob)<line_sep>clientsocket.close()<line_sep>f.close()<block_end><if_stmt>__name__<eq>'__main__'<block_start>s=socket.socket(socket.AF_INET socket.SOCK_STREAM)<line_sep>s.bind(("0.0.0.0" 1234))<line_sep>s.listen(5)<while_stmt><true><block_start>print("Waiting for connection...")<line_sep>clientsocket,address=s.accept()<line_sep>print(f"Connection from {address} has been established")<line_sep>msg=clientsocket.recv(2048)<line_sep>print(msg)<line_sep>sendToClient(msg)<block_end><block_end>
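# A minimal, hypothetical client for the synthesis server above; the host, port and sample sentence are illustrative, and because the server closes the connection after sending the WAV, the client simply reads until EOF. <import_stmt>socket<import_from_stmt>playsound playsound<line_sep>client=socket.socket(socket.AF_INET socket.SOCK_STREAM)<line_sep>client.connect(('127.0.0.1' 1234))<line_sep>client.send('ENGThe quick brown fox jumps over the lazy dog'.encode('utf-8'))<line_sep>chunks=[]<while_stmt><true><block_start>chunk=client.recv(4096)<if_stmt><not>chunk<block_start><break><block_end>chunks.append(chunk)<block_end>client.close()<with_stmt>open('reply.wav' 'wb')<as>f<block_start>f.write(b''.join(chunks))<block_end>playsound('reply.wav')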
<import_stmt>json<import_from_stmt>django.test TestCase RequestFactory<import_from_stmt>django.utils six<import_from_stmt>django.core.exceptions ImproperlyConfigured<import_from_stmt>.views ChartView<import_from_stmt>. Chart<import_from_stmt>.config Title Legend Tooltips Hover InteractionModes Animation Element ElementArc Axes ScaleLabel Tick rgba <class_stmt>LineChart(Chart)<block_start>chart_type='line'<line_sep>title=Title(text='Test Title Line')<line_sep>legend=Legend(display=<false>)<line_sep>tooltips=Tooltips(enabled=<false>)<line_sep>hover=Hover(mode='default')<line_sep>animation=Animation(duration=1.0)<line_sep>scales={'xAxes':[Axes(display=<false> type='time' position='bottom')] 'yAxes':[Axes(type='linear' position='left' scaleLabel=ScaleLabel(fontColor='#fff') ticks=Tick(fontColor='#fff'))] }<def_stmt>get_datasets self *args **kwargs<block_start>data=[1 2 3 4 5 6 7 8 9]<line_sep><return>[dict(label='Test Line Chart' data=data)]<block_end><block_end><class_stmt>LineChartParameterized(LineChart)<block_start><def_stmt>get_datasets self currency_type<block_start>eur_data=list(range(10))<line_sep>do_data=list(range(10 20))<if_stmt>currency_type<eq>'euro'<block_start><return>[dict(label='Euro Chart' data=eur_data)]<block_end><elif_stmt>currency_type<eq>'dollar'<block_start><return>[dict(label='Dollar Chart' data=do_data)]<block_end><raise>ValueError('Unkown currency type: {}'.format(currency_type))<block_end><block_end><class_stmt>LineChartUnresponsive(LineChart)<block_start>responsive=<false><block_end><class_stmt>BarChart(Chart)<block_start>chart_type='radar'<line_sep>title=Title(text='Test Title')<def_stmt>get_datasets self *args **kwargs<block_start>data=[]<line_sep><return>[dict(label='Test Radar Chart' data=data)]<block_end><block_end><class_stmt>PolarChart(Chart)<block_start>chart_type='polarArea'<line_sep>title=Title(text='Test Title')<def_stmt>get_datasets self *args **kwargs<block_start>data=[]<line_sep><return>[dict(label='Test Polar Chart' data=data)]<block_end><block_end><class_stmt>RadarChart(Chart)<block_start>chart_type='bar'<line_sep>title=Title(text='Test Title')<def_stmt>get_datasets self *args **kwargs<block_start>data=[]<line_sep><return>[dict(label='Test Line Chart' data=data)]<block_end><block_end><class_stmt>PieChart(Chart)<block_start>chart_type='pie'<line_sep>title=Title(text='Test Title')<def_stmt>get_datasets self *args **kwargs<block_start>data=[]<line_sep><return>[dict(label='Test Pie Chart' data=data)]<block_end><block_end><class_stmt>BubbleChart(Chart)<block_start>chart_type='bubble'<line_sep>title=Title(text='Test Title')<def_stmt>get_datasets self *args **kwargs<block_start>data=[]<line_sep><return>[dict(label='Test Bubble Chart' data=data)]<block_end><block_end><class_stmt>OptionsChart(Chart)<block_start>chart_type='line'<line_sep>title=Title(text='Precendence')<line_sep>options={'title':Title(text='Overriden') 'responsive':<true> 'maintainAspectRatio':<true> }<def_stmt>get_datasets self *args **kwargs<block_start>data=[1 2 3 4 5 6 7 8 9]<line_sep><return>[dict(label='Test Line Chart' data=data)]<block_end><block_end><class_stmt>ChartViewTestToolkit(TestCase)<block_start>classes=<none><line_sep>url_kwargs={}<line_sep>@property<def_stmt>request self<block_start>request_factory=RequestFactory()<line_sep><return>request_factory.get('/test-url')<block_end>@property<def_stmt>responses self<block_start><for_stmt>klass self.classes<block_start><yield>ChartView.from_chart(klass())(self.request 
**self.url_kwargs)<block_end><block_end><block_end><class_stmt>ChartViewTestToolkitSolo(ChartViewTestToolkit)<block_start>klass=<none><line_sep>url_kwargs={}<line_sep>@property<def_stmt>response self<block_start><return>ChartView.from_chart(self.klass())(self.request **self.url_kwargs)<line_sep><return>self.klass.as_view()(self.request)<block_end>@property<def_stmt>data self<block_start>charset=getattr(self.response 'charset' 'utf-8')<line_sep>data=self.response.content.decode(charset)<line_sep><return>json.loads(data)<block_end><block_end><class_stmt>ChartResponseTestCase(ChartViewTestToolkit)<block_start>classes=(LineChart BarChart PolarChart RadarChart PieChart BubbleChart )<def_stmt>test_status_code self<block_start><for_stmt>response self.responses<block_start>self.assertEquals(response.status_code 200)<block_end><block_end><def_stmt>test_content_type self<block_start><for_stmt>response self.responses<block_start>self.assertEquals(response.get('content-type') 'application/json')<block_end><block_end><def_stmt>test_chart_config self<block_start><for_stmt>response self.responses<block_start>charset=getattr(response 'charset' 'utf-8')<line_sep>content=response.content.decode(charset)<line_sep>data=json.loads(content)<line_sep>self.assertIn('data' data)<line_sep>self.assertIn('options' data)<line_sep>self.assertIn('type' data)<line_sep>self.assertTrue(isinstance(data['data'] dict))<line_sep>self.assertTrue(isinstance(data['options'] dict))<line_sep>self.assertTrue(isinstance(data['type'] (six.string_types six.text_type)))<line_sep>self.assertIn(data['type'] ['bar' 'line' 'radar' 'polarArea' 'pie' 'bubble'])<line_sep>self.assertIn('title' data['options'])<block_end><block_end><block_end><class_stmt>LineChartTestCase(ChartViewTestToolkitSolo)<block_start>klass=LineChart<def_stmt>test_title self<block_start>self.assertEquals(self.data['options']['title']['text'] 'Test Title Line')<block_end><def_stmt>test_legend self<block_start>self.assertEquals(self.data['options']['legend']['display'] <false>)<block_end><def_stmt>test_tooltips self<block_start>self.assertEquals(self.data['options']['tooltips']['enabled'] <false>)<block_end><def_stmt>test_hover self<block_start>self.assertEquals(self.data['options']['hover']['mode'] 'default')<block_end><def_stmt>test_animation self<block_start>self.assertEquals(self.data['options']['animation']['duration'] 1.0)<block_end><def_stmt>test_dataset self<block_start>self.assertEquals(len(self.data['data']['datasets']) 1)<line_sep>self.assertEquals(len(self.data['data']['labels']) 0)<line_sep>self.assertEquals(self.data['data']['datasets'][0]['data'] list(range(1 10)))<block_end><block_end><class_stmt>TestConfigADTS(TestCase)<block_start><def_stmt>test_rgba self<block_start>self.assertEquals(rgba(255 255 255) 'rgba(255,255,255,1.0)')<line_sep>self.assertEquals(rgba(255 255 255 0.0) 'rgba(255,255,255,0.0)')<block_end><def_stmt>test_title self<block_start>title=Title(text='Hello World')<line_sep>self.assertTrue(isinstance(title dict))<line_sep>self.assertRaises(ValueError <lambda>:Title(nonsense='something'))<block_end><def_stmt>test_legend self<block_start>title=Legend(display=<false>)<line_sep>self.assertTrue(isinstance(title dict))<line_sep>self.assertRaises(ValueError <lambda>:Legend(nonsense='something'))<block_end><def_stmt>test_tooltips self<block_start>title=Tooltips(enabled=<true>)<line_sep>self.assertTrue(isinstance(title dict))<line_sep>self.assertRaises(ValueError <lambda>:Tooltips(nonsense='something'))<block_end><def_stmt>test_hover 
self<block_start>title=Hover(mode='default')<line_sep>self.assertTrue(isinstance(title dict))<line_sep>self.assertRaises(ValueError <lambda>:Hover(nonsense='something'))<block_end><def_stmt>test_interaction_modes self<block_start>title=InteractionModes(label='Hello World')<line_sep>self.assertTrue(isinstance(title dict))<line_sep>self.assertRaises(ValueError <lambda>:InteractionModes(nonsense='something'))<block_end><def_stmt>test_animation self<block_start>title=Animation(duration=1.0)<line_sep>self.assertTrue(isinstance(title dict))<line_sep>self.assertRaises(ValueError <lambda>:Animation(nonsense='something'))<block_end><def_stmt>test_element self<block_start>arc=ElementArc(borderColor=rgba(255 255 255 1))<line_sep>title=Element(arc=arc)<line_sep>self.assertTrue(isinstance(title dict))<line_sep>self.assertRaises(ValueError <lambda>:Element(nonsense='something'))<block_end><def_stmt>test_scales self<block_start>axes=Axes(type='linear' position='left' scaleLabel=ScaleLabel(fontColor='#fff') ticks=Tick(fontColor='#fff'))<line_sep>self.assertTrue(isinstance(axes dict))<line_sep>self.assertRaises(ValueError <lambda>:Axes(nonsense='something'))<block_end><block_end><class_stmt>ChartViewTestCase(TestCase)<block_start><def_stmt>test_chart_view self<block_start>self.assertTrue(getattr(ChartView 'from_chart' <false>))<line_sep>self.assertRaises(ImproperlyConfigured <lambda>:ChartView())<block_end><def_stmt>test_chart_view_from_chart_classonly self<block_start>ChartViewSubClass=type('ChartViewSubClass' (ChartView ) {'chart_instance':LineChart()})<line_sep>chart_view=ChartViewSubClass()<line_sep>self.assertRaises(AttributeError <lambda>:chart_view.from_chart(LineChart()))<block_end><def_stmt>test_chart_view_from_chart self<block_start>self.assertRaises(ImproperlyConfigured <lambda>:ChartView.from_chart(dict()))<line_sep>self.assertRaises(ImproperlyConfigured <lambda>:ChartView.from_chart(LineChart))<line_sep>ChartView.from_chart(LineChart())<block_end><def_stmt>test_chart_view_get self<block_start>ChartViewSubClass=type('ChartViewSubClass' (ChartView ) {'chart_instance':LineChart()})<line_sep>chart_view=ChartViewSubClass()<line_sep>request_factory=RequestFactory()<line_sep>request=request_factory.get('/test-url')<line_sep>response=chart_view.get(request)<line_sep>self.assertEquals(response.status_code 200)<line_sep>charset=getattr(response 'charset' 'utf-8')<line_sep>content=response.content.decode(charset)<line_sep>data=json.loads(content)<line_sep>self.assertIn('data' data)<line_sep>self.assertIn('options' data)<line_sep>self.assertIn('type' data)<line_sep>self.assertTrue(isinstance(data['data'] dict))<line_sep>self.assertTrue(isinstance(data['options'] dict))<line_sep>self.assertTrue(isinstance(data['type'] (six.string_types six.text_type)))<line_sep>self.assertIn(data['type'] ['bar' 'line' 'radar' 'polarArea' 'pie' 'bubble'])<line_sep>self.assertIn('title' data['options'])<block_end><block_end><class_stmt>ChartTestCase(TestCase)<block_start><def_stmt>test_chart_dimension self<block_start>line_chart=LineChartUnresponsive(width=1000 height=500)<line_sep>self.assertEquals(line_chart.width 1000)<line_sep>self.assertEquals(line_chart.height 500)<line_sep>self.assertIn('height: 500px' line_chart.as_html())<line_sep>self.assertIn('width: 1000px' line_chart.as_html())<block_end><def_stmt>test_chart_no_dimension self<block_start>line_chart=LineChart()<line_sep>self.assertEquals(line_chart.width <none>)<line_sep>self.assertEquals(line_chart.height <none>)<line_sep>self.assertNotIn('height:' 
line_chart.as_html())<line_sep>self.assertNotIn('width:' line_chart.as_html())<block_end><def_stmt>test_chart_html_id self<block_start>line_chart=LineChart(html_id='test-id')<line_sep>self.assertIn('id="test-id"' line_chart.as_html())<block_end><def_stmt>test_chart_render_html self<block_start>line_chart=LineChart()<line_sep>context={'html_id':'test-id' 'chart':line_chart 'chart_configuration':line_chart.get_configuration() }<line_sep>html=line_chart.render_html(context)<line_sep>self.assertNotIn('<script' html)<block_end><def_stmt>test_chart_render_js self<block_start>line_chart=LineChart()<line_sep>context={'html_id':'test-id' 'chart':line_chart 'chart_configuration':line_chart.get_configuration() }<line_sep>js=line_chart.render_js(context)<line_sep>self.assertNotIn('<canvas' js)<block_end><def_stmt>test_responsive_height_width self<block_start>LineChartUnresponsive(height=500)<line_sep>self.assertRaises(ImproperlyConfigured <lambda>:LineChart(height=500))<block_end><def_stmt>test_chart_parameterization self<block_start>chart=LineChartParameterized()<line_sep>self.assertNotIn('Dollar Chart' chart.as_html('euro'))<line_sep>self.assertIn('Euro Chart' chart.as_html('euro'))<line_sep>self.assertNotIn('Euro Chart' chart.as_html('dollar'))<line_sep>self.assertIn('Dollar Chart' chart.as_html('dollar'))<block_end><block_end><class_stmt>AsyncChartParameterization(ChartViewTestToolkitSolo)<block_start>klass=LineChartParameterized<def_stmt>test_euro self<block_start>self.url_kwargs=dict(currency_type='euro')<line_sep>self.assertEquals('Euro Chart' self.data['data']['datasets'][0]['label'])<block_end><def_stmt>test_dollar self<block_start>self.url_kwargs=dict(currency_type='dollar')<line_sep>self.assertEquals('Dollar Chart' self.data['data']['datasets'][0]['label'])<block_end><block_end><class_stmt>OptionsChartTestCase(ChartViewTestToolkitSolo)<block_start>klass=OptionsChart<def_stmt>test_precedence self<block_start>title=self.data['options']['title']['text']<line_sep>responsive=self.data['options']['responsive']<line_sep>maintainAspectRatio=self.data['options']['maintainAspectRatio']<line_sep>self.assertEquals('Precendence' title)<line_sep>self.assertTrue(responsive)<line_sep>self.assertTrue(maintainAspectRatio)<block_end><block_end>
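# Hedged wiring sketch, not part of the test module above: the tests exercise ChartView.from_chart(LineChart()), which returns a view callable taking a request, so the same object can be mounted in a URLconf; the import module paths below are assumptions for illustration only. <import_from_stmt>django.urls path<import_from_stmt>jchart.views ChartView<import_from_stmt>.charts LineChart<line_sep>urlpatterns=[path('charts/line/' ChartView.from_chart(LineChart()) name='line-chart')]<line_sep>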
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <try_stmt><block_start><import_from_stmt>._models_py3 Association<import_from_stmt>._models_py3 AssociationsList<import_from_stmt>._models_py3 CustomRPActionRouteDefinition<import_from_stmt>._models_py3 CustomRPManifest<import_from_stmt>._models_py3 CustomRPResourceTypeRouteDefinition<import_from_stmt>._models_py3 CustomRPRouteDefinition<import_from_stmt>._models_py3 CustomRPValidations<import_from_stmt>._models_py3 ErrorDefinition<import_from_stmt>._models_py3 ErrorResponse<import_from_stmt>._models_py3 ListByCustomRPManifest<import_from_stmt>._models_py3 Resource<import_from_stmt>._models_py3 ResourceProviderOperation<import_from_stmt>._models_py3 ResourceProviderOperationDisplay<import_from_stmt>._models_py3 ResourceProviderOperationList<import_from_stmt>._models_py3 ResourceProvidersUpdate<block_end><except_stmt>(SyntaxError ImportError)<block_start><import_from_stmt>._models Association# type: ignore <import_from_stmt>._models AssociationsList# type: ignore <import_from_stmt>._models CustomRPActionRouteDefinition# type: ignore <import_from_stmt>._models CustomRPManifest# type: ignore <import_from_stmt>._models CustomRPResourceTypeRouteDefinition# type: ignore <import_from_stmt>._models CustomRPRouteDefinition# type: ignore <import_from_stmt>._models CustomRPValidations# type: ignore <import_from_stmt>._models ErrorDefinition# type: ignore <import_from_stmt>._models ErrorResponse# type: ignore <import_from_stmt>._models ListByCustomRPManifest# type: ignore <import_from_stmt>._models Resource# type: ignore <import_from_stmt>._models ResourceProviderOperation# type: ignore <import_from_stmt>._models ResourceProviderOperationDisplay# type: ignore <import_from_stmt>._models ResourceProviderOperationList# type: ignore <import_from_stmt>._models ResourceProvidersUpdate<block_end># type: ignore <import_from_stmt>._customproviders_enums ActionRouting ProvisioningState ResourceTypeRouting ValidationType <line_sep>__all__=['Association' 'AssociationsList' 'CustomRPActionRouteDefinition' 'CustomRPManifest' 'CustomRPResourceTypeRouteDefinition' 'CustomRPRouteDefinition' 'CustomRPValidations' 'ErrorDefinition' 'ErrorResponse' 'ListByCustomRPManifest' 'Resource' 'ResourceProviderOperation' 'ResourceProviderOperationDisplay' 'ResourceProviderOperationList' 'ResourceProvidersUpdate' 'ActionRouting' 'ProvisioningState' 'ResourceTypeRouting' 'ValidationType' ]<line_sep>
# -*- coding: utf-8 -*- """ wakatime.main ~~~~~~~~~~~~~ Module entry point. :copyright: (c) 2013 <NAME>. :license: BSD, see LICENSE for more details. """<import_from_future_stmt> print_function<import_stmt>logging<import_stmt>os<import_stmt>sys<import_stmt>time<import_stmt>traceback<line_sep>pwd=os.path.dirname(os.path.abspath(__file__))<line_sep>sys.path.insert(0 os.path.dirname(pwd))<line_sep>sys.path.insert(0 os.path.join(pwd 'packages'))<import_from_stmt>.__about__ __version__<import_from_stmt>.api send_heartbeats<import_from_stmt>.arguments parse_arguments<import_from_stmt>.compat u json<import_from_stmt>.constants SUCCESS UNKNOWN_ERROR HEARTBEATS_PER_REQUEST<import_from_stmt>.logger setup_logging<line_sep>log=logging.getLogger('WakaTime')<import_from_stmt>.heartbeat Heartbeat<import_from_stmt>.offlinequeue Queue<def_stmt>execute argv=<none><block_start><if_stmt>argv<block_start>sys.argv=['wakatime']+argv<block_end>args,configs=parse_arguments()<line_sep>setup_logging(args __version__)<try_stmt><block_start>heartbeats=[]<line_sep>hb=Heartbeat(vars(args) args configs)<if_stmt>hb<block_start>heartbeats.append(hb)<block_end><else_stmt><block_start>log.debug(hb.skip)<block_end><if_stmt>args.extra_heartbeats<block_start><try_stmt><block_start><for_stmt>extra_data json.loads(sys.stdin.readline())<block_start>hb=Heartbeat(extra_data args configs)<if_stmt>hb<block_start>heartbeats.append(hb)<block_end><else_stmt><block_start>log.debug(hb.skip)<block_end><block_end><block_end><except_stmt>json.JSONDecodeError<as>ex<block_start>log.warning(u('Malformed extra heartbeats json: {msg}').format(msg=u(ex) ))<block_end><block_end>retval=SUCCESS<while_stmt>heartbeats<block_start>retval=send_heartbeats(heartbeats[:HEARTBEATS_PER_REQUEST] args configs)<line_sep>heartbeats=heartbeats[HEARTBEATS_PER_REQUEST:]<if_stmt>retval<ne>SUCCESS<block_start><break><block_end><block_end><if_stmt>heartbeats<block_start>Queue(args configs).push_many(heartbeats)<block_end><if_stmt>retval<eq>SUCCESS<block_start>queue=Queue(args configs)<for_stmt>offline_heartbeats queue.pop_many(args.sync_offline_activity)<block_start>time.sleep(1)<line_sep>retval=send_heartbeats(offline_heartbeats args configs)<if_stmt>retval<ne>SUCCESS<block_start><break><block_end><block_end><block_end><return>retval<block_end><except_stmt><block_start>log.traceback(logging.ERROR)<line_sep>print(traceback.format_exc())<line_sep><return>UNKNOWN_ERROR<block_end><block_end>
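# Hedged usage sketch: execute() returns an exit code, so the module can also be driven programmatically as below; the '--entity' and '--key' flags are assumptions about what parse_arguments() accepts, and the values are placeholders. <if_stmt>__name__<eq>'__main__'<block_start>sys.exit(execute(['--entity' '/path/to/file.py' '--key' 'api-key-placeholder']))<block_end>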
""" Evaluate the model using Eigen split of KITTI dataset - prepare gt depth running the script https://github.com/nianticlabs/monodepth2/blob/master/export_gt_depth.py """<import_stmt>argparse<import_stmt>os<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tqdm tqdm<import_from_stmt>eval_utils compute_errors compute_scale_and_shift<import_from_stmt>network Pydnet<line_sep>os.environ["CUDA_VISIBLE_DEVICES"]="-1"<class_stmt>KITTILoader(object)<block_start><def_stmt>__init__ self params<block_start>self.params=params<line_sep>self.height=params["height"]<line_sep>self.width=params["width"]<line_sep>self.data_list_file=params["data_list_file"]<line_sep>self.data_path=params["data_path"]<line_sep>self.num_workers=4<line_sep>self.data_list=np.loadtxt(self.data_list_file dtype=bytes).astype(np.str)<line_sep>self.default_img_shape=<none><block_end><def_stmt>read_and_decode self filename_queue<block_start>"""Read jpeg file from file system"""<line_sep>img0_name=tf.strings.join([self.data_path "/" filename_queue ".jpg"])<line_sep>img0=tf.image.decode_jpeg(tf.io.read_file(img0_name) channels=3)<line_sep>img0=tf.cast(img0 tf.float32)<line_sep><return>img0<block_end><def_stmt>preprocess self filename_queue<block_start>"""Prepare single image at testing time"""<line_sep>img0=self.read_and_decode(filename_queue)<line_sep>img0=tf.image.resize_images(img0 [self.height self.width] tf.image.ResizeMethod.AREA)<line_sep>img0.set_shape([self.height self.width 3])<line_sep>img0=img0/255.0<line_sep><return>img0<block_end><def_stmt>create_iterator self num_parallel_calls=4<block_start>"""Create iterator"""<line_sep>data_list=tf.convert_to_tensor(self.data_list dtype=tf.string)<line_sep>dataset=tf.data.Dataset.from_tensor_slices(data_list)<line_sep>dataset=dataset.map(self.preprocess num_parallel_calls=num_parallel_calls)<line_sep>dataset=dataset.batch(1)<line_sep>dataset=dataset.repeat()<line_sep>iterator=dataset.make_initializable_iterator()<line_sep><return>iterator<block_end><block_end><def_stmt>read_test_files test_file<arrow>list<block_start>"""Read test files from txt file"""<assert_stmt>os.path.exists(test_file)<with_stmt>open(test_file "r")<as>f<block_start>lines=f.readlines()<block_end>lines=[l.strip()<for>l lines]<line_sep><return>lines<block_end><def_stmt>run_inference opts<block_start>"""Run the model on KITTI"""<line_sep>network_params={"height":320 "width":640 "is_training":<false>}<line_sep>dataset_params={"height":320 "width":640 "data_path":opts.data_path "data_list_file":opts.data_list_file }<line_sep>dataset=KITTILoader(dataset_params)<line_sep>iterator=dataset.create_iterator()<line_sep>batch_img=iterator.get_next()<line_sep>network=Pydnet(network_params)<line_sep>predicted_idepth=network.forward(batch_img)<line_sep>predicted_idepth=tf.nn.relu(predicted_idepth)<line_sep># restore graph saver=tf.train.Saver()<line_sep>sess=tf.Session()<line_sep>sess.run(tf.compat.v1.global_variables_initializer())<line_sep>sess.run(iterator.initializer)<line_sep>saver.restore(sess opts.ckpt)<line_sep>os.makedirs(opts.dest exist_ok=<true>)<line_sep>test_images=read_test_files(opts.data_list_file)<line_sep>num_images=len(test_images)<with_stmt>tqdm(total=num_images)<as>pbar<block_start><for_stmt>i 
range(num_images)<block_start>idepth=sess.run(predicted_idepth)<line_sep>idepth=np.squeeze(idepth)<line_sep>min_idepth=idepth.min()<line_sep>max_idepth=idepth.max()<line_sep>norm_idepth=(idepth-min_idepth)/(max_idepth-min_idepth)<line_sep>norm_idepth<augmul>255.0<line_sep>target_path=os.path.join(opts.data_path f"{test_images[i]}.jpg")<line_sep>target=cv2.imread(target_path)<line_sep>h,w=target.shape[:2]<line_sep>norm_idepth=cv2.resize(norm_idepth (w h))<line_sep>img_path=os.path.join(opts.dest f"{str(i).zfill(4)}.png")<line_sep>cv2.imwrite(img_path (norm_idepth<times>256.0).astype(np.uint16))<line_sep>pbar.update(1)<block_end><block_end>print("Inference done!")<block_end><def_stmt>eval opts<block_start>"""Compute error metrics."""<line_sep>errors=[]<line_sep>test_images=read_test_files(opts.data_list_file)<line_sep>print("=> loading gt data")<line_sep>gt_depths=np.load(opts.gt_path fix_imports=<true> encoding="latin1" allow_pickle=<true>)["data"]<line_sep>print("=> starting evaluation")<with_stmt>tqdm(total=len(test_images))<as>pbar<block_start><for_stmt>i range(len(test_images))<block_start>target=gt_depths[i]<line_sep>pred_path=os.path.join(opts.dest f"{str(i).zfill(4)}.png")<line_sep>prediction_idepth=cv2.imread(pred_path -1)/256.0<line_sep>mask=(target<g>1e-3)&(target<l>opts.max_depth)<line_sep>target_idepth=np.zeros_like(target)<line_sep>target_idepth[mask<eq>1]=1.0/target[mask<eq>1]<line_sep>scale,shift=compute_scale_and_shift(prediction_idepth target_idepth mask)<line_sep>prediction_idepth_aligned=scale<times>prediction_idepth+shift<line_sep>disparity_cap=1.0/opts.max_depth<line_sep>prediction_idepth_aligned[prediction_idepth_aligned<l>disparity_cap]=disparity_cap<line_sep>prediciton_depth_aligned=1.0/prediction_idepth_aligned<line_sep>prediciton_depth_aligned=prediciton_depth_aligned[mask<eq>1]<line_sep>target=target[mask<eq>1]<line_sep>errors.append(compute_errors(target prediciton_depth_aligned))<line_sep>pbar.update(1)<block_end><block_end>mean_errors=np.array(errors).mean(0)<line_sep>labels=["abs_rel" "sq_rel" "rmse" "rmse_log" "a1" "a2" "a3"]<for_stmt>i range(len(labels))<block_start>print(f"{labels[i]}:{mean_errors[i]}")<block_end>print("Evaluation done!")<block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser(description="Evaluate depth network on KITTI")<line_sep>parser.add_argument("--ckpt" type=str help="path to checkpoint" required=<true>)<line_sep>parser.add_argument("--data_path" type=str help="path to kitti" required=<true>)<line_sep>parser.add_argument("--gt_path" type=str help="path to gt_depths.npz" required=<true>)<line_sep>parser.add_argument("--data_list_file" type=str help="path to data list" default="test_kitti.txt")<line_sep>parser.add_argument("--dest" type=str help="prediction folder" default="kitti")<line_sep>parser.add_argument("--max_depth" type=float help="maximum depth value" default=80.0)<line_sep>opts=parser.parse_args()<line_sep>run_inference(opts)<line_sep>eval(opts)<block_end>
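# Hedged invocation sketch: the script is driven by the argparse flags defined above; the script file name and the paths below are placeholders. # python evaluate_pydnet_kitti.py --ckpt checkpoints/pydnet --data_path /path/to/kitti_raw --gt_path gt_depths.npz --data_list_file test_kitti.txt --dest kitti --max_depth 80.0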
""" ============== Edge operators ============== Edge operators are used in image processing within edge detection algorithms. They are discrete differentiation operators, computing an approximation of the gradient of the image intensity function. """<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>skimage.data camera<import_from_stmt>skimage.filters roberts sobel sobel_h sobel_v scharr scharr_h scharr_v prewitt prewitt_v prewitt_h farid_v farid_h<line_sep>image=camera()<line_sep>edge_roberts=roberts(image)<line_sep>edge_sobel=sobel(image)<line_sep>fig,ax=plt.subplots(ncols=2 sharex=<true> sharey=<true> figsize=(8 4))<line_sep>ax[0].imshow(edge_roberts cmap=plt.cm.gray)<line_sep>ax[0].set_title('Roberts Edge Detection')<line_sep>ax[1].imshow(edge_sobel cmap=plt.cm.gray)<line_sep>ax[1].set_title('Sobel Edge Detection')<for_stmt>a ax<block_start>a.axis('off')<block_end>plt.tight_layout()<line_sep>plt.show()<line_sep>###################################################################### # Different operators compute different finite-difference approximations of # the gradient. For example, the Scharr filter results in a less rotational # variance than the Sobel filter that is in turn better than the Prewitt # filter [1]_ [2]_ [3]_. The difference between the Prewitt and Sobel filters # and the Scharr filter is illustrated below with an image that is the # discretization of a rotation- invariant continuous function. The # discrepancy between the Prewitt and Sobel filters, and the Scharr filter is # stronger for regions of the image where the direction of the gradient is # close to diagonal, and for regions with high spatial frequencies. For the # example image the differences between the filter results are very small and # the filter results are visually almost indistinguishable. # # .. [1] https://en.wikipedia.org/wiki/Sobel_operator#Alternative_operators # # .. [2] <NAME>, <NAME>, and <NAME>. Principles of filter design. # In Handbook of Computer Vision and Applications. Academic Press, # 1999. # # .. [3] https://en.wikipedia.org/wiki/Prewitt_operator x,y=np.ogrid[:100 :100]<line_sep># Rotation-invariant image with different spatial frequencies img=np.exp(1j<times>np.hypot(x y)<power>1.3/20.).real<line_sep>edge_sobel=sobel(img)<line_sep>edge_scharr=scharr(img)<line_sep>edge_prewitt=prewitt(img)<line_sep>diff_scharr_prewitt=edge_scharr-edge_prewitt<line_sep>diff_scharr_sobel=edge_scharr-edge_sobel<line_sep>max_diff=np.max(np.maximum(diff_scharr_prewitt diff_scharr_sobel))<line_sep>fig,axes=plt.subplots(nrows=2 ncols=2 sharex=<true> sharey=<true> figsize=(8 8))<line_sep>ax=axes.ravel()<line_sep>ax[0].imshow(img cmap=plt.cm.gray)<line_sep>ax[0].set_title('Original image')<line_sep>ax[1].imshow(edge_scharr cmap=plt.cm.gray)<line_sep>ax[1].set_title('Scharr Edge Detection')<line_sep>ax[2].imshow(diff_scharr_prewitt cmap=plt.cm.gray vmax=max_diff)<line_sep>ax[2].set_title('Scharr - Prewitt')<line_sep>ax[3].imshow(diff_scharr_sobel cmap=plt.cm.gray vmax=max_diff)<line_sep>ax[3].set_title('Scharr - Sobel')<for_stmt>a ax<block_start>a.axis('off')<block_end>plt.tight_layout()<line_sep>plt.show()<line_sep>###################################################################### # As in the previous example, here we illustrate the rotational invariance of # the filters. The top row shows a rotationally invariant image along with the # angle of its analytical gradient. 
The other two rows contain the difference # between the different gradient approximations (Sobel, Prewitt, Scharr & # Farid) and analytical gradient. # # The Farid & Simoncelli derivative filters [4]_, [5]_ are the most # rotationally invariant, but require a 5x5 kernel, which is computationally # more intensive than a 3x3 kernel. # # .. [4] <NAME>. and <NAME>., "Differentiation of discrete # multidimensional signals", IEEE Transactions on Image Processing 13(4): # 496-508, 2004. :DOI:`10.1109/TIP.2004.823819` # # .. [5] Wikipedia, "Farid and Simoncelli Derivatives." Available at: # <https://en.wikipedia.org/wiki/Image_derivatives#Farid_and_Simoncelli_Derivatives> x,y=np.mgrid[-10:10:255j -10:10:255j]<line_sep>img=np.sin(x<power>2+y<power>2)<line_sep>imgx=2<times>x<times>np.cos(x<power>2+y<power>2)<line_sep>imgy=2<times>y<times>np.cos(x<power>2+y<power>2)<def_stmt>angle dx dy<block_start><return>np.mod(np.arctan2(dy dx) np.pi)<block_end>true_angle=angle(imgx imgy)<line_sep>angle_farid=angle(farid_h(img) farid_v(img))<line_sep>angle_sobel=angle(sobel_h(img) sobel_v(img))<line_sep>angle_scharr=angle(scharr_h(img) scharr_v(img))<line_sep>angle_prewitt=angle(prewitt_h(img) prewitt_v(img))<def_stmt>diff_angle angle_1 angle_2<block_start><return>np.minimum(np.pi-np.abs(angle_1-angle_2) np.abs(angle_1-angle_2))<block_end>diff_farid=diff_angle(true_angle angle_farid)<line_sep>diff_sobel=diff_angle(true_angle angle_sobel)<line_sep>diff_scharr=diff_angle(true_angle angle_scharr)<line_sep>diff_prewitt=diff_angle(true_angle angle_prewitt)<line_sep>fig,axes=plt.subplots(nrows=3 ncols=2 sharex=<true> sharey=<true> figsize=(8 8))<line_sep>ax=axes.ravel()<line_sep>ax[0].imshow(img cmap=plt.cm.gray)<line_sep>ax[0].set_title('Original image')<line_sep>ax[1].imshow(true_angle cmap=plt.cm.hsv)<line_sep>ax[1].set_title('Analytical gradient angle')<line_sep>ax[2].imshow(diff_sobel cmap=plt.cm.inferno vmin=0 vmax=0.02)<line_sep>ax[2].set_title('Sobel error')<line_sep>ax[3].imshow(diff_prewitt cmap=plt.cm.inferno vmin=0 vmax=0.02)<line_sep>ax[3].set_title('Prewitt error')<line_sep>ax[4].imshow(diff_scharr cmap=plt.cm.inferno vmin=0 vmax=0.02)<line_sep>ax[4].set_title('Scharr error')<line_sep>cax=ax[5].imshow(diff_farid cmap=plt.cm.inferno vmin=0 vmax=0.02)<line_sep>ax[5].set_title('Farid error')<line_sep>fig.subplots_adjust(right=0.8)<line_sep>cbar_ax=fig.add_axes([0.90 0.10 0.02 0.50])<line_sep>fig.colorbar(cax cax=cbar_ax ticks=[0 0.01 0.02])<for_stmt>a ax<block_start>a.axis('off')<block_end>plt.show()<line_sep>
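# Hedged aside, not part of the original example: each pairing of horizontal and vertical filters used above also yields the gradient magnitude and orientation directly; the magnitude agrees with the combined filter up to a constant normalization factor. grad_magnitude=np.hypot(sobel_h(image) sobel_v(image))<line_sep>grad_orientation=np.arctan2(sobel_v(image) sobel_h(image))<line_sep>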
<import_stmt>tensorflow<as>tf<import_from_stmt>keras.models Model<import_from_stmt>deephar.layers *<import_from_stmt>deephar.utils *<def_stmt>conv_block inp kernel_size filters last_act=<true><block_start>filters1,filters2,filters3=filters<line_sep>x=conv_bn_act(inp filters1 (1 1))<line_sep>x=conv_bn_act(x filters2 kernel_size)<line_sep>x=conv_bn(x filters3 (1 1))<line_sep>shortcut=conv_bn(inp filters3 (1 1))<line_sep>x=add([x shortcut])<if_stmt>last_act<block_start>x=Activation('relu')(x)<block_end><return>x<block_end><def_stmt>identity_block inp kernel_size filters last_act=<true><block_start>filters1,filters2,filters3=filters<line_sep>x=conv_bn_act(inp filters1 (1 1))<line_sep>x=conv_bn_act(x filters2 kernel_size)<line_sep>x=conv_bn(x filters3 (1 1))<line_sep>x=add([x inp])<if_stmt>last_act<block_start>x=Activation('relu')(x)<block_end><return>x<block_end><def_stmt>stem_inception_v4 x image_div=8<block_start>"""Entry-flow network (stem) *based* on Inception_v4."""<assert_stmt>image_div<in>[4 8 16 32] 'Invalid image_div ({}).'.format(image_div)<line_sep>x=conv_bn_act(x 32 (3 3) strides=(2 2))<line_sep>x=conv_bn_act(x 32 (3 3))<if_stmt>image_div<eq>32<block_start>x=MaxPooling2D((2 2))(x)<block_end>x=conv_bn_act(x 64 (3 3))<line_sep>a=conv_bn_act(x 96 (3 3) strides=(2 2))<line_sep>b=MaxPooling2D((3 3) strides=(2 2) padding='same')(x)<line_sep>x=concatenate([a b])<line_sep>a=conv_bn_act(x 64 (1 1))<line_sep>a=conv(a 96 (3 3))<line_sep>b=conv_bn_act(x 64 (1 1))<line_sep>b=conv_bn_act(b 64 (5 1))<line_sep>b=conv_bn_act(b 64 (1 5))<line_sep>b=conv(b 96 (3 3))<line_sep>x=concatenate([a b])<line_sep>x=BatchNormalization(axis=-1 scale=<false>)(x)<if_stmt>image_div<ne>4<block_start>a=act_conv_bn(x 192 (3 3) strides=(2 2))<line_sep>b=MaxPooling2D((3 3) strides=(2 2) padding='same')(x)<line_sep>x=concatenate([a b])<block_end><if_stmt>image_div<in>[16 32]<block_start>a=act_conv_bn(x 192 (3 3) strides=(2 2))<line_sep>b=MaxPooling2D((3 3) strides=(2 2) padding='same')(x)<line_sep>x=concatenate([a b])<block_end><if_stmt>image_div<eq>4<block_start>x=residual(x int_size=112 out_size=2<times>192+64 convtype='normal' name='residual0')<block_end><else_stmt><block_start>x=residual(x int_size=144 out_size=3<times>192 convtype='normal' name='residual0')<block_end><return>x<block_end><def_stmt>stem_residual_eccv x image_div=8<block_start>"""Entry-flow network (stem) *based* on ResNet ('residual' option)."""<assert_stmt>image_div<in>[4 8 16 32] 'Invalid image_div ({}).'.format(image_div)<line_sep>x=conv_bn_act(x 64 (7 7) strides=(2 2) padding='same')<line_sep>a=conv_bn_act(x 128 (3 3) padding='same')<line_sep>b=conv_bn_act(x 128 (1 1) padding='same')<line_sep>x=add([a b])<line_sep>x=MaxPooling2D((3 3) strides=(2 2) padding='same')(x)<line_sep>x=residual(x int_size=128 out_size=256 convtype='normal' name='rn0')<line_sep>x=residual(x int_size=128 out_size=256 convtype='normal' name='rn1')<if_stmt>image_div<eq>4<block_start>x=residual(x out_size=256 convtype='normal' name='rn3')<block_end><else_stmt><block_start>x=MaxPooling2D((3 3) strides=(2 2) padding='same')(x)<line_sep>x=residual(x int_size=192 out_size=384 convtype='normal' name='rn3')<line_sep>x=residual(x int_size=192 out_size=384 convtype='normal' name='rn4')<if_stmt>image_div<in>[16 32]<block_start>x=MaxPooling2D((3 3) strides=(2 2) padding='same')(x)<line_sep>x=residual(x int_size=256 out_size=512 convtype='normal' name='rn5')<line_sep>x=residual(x int_size=256 out_size=512 convtype='normal' 
name='rn6')<if_stmt>image_div<eq>32<block_start>x=MaxPooling2D((2 2) strides=(2 2) padding='same')(x)<block_end><block_end><block_end><return>x<block_end><def_stmt>reception_block x num_levels kernel_size int_size=<none> convtype='depthwise' name=<none><block_start><def_stmt>hourglass x n<block_start>up1=residual(x kernel_size=kernel_size int_size=int_size convtype=convtype)<line_sep>low=MaxPooling2D((2 2))(x)<if_stmt>n<eq>num_levels<block_start>low=act_conv_bn(low int(K.int_shape(x)[-1]/2) (1 1))<block_end>low=residual(low kernel_size=kernel_size int_size=int_size convtype=convtype)<if_stmt>n<g>2<block_start>low=hourglass(low n-1)<block_end><else_stmt><block_start>low=residual(low kernel_size=kernel_size int_size=int_size convtype=convtype)<block_end><if_stmt>n<eq>num_levels<block_start>low=residual(low kernel_size=kernel_size out_size=K.int_shape(x)[-1] int_size=int_size convtype=convtype)<block_end><else_stmt><block_start>low=residual(low kernel_size=kernel_size int_size=int_size convtype=convtype)<block_end>up2=UpSampling2D((2 2))(low)<line_sep>x=add([up1 up2])<line_sep><return>x<block_end>x=hourglass(x num_levels)<line_sep><return>x<block_end><def_stmt>build_keypoints_regressor input_shape dim num_maps sam_model prob_model name=<none> verbose=0<block_start><assert_stmt>num_maps<ge>1 'The number of maps should be at least 1 (%d given)'%num_maps<line_sep>inputs=[]<line_sep>inputs3d=[]<line_sep>p_concat=[]<line_sep>v_concat=[]<line_sep># Auxiliary functions v_tile=Lambda(<lambda>x:K.tile(x (1 1 dim)))<line_sep># This depends on TensorFlow because keras does not implement divide. tf_div=Lambda(<lambda>x:tf.divide(x[0] x[1]))<for_stmt>i range(num_maps)<block_start>h=Input(shape=input_shape)<line_sep>inputs.append(h)<line_sep>h_s=act_channel_softmax(h)<line_sep>p=sam_model(h_s)<line_sep>v=prob_model(h_s)<if_stmt>dim<eq>3<block_start>d=Input(shape=input_shape)<line_sep>inputs3d.append(d)<line_sep>d_s=Activation('sigmoid')(d)<line_sep>dm=multiply([d_s h_s])<line_sep>z=Lambda(<lambda>x:K.sum(x axis=(1 2)))(dm)<line_sep>z=Lambda(<lambda>x:K.expand_dims(x axis=-1))(z)<line_sep>p=concatenate([p z])<block_end><if_stmt>num_maps<g>1<block_start>t=v_tile(v)<line_sep>p=multiply([p v_tile(v)])<block_end>p_concat.append(p)<line_sep>v_concat.append(v)<block_end><if_stmt>num_maps<g>1<block_start>p=add(p_concat)<line_sep>v_sum=add(v_concat)<line_sep>p=tf_div([p v_tile(v_sum)])<line_sep>v=maximum(v_concat)<block_end><else_stmt><block_start>p=p_concat[0]<line_sep>v=v_concat[0]<block_end>model=Model(inputs+inputs3d [p v] name=name)<if_stmt>verbose<block_start>model.summary()<block_end><return>model<block_end><def_stmt>build_context_aggregation num_joints num_context alpha num_frames=1 name=<none><block_start>inp=Input(shape=(num_joints<times>num_context 1))<line_sep>d=Dense(num_joints use_bias=<false>)<line_sep>x=Lambda(<lambda>x:K.squeeze(x axis=-1))(inp)<line_sep>x=d(x)<line_sep>x=Lambda(<lambda>x:K.expand_dims(x axis=-1))(x)<line_sep>w=d.get_weights()<line_sep>w[0].fill(0)<for_stmt>j range(num_joints)<block_start>start=j<times>num_context<line_sep>w[0][j<times>num_context:(j+1)<times>num_context j]=1.<block_end>d.set_weights(w)<line_sep>d.trainable=<false><line_sep>ctx_sum=Model(inputs=inp outputs=x)<line_sep>ctx_sum.trainable=<false><if_stmt>num_frames<g>1<block_start>ctx_sum=TimeDistributed(ctx_sum input_shape=(num_frames )+K.int_shape(inp)[1:])<block_end># Define auxiliary layers. 
mul_alpha=Lambda(<lambda>x:alpha<times>x)<line_sep>mul_1alpha=Lambda(<lambda>x:(1-alpha)<times>x)<line_sep># This depends on TensorFlow because keras does not implement divide. tf_div=Lambda(<lambda>x:tf.divide(x[0] x[1]))<if_stmt>num_frames<eq>1# Define inputs <block_start>ys=Input(shape=(num_joints 2))<line_sep>yc=Input(shape=(num_joints<times>num_context 2))<line_sep>pc=Input(shape=(num_joints<times>num_context 1))<line_sep># Split contextual predictions in x and y and do computations separately xi=Lambda(<lambda>x:x[: : 0:1])(yc)<line_sep>yi=Lambda(<lambda>x:x[: : 1:2])(yc)<block_end><else_stmt><block_start>ys=Input(shape=(num_frames num_joints 2))<line_sep>yc=Input(shape=(num_frames num_joints<times>num_context 2))<line_sep>pc=Input(shape=(num_frames num_joints<times>num_context 1))<line_sep># Split contextual predictions in x and y and do computations separately xi=Lambda(<lambda>x:x[: : : 0:1])(yc)<line_sep>yi=Lambda(<lambda>x:x[: : : 1:2])(yc)<block_end>pxi=multiply([xi pc])<line_sep>pyi=multiply([yi pc])<line_sep>pc_sum=ctx_sum(pc)<line_sep>pxi_sum=ctx_sum(pxi)<line_sep>pyi_sum=ctx_sum(pyi)<line_sep>pc_div=Lambda(<lambda>x:x/num_context)(pc_sum)<line_sep>pxi_div=tf_div([pxi_sum pc_sum])<line_sep>pyi_div=tf_div([pyi_sum pc_sum])<line_sep>yc_div=concatenate([pxi_div pyi_div])<line_sep>ys_alpha=mul_alpha(ys)<line_sep>yc_div_1alpha=mul_1alpha(yc_div)<line_sep>y=add([ys_alpha yc_div_1alpha])<line_sep>model=Model(inputs=[ys yc pc] outputs=y name=name)<line_sep>model.trainable=<false><line_sep><return>model<block_end><def_stmt>build_softargmax_1d input_shape name=<none><block_start><if_stmt>name<is><none><block_start>name_sm=<none><block_end><else_stmt><block_start>name_sm=name+'_softmax'<block_end>inp=Input(shape=input_shape)<line_sep>x=act_depth_softmax(inp name=name_sm)<line_sep>x=lin_interpolation_1d(x)<line_sep>model=Model(inputs=inp outputs=x name=name)<line_sep>model.trainable=<false><line_sep><return>model<block_end><def_stmt>build_softargmax_2d input_shape rho=0. name=<none><block_start><if_stmt>name<is><none><block_start>name_sm=<none><block_end><else_stmt><block_start>name_sm=name+'_softmax'<block_end>inp=Input(shape=input_shape)<line_sep>x=act_channel_softmax(inp name=name_sm)<if_stmt>rho<g>0<block_start>x=kl_divergence_regularizer(x rho=rho)<block_end>x_x=lin_interpolation_2d(x axis=0)<line_sep>x_y=lin_interpolation_2d(x axis=1)<line_sep>x=concatenate([x_x x_y])<line_sep>model=Model(inputs=inp outputs=x name=name)<line_sep>model.trainable=<false><line_sep><return>model<block_end><def_stmt>build_joints_probability input_shape name=<none> verbose=0<block_start>inp=Input(shape=input_shape)<line_sep>x=inp<line_sep>x=AveragePooling2D((2 2) strides=(1 1))(x)<line_sep>x=Lambda(<lambda>x:4<times>x)(x)<line_sep>x=GlobalMaxPooling2D()(x)<line_sep>x=Lambda(<lambda>x:K.expand_dims(x axis=-1))(x)<line_sep>model=Model(inputs=inp outputs=x name=name)<if_stmt>verbose<block_start>model.summary()<block_end><return>model<block_end>
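# Hedged usage sketch, with an assumed 32x32 heatmap over 16 joints: the soft-argmax and joint-probability heads built above are meant to be passed into the keypoint regressor; the shape and names below are illustrative assumptions. heatmap_shape=(32 32 16)<line_sep>sam_head=build_softargmax_2d(heatmap_shape name='softargmax')<line_sep>vis_head=build_joints_probability(heatmap_shape name='visibility')<line_sep>keypoint_model=build_keypoints_regressor(heatmap_shape dim=2 num_maps=2 sam_model=sam_head prob_model=vis_head name='keypoints')<line_sep>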
<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("schedule" "0001_initial")]<line_sep>operations=[migrations.AddField(model_name="event" name="color_event" field=models.CharField(verbose_name="Color event" blank=<true> max_length=10 null=<true>) )]<block_end>
<import_from_stmt>helpers.api_request request_url<import_from_stmt>config.api settings<def_stmt>get_token data<block_start>response=request_url(verb='POST' headers={'Content-Type':'application/json'} uri='authenticate/access-token-json' json=data)<if_stmt>response.get('status_code')<eq>200<block_start>result=response.get('json')<line_sep><return>result.get('access_token')<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>print(get_token(settings.CREDENTIALS_ADM))<block_end>
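# Hedged follow-up sketch: the access token returned above would typically be sent back as a bearer header on later calls; the 'users/me' endpoint is an assumption, and request_url is assumed to accept calls without a json body. <def_stmt>get_current_user token<block_start>response=request_url(verb='GET' headers={'Authorization':'Bearer '+token} uri='users/me')<line_sep><return>response.get('json')<block_end>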
# -*- coding:utf-8 -*- __author__='yangjian'<line_sep>""" """<import_stmt>pandas<as>pd<import_from_stmt>deeptables.models DeepTable<import_from_stmt>deeptables.models.hyper_dt HyperDT tiny_dt_space<import_from_stmt>hypernets.core.callbacks SummaryCallback FileStorageLoggingCallback<import_from_stmt>hypernets.core.searcher OptimizeDirection<import_from_stmt>hypernets.searchers RandomSearcher<import_from_stmt>sklearn.datasets load_boston<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>.. homedir<class_stmt>Test_HyperDT_Regression()<block_start><def_stmt>test_boston self<block_start>print("Loading datasets...")<line_sep>boston_dataset=load_boston()<line_sep>df_train=pd.DataFrame(boston_dataset.data)<line_sep>df_train.columns=boston_dataset.feature_names<line_sep>self.y=pd.Series(boston_dataset.target)<line_sep>self.X=df_train<line_sep>self.X_train,self.X_test,self.y_train,self.y_test=train_test_split(self.X self.y test_size=0.2 random_state=42)<line_sep>rs=RandomSearcher(tiny_dt_space optimize_direction=OptimizeDirection.Maximize )<line_sep>hdt=HyperDT(rs callbacks=[SummaryCallback() FileStorageLoggingCallback(rs output_dir=f'{homedir}/hyn_logs')] reward_metric='RootMeanSquaredError' dnn_params={'hidden_units':((256 0 <false>) (256 0 <false>)) 'dnn_activation':'relu' } )<line_sep>hdt.search(self.X_train self.y_train self.X_test self.y_test max_trials=3)<line_sep>best_trial=hdt.get_best_trial()<line_sep>estimator=hdt.final_train(best_trial.space_sample self.X self.y)<line_sep>score=estimator.predict(self.X_test)<line_sep>result=estimator.evaluate(self.X_test self.y_test)<assert_stmt>result<assert_stmt>isinstance(estimator.model DeepTable)<block_end><block_end>
<import_from_stmt>luminaire.model.base_model BaseModel BaseModelHyperParams<import_from_stmt>luminaire.exploration.data_exploration DataExploration<class_stmt>WindowDensityHyperParams(BaseModelHyperParams)<block_start>""" Hyperparameter class for Luminaire Window density model. :param str freq: The frequency of the time-series. Luminaire supports default configuration for 'S', 'T', '15T', 'H', 'D'. Any other frequency type should be specified as 'custom' and configuration should be set manually. :param float max_missing_train_prop: Maximum proportion of missing observations allowed in the training data. :param bool is_log_transformed: A flag to specify whether to take a log transform of the input data. If the data contain negatives, is_log_transformed is ignored even though it is set to True. :param str baseline_type: A string flag to specify whether to set the baseline as the previous sub-window from the training data for scoring or to aggregate the overall window as the baseline. Possible values: - "last_window" - "aggregated" :param str detection_method: A string that selects between two window testing methods. Possible values: - "kldiv" (KL-divergence). This is recommended to be set for high frequency time series such as 'S', 'T' etc. - "sign_test" (Wilcoxon sign rank test). This is recommended to be set for low frequency time series such as 'H', 'D' etc. :param int min_window_length: Minimum size of the scoring window / a stable training sub-window length. .. Note :: This is not the minimum size of the whole training window which is the combination of stable sub-windows. :param int max_window_length: Maximum size of the scoring window / a stable training sub-window length. .. Note :: This is not the maximum size of the whole training window which is the combination of stable sub-windows. :param int window_length: Size of the scoring window / a stable training sub-window length. .. Note :: This is not the size of the whole training window which is the combination of stable sub-windows. :param str detrend_method: A string that selects between two stationarizing methods. Possible values: - "modeling" (model based) - "diff" (differencing based). """<def_stmt>__init__ self freq=<none> max_missing_train_prop=0.1 is_log_transformed=<false> baseline_type="aggregated" detection_method=<none> min_window_length=<none> max_window_length=<none> window_length=<none> detrend_method='modeling'<block_start>super(WindowDensityHyperParams self).__init__(model_name="WindowDensityModel" freq=freq max_missing_train_prop=max_missing_train_prop is_log_transformed=is_log_transformed baseline_type=baseline_type detection_method=detection_method min_window_length=min_window_length max_window_length=max_window_length window_length=window_length detrend_method=detrend_method)<block_end><block_end><class_stmt>WindowDensityModel(BaseModel)<block_start>""" This model detects anomalous windows using KL divergence (for high frequency data) and Wilcoxon sign rank test (for low frequency data). The default monitoring frequency is set to pandas time frequency type 'T'. :param dict hyper_params: Hyper parameters for Luminaire window density model. See :class:`luminaire.model.window_density.WindowDensityHyperParams` for detailed information. 
:return: Anomaly probability for the execution window and other related model outputs :rtype: list[dict] """<line_sep>__version__="0.1"<def_stmt>__init__ self hyper_params:WindowDensityHyperParams().params<or><none> **kwargs# Specifying the minimum and maximum number of training windows <block_start>self.min_num_train_windows=5<line_sep>self.max_num_train_windows=10000<line_sep>self.hyper_params=hyper_params<line_sep>self.sig_level=0.001<line_sep>super(WindowDensityModel self).__init__(**hyper_params **kwargs)<block_end><def_stmt>_volume_shift_detection self mean_list=<none> sd_list=<none> probability_threshold=0.5<block_start>""" This function detects any significant shift in the training data volume using a Bayesian change point detection technique. :param list mean_list: The list of means from each training sub-window. :param list sd_list: The list of standard deviations from each training sub-window. :param float probability_threshold: Threshold for the probability value to be flagged as a change point. :return: Indices with significant vdata volume shift. :rtype: int """<import_stmt>numpy<as>np<import_from_stmt>bayesian_changepoint_detection offline_changepoint_detection<as>offcd<import_from_stmt>functools partial<line_sep># Volume shift detection over the means of the training window q,p,pcp=offcd.offline_changepoint_detection(data=np.array(mean_list) prior_func=partial(offcd.const_prior l=(len(mean_list)+1)) observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood truncate=-10)<line_sep>mask_mean=np.append(0 np.exp(pcp).sum(0))<g>probability_threshold<line_sep># Volume shift detection over the standard deviations of the training window change_points=np.array(mask_mean).nonzero()<line_sep>last_mean_cp=change_points[0][-1]<if>len(change_points[0])<g>0<else>[]<line_sep>q,p,pcp=offcd.offline_changepoint_detection(data=np.array(sd_list) prior_func=partial(offcd.const_prior l=(len(sd_list)+1)) observation_log_likelihood_function=offcd.gaussian_obs_log_likelihood truncate=-10)<line_sep>mask_sd=np.append(0 np.exp(pcp).sum(0))<g>probability_threshold<line_sep>change_points=np.array(mask_sd).nonzero()<line_sep>last_sd_cp=change_points[0][-1]<if>len(change_points[0])<g>0<else>[]<line_sep># Change point is the maximum obtained from mean list and the standard deviation list cdate=max(last_mean_cp last_sd_cp)<line_sep><return>cdate<block_end><def_stmt>_distance_function self data=<none> called_for=<none> baseline=<none><block_start>""" This function finds the distance of the given data from the baseline using KL divergence. :param list data: The list containing the scoring window (for scoring) / training sub-window (for training). :param str distance_method: The method to be used to calculate the distance between two datasets. :param str called_for: A flag to specify whether this function is called for training or scoring. :param list baseline: A list containing the base line to be compared with the given data. :return: KL divergence between two time windows. 
:rtype: float """<import_stmt>numpy<as>np<import_stmt>scipy.stats<as>stats<line_sep>float_min=1e-50<line_sep>float_max=1e50<line_sep># If called for training, Kl divergence is performed over each pair of consecutive windows to create # the past anomaly scores <if_stmt>called_for<eq>"training"<block_start>distance=[]<for_stmt>i range(0 len(data)-1)<block_start>q=stats.kde.gaussian_kde(data[i])<line_sep>p=stats.kde.gaussian_kde(data[i+1])<line_sep>ts_min=min(np.min(data[i]) np.min(data[i+1]))<line_sep>ts_max=max(np.max(data[i]) np.max(data[i+1]))<line_sep>density_domain=np.linspace(ts_min ts_max 1000)<line_sep>q=q(density_domain)<line_sep>p=p(density_domain)<line_sep># approximating the zero probability regions to avoid divide by zero issue in KL divergence q[q<eq>0]=min(np.array(q)[np.array(q)<g>0])<line_sep>p[p<eq>0]=min(np.array(p)[np.array(p)<g>0])<line_sep>q=np.clip(q float_min float_max)<line_sep>p=np.clip(p float_min float_max)<line_sep>distance.append(stats.entropy(pk=p qk=q))<block_end><block_end># If called for scoring, Kl divergence is performed between the scoring window and the baseline <elif_stmt>called_for<eq>"scoring"<block_start>q=stats.kde.gaussian_kde(baseline)<line_sep>p=stats.kde.gaussian_kde(data)<line_sep>ts_min=min(np.min(baseline) np.min(data))<line_sep>ts_max=max(np.max(baseline) np.max(data))<line_sep>density_domain=np.linspace(ts_min ts_max 1000)<line_sep>q=q(density_domain)<line_sep>p=p(density_domain)<line_sep>q[q<eq>0]=min(np.array(q)[np.array(q)<g>0])<line_sep>p[p<eq>0]=min(np.array(p)[np.array(p)<g>0])<line_sep>q=np.clip(q float_min float_max)<line_sep>p=np.clip(p float_min float_max)<line_sep>distance=stats.entropy(pk=p qk=q)<block_end><return>distance<block_end><def_stmt>_training_data_truncation self sliced_training_data=<none><block_start>""" This function performs the truncation of the training data using the _volume_shift_detection function. :param list sliced_training_data: The list containing the training data. :return: Sliced training sample based on the most recent change point :rtype: list """<import_stmt>numpy<as>np<line_sep># Change point detection is performed over the means and standard deviations of the sub windows window_means=[]<line_sep>window_sds=[]<for_stmt>ts sliced_training_data<block_start>window_means.append(np.mean(ts))<line_sep>window_sds.append(np.std(ts))<block_end>change_point=self._volume_shift_detection(mean_list=window_means sd_list=window_sds)<line_sep># Truncating the training data based on the last change point <if_stmt>change_point<block_start>sliced_training_data_truncated=sliced_training_data[change_point:]<line_sep><return>sliced_training_data_truncated<block_end><else_stmt><block_start><return>sliced_training_data<block_end><block_end><def_stmt>_call_training self df=<none> window_length=<none> imputed_metric=<none> detrend_method=<none> detection_method=<none> freq=<none> **kwargs<block_start>""" This function generates the baseline and training metrics to be used for scoring. :param pandas.DataFrame df: Input training data frame. :param int window_length: The length of a training sub-window. :param str imputed_metric: Column storing the time series values. :param str detrend_method: Detrend method "modeling" or "diff" for nonstationarity. :param str detection_method: Detection method "kldiv" or "sign_test". :param str freq: Data frequency. :return: Returns past anomaly scores based on training data, baseline and other related metrics. 
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float, dict, list) """<import_stmt>pandas<as>pd<line_sep>past_anomaly_scores=dict()<line_sep>gamma_alpha=dict()<line_sep>gama_loc=dict()<line_sep>gamma_beta=dict()<line_sep>detrend_order=dict()<line_sep>baseline=dict()<line_sep>agg_data_model=dict()<line_sep>agg_data=dict()<line_sep>past_model=kwargs.get('past_model')<line_sep>training_start=df.first_valid_index()<line_sep>training_end=df.last_valid_index()<line_sep>current_training_end=training_end<while_stmt>(training_end-current_training_end)<l>pd.Timedelta('1D')<block_start>df_current=df[df.index<le>current_training_end]<line_sep>past_anomaly_scores_current,gamma_alpha_current,gama_loc_current,gamma_beta_current,detrend_order_current,baseline_current,agg_data_model_current,agg_data_current=self._anomalous_region_detection(input_df=df_current window_length=window_length value_column=imputed_metric called_for="training" detrend_method=detrend_method past_model=past_model detection_method=detection_method)<line_sep>past_anomaly_scores.update({str(current_training_end.time().strftime('%H:%M:%S')):past_anomaly_scores_current})<line_sep>gamma_alpha.update({str(current_training_end.time().strftime('%H:%M:%S')):float(gamma_alpha_current)<if>gamma_alpha_current<else><none>})<line_sep>gama_loc.update({str(current_training_end.time().strftime('%H:%M:%S')):float(gama_loc_current)<if>gama_loc_current<else><none>})<line_sep>gamma_beta.update({str(current_training_end.time().strftime('%H:%M:%S')):float(gamma_beta_current)<if>gamma_beta_current<else><none>})<line_sep>detrend_order.update({str(current_training_end.time().strftime('%H:%M:%S')):detrend_order_current})<line_sep>baseline.update({str(current_training_end.time().strftime('%H:%M:%S')):baseline_current})<line_sep>agg_data_model.update({str(current_training_end.time().strftime('%H:%M:%S')):agg_data_model_current})<line_sep>agg_data.update({str(current_training_end.time().strftime('%H:%M:%S')):agg_data_current})<if_stmt>isinstance(freq str)<block_start>freq=pd.Timedelta('1'+freq)<block_end>current_training_end=current_training_end-min(pd.Timedelta('30T') freq<times>10)<block_end><return>past_anomaly_scores gamma_alpha gama_loc gamma_beta detrend_order baseline agg_data_model agg_data training_start training_end<block_end><def_stmt>_get_model self input_df=<none> window_length=<none> value_column=<none> detrend_method=<none> baseline_type=<none> detection_method=<none> past_model=<none><block_start>""" This function runs the training process given the input parameters. :param pandas.DataFrame input_df: Input data containing the training and the scoring data. :param int window_length: The length of a training sub-window / scoring window. :param str value_column: Column containing the values. :param str detrend_method: Selects between "modeling" or "diff" detrend method. :param str baseline_type: Selects between "aggregated" or "last_window" baseline. :param str detection_method: Selects between "kldiv" or "sign_test" distance method. :param luminaire.model.window_density.WindowDensityModel past_model: luminaire.model to append model metadata from past :return: Returns past anomaly scores based on training data, baseline and other related metrics. 
:rtype: tuple(list, float, float, float, int, list, luminaire.model, float) """<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>itertools chain<import_stmt>scipy.stats<as>st<line_sep>model_history_truncation_prop=0.25# This is the proportion of history to truncate from both sides # everytime we store the past anomaly scores de_obj=DataExploration()<line_sep>sliced_training_data,agg_datetime=de_obj._partition(input_df window_length value_column)<line_sep># performing the stationarity test sliced_training_data_cleaned,detrend_order,agg_data_model,agg_data=de_obj._detrender(training_data_sliced=sliced_training_data significance_level=0.05 detrend_method=detrend_method agg_datetime=agg_datetime past_model=past_model)<line_sep># Obtain the past anomaly scores and the anomaly means and standard deviation if the detection method # is KL divergence <if_stmt>detection_method<eq>"kldiv"<block_start>past_anomaly_scores=np.array(self._distance_function(data=sliced_training_data_cleaned called_for="training"))<if_stmt>past_model<block_start>model_timestamps=list(past_model._params['PastAnomalyScores'].keys())<line_sep>training_end=input_df.index[-1]<line_sep>current_min_timedelta=pd.Timedelta('10D')<for_stmt>timestamp model_timestamps<block_start>current_datetime=pd.Timestamp(str(training_end.date())+' '+timestamp)<line_sep>temp_timedelta=training_end-current_datetime<line_sep>temp_timedelta=pd.Timedelta('1D')+temp_timedelta<if>temp_timedelta<l>pd.Timedelta(0)<else>temp_timedelta<if_stmt>temp_timedelta<l>current_min_timedelta<block_start>opt_timestamp=timestamp<line_sep>current_min_timedelta=temp_timedelta<block_end><block_end>past_anomaly_scores=np.concatenate([past_model._params['PastAnomalyScores'][opt_timestamp][int(len(past_anomaly_scores)<times>model_history_truncation_prop):-int(len(past_anomaly_scores)<times>model_history_truncation_prop)] past_anomaly_scores])<block_end><if_stmt>len(past_anomaly_scores)<l>100<block_start>alpha=[]<line_sep>loc=[]<line_sep>beta=[]<for_stmt>i range(10)<block_start>boot_scores=np.random.choice(past_anomaly_scores.tolist() size=100 replace=<true>)<line_sep>alpha_i,loc_i,beta_i=st.gamma.fit(boot_scores)<line_sep>alpha.append(alpha_i)<line_sep>loc.append(loc_i)<line_sep>beta.append(beta_i)<block_end>gamma_alpha=np.mean(alpha)<line_sep>gamma_loc=np.mean(loc)<line_sep>gamma_beta=np.mean(beta)<block_end><else_stmt><block_start>gamma_alpha,gamma_loc,gamma_beta=st.gamma.fit(past_anomaly_scores)<block_end><block_end><else_stmt><block_start>past_anomaly_scores,gamma_alpha,gamma_loc,gamma_beta=<none> <none> <none> <none><block_end># If aggregated baseline type is specified, we take the whole training window as a baseline, else we # take the last training sub window from the sliced training data <if_stmt>baseline_type<eq>"aggregated"<block_start>sliced_training_data_cleaned=self._training_data_truncation(sliced_training_data=sliced_training_data_cleaned)<if_stmt>detection_method<eq>"kldiv"<block_start>baseline=list(chain.from_iterable(sliced_training_data_cleaned))<block_end><elif_stmt>detection_method<eq>"sign_test"<block_start>baseline=sliced_training_data_cleaned<block_end><block_end><elif_stmt>baseline_type<eq>"last_window"<block_start>baseline=sliced_training_data_cleaned[-1]<block_end><return>past_anomaly_scores gamma_alpha gamma_loc gamma_beta detrend_order baseline agg_data_model agg_data<block_end><def_stmt>train self data **kwargs<block_start>""" Input time series for training. :param pandas.DataFrame data: Input time series. 
:return: Trained model with the training timestamp and a success flag :rtype: tuple(bool, str, python model object) >>> data raw interpolated index 2017-10-02 00:00:00 118870 118870 2017-10-02 01:00:00 121914 121914 2017-10-02 02:00:00 116097 116097 2017-10-02 03:00:00 94511 94511 2017-10-02 04:00:00 68330 68330 ... ... ... 2018-10-10 19:00:00 219908 219908 2018-10-10 20:00:00 219149 219149 2018-10-10 21:00:00 207232 207232 2018-10-10 22:00:00 198741 198741 2018-10-10 23:00:00 213751 213751 >>> hyper_params = WindowDensityHyperParams(freq='H').params >>> wdm_obj = WindowDensityModel(hyper_params=hyper_params) >>> success, model = wdm_obj.train(data) >>> success, model (True, "2018-10-10 23:00:00", <luminaire.model.window_density.WindowDensityModel object at 0x7fd7c5a34e80>) """<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<line_sep>freq=pd.Timedelta(self._params['freq'])<if>self._params['freq']<not><in>['S' 'T' '15T' 'H' 'D']<else>self._params['freq']<if_stmt>freq<in>['S' 'T' '15T' 'H' 'D']<block_start>window_length=self._params['window_length']<block_end><else_stmt><block_start>min_window_length=self._params['min_window_length']<line_sep>max_window_length=self._params['max_window_length']<line_sep>window_length=self._params['window_length']<if_stmt><not>min_window_length<or><not>max_window_length<or><not>window_length<block_start><raise>ValueError('Training window length with min and max should be specified in case frequency not in the '<concat>'specified list')<block_end><block_end>is_log_transformed=self._params['is_log_transformed']<line_sep>detrend_method=self._params['detrend_method']<line_sep>target_metric='raw'<line_sep>imputed_metric='interpolated'<if_stmt><not>self._params['detection_method']<block_start><if_stmt>freq<in>['S' 'T' '15T']<block_start>detection_method='kldiv'<block_end><elif_stmt>freq<in>['H' 'D']<block_start>detection_method='sign_test'<block_end><else_stmt><block_start>detection_method='sign_test'<if>freq<g>np.timedelta64(30 'm')<else>'kldiv'<block_end><block_end><else_stmt><block_start>detection_method=self._params['detection_method']<block_end><if_stmt>len(data)<eq>0<block_start>model={'ErrorMessage':'DataFrame length is 0'}<line_sep>success=<false><line_sep><return>success WindowDensityModel(**model)<block_end># Shift the interpolated value by +1 and get the log. This handles values with 0. 
<if_stmt>is_log_transformed<block_start>neg_flag=<true><if><not>data[data[target_metric]<l>0].empty<else><false><line_sep>data[imputed_metric]=data[imputed_metric]<if>neg_flag<else>np.log(data[imputed_metric]+1)<block_end>past_anomaly_scores,anomaly_scores_gamma_alpha,anomaly_scores_gamma_loc,anomaly_scores_gamma_beta,detrend_order,baseline,agg_data_model,agg_data,training_start,training_end=self._call_training(df=data window_length=window_length imputed_metric=imputed_metric detrend_method=detrend_method detection_method=detection_method freq=freq **kwargs)<line_sep>success=<true><line_sep>self.hyper_params['is_log_transformed']=is_log_transformed<line_sep>self.hyper_params['detection_method']=detection_method<line_sep>model={'TrainingStartDate':str(training_start) 'PastAnomalyScores':past_anomaly_scores 'AnomalyScoresGammaAlpha':anomaly_scores_gamma_alpha 'AnomalyScoresGammaLoc':anomaly_scores_gamma_loc 'AnomalyScoresGammaBeta':anomaly_scores_gamma_beta 'NonStationarityOrder':detrend_order 'Baseline':baseline 'AggregatedDataModel':agg_data_model 'AggregatedData':agg_data}<line_sep><return>success str(training_end) WindowDensityModel(hyper_params=self.hyper_params **model)<block_end><def_stmt>_call_scoring self df=<none> target_metric=<none> anomaly_scores_gamma_alpha=<none> anomaly_scores_gamma_loc=<none> anomaly_scores_gamma_beta=<none> baseline=<none> detrend_order=<none> detrend_method=<none> agg_data_model=<none> detection_method=<none> attributes=<none> agg_data=<none><block_start>""" This function generates the anomaly flag and and probability for the scoring window. :param pandas.DataFrame df: Input training data frame. :param str target_metric: Column storing the time series values. :param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter. :param float anomaly_scores_gamma_loc: Gamma fit location parameter. :param float anomaly_scores_gamma_beta: Gamma fit beta parameter. :param list baseline: A list storing a baseline window used to score the scoring window. :param int detrend_order: The order of detrending based on MA or differencing method. :param str detrend_method: Selects between "modeling" or "diff" detrend method. :param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data. :param str detection_method: Selects between "kldiv" or "sign_test" distance method. :param attributes: Model attributes. :param agg_data: Aggregated Data per day. :return: Returns the anomaly flag with the corresponding anomaly probability. 
:rtype: tuple(bool, float, dict) """<line_sep>is_anomaly,prob_of_anomaly=self._anomalous_region_detection(input_df=df value_column=target_metric called_for="scoring" anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha anomaly_scores_gamma_loc=anomaly_scores_gamma_loc anomaly_scores_gamma_beta=anomaly_scores_gamma_beta baseline=baseline detrend_order=detrend_order detrend_method=detrend_method agg_data_model=agg_data_model detection_method=detection_method agg_data=agg_data)<line_sep><return>is_anomaly prob_of_anomaly attributes<block_end><def_stmt>_get_result self input_df=<none> detrend_order=<none> agg_data_model=<none> value_column=<none> detrend_method=<none> baseline_type=<none> detection_method=<none> baseline=<none> anomaly_scores_gamma_alpha=<none> anomaly_scores_gamma_loc=<none> anomaly_scores_gamma_beta=<none> agg_data=<none><block_start>""" The function scores the scoring window for anomalies based on the training metrics and the baseline :param pandas.DataFrame input_df: Input data containing the training and the scoring data. :param int detrend_order: The non-negative order of detrending based on Modeling or differencing method. When the detrend_order > 0, corresponding detrending need to be performed using the method specified in the model config. :param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data. :param str value_column: Column containing the values. :param str detrend_method: Selects between "modeling" or "diff" detrend method. :param str baseline_type: Selects between "aggregated" or "last_window" baseline. :param str detection_method: Selects between "kldiv" or "sign_test" distance method. :param list baseline: A list storing a baseline window used to score the scoring window. :param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter. :param float anomaly_scores_gamma_loc: Gamma fit location parameter. :param float anomaly_scores_gamma_beta: Gamma fit beta parameter. :param agg_data: Aggregated Data per day. :return: Returns the anomaly flag with the corresponding anomaly probability. 
:rtype: tuple(bool, float) """<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>copy<import_stmt>scipy.stats<as>st<import_from_stmt>sklearn.decomposition PCA<import_from_stmt>sklearn.preprocessing StandardScaler<import_from_stmt>sklearn.covariance EmpiricalCovariance MinCovDet<import_stmt>collections<import_stmt>operator<line_sep>is_anomaly=<false><line_sep>execution_data=input_df[value_column]<line_sep>adjusted_execution_data=[]<line_sep>prob_of_anomaly=[]<line_sep>len_req_agg_data_model=42# Setting a hard threshold to have predictions from aggregated data # for stationarity adjustment <if_stmt>detrend_method<eq>'diff'# Obtain the execution data and perform the necessary differencing <block_start>execution_data=list(execution_data)<line_sep>adjusted_execution_data=np.diff(execution_data detrend_order).tolist()<if>detrend_order<g>0<else>execution_data<block_end><elif_stmt>detrend_method<eq>'modeling'<block_start>idx=input_df.index.normalize()<line_sep>dates_freq_dist=dict(collections.Counter(idx))<line_sep>scoring_datetime=str(max(dates_freq_dist.items() key=operator.itemgetter(1))[0])<line_sep>execution_data_avg=np.mean(execution_data)<line_sep># If detrending is needed, we scale the scoring data accordingly using the agg_dat_model forecast <if_stmt>detrend_order<g>0<block_start>snapshot_len_max=min(len(agg_data) len_req_agg_data_model)<line_sep>agg_data_trunc=np.array(agg_data)[: 1][-snapshot_len_max:]<line_sep>data_adjust_forecast=[]<try_stmt># Setting the data adjustment window of the original data using the predictions and the CILower and # CIUpper keeping the prediction uncertainty of the agg_model in mind <block_start><if_stmt>agg_data_model<and>len(agg_data)<g>len_req_agg_data_model<block_start>score=agg_data_model.score(execution_data_avg scoring_datetime)<line_sep>data_adjust_forecast.append(score['Prediction'])<line_sep>data_adjust_forecast.append(score['CILower'])<line_sep>data_adjust_forecast.append(score['CIUpper'])<block_end><else_stmt><block_start>data_adjust_forecast.append(np.median(agg_data_trunc))<line_sep>data_adjust_forecast.append(np.percentile(agg_data_trunc 5))# setting a 2-sigma limit data_adjust_forecast.append(np.percentile(agg_data_trunc 95))# setting a 2-sigma limit <block_end><block_end><except_stmt># If the scoring for the agg_data_model fails for some reason, we use the latest agg_data for the # detrending adjustment <block_start>data_adjust_forecast.append(np.median(agg_data_trunc))<line_sep>data_adjust_forecast.append(np.percentile(agg_data_trunc 5))# setting a 2-sigma limit data_adjust_forecast.append(np.percentile(agg_data_trunc 95))# setting a 2-sigma limit <block_end><for_stmt>i range(3)<block_start><if_stmt>data_adjust_forecast[i]<ne>0<block_start>adjusted_execution_data.append((execution_data/data_adjust_forecast[i]).tolist())<block_end><block_end><block_end><else_stmt><block_start>adjusted_execution_data=list(execution_data)<block_end><block_end># Kl divergence based anomaly detection <if_stmt>detection_method<eq>"kldiv"<block_start><if_stmt>detrend_order<g>0<block_start>prob_of_anomaly=[]<for_stmt>i range(3)<block_start>current_anomaly_score=self._distance_function(data=adjusted_execution_data[i] called_for="scoring" baseline=baseline)<line_sep>prob_of_anomaly.append(st.gamma.cdf(current_anomaly_score anomaly_scores_gamma_alpha anomaly_scores_gamma_loc anomaly_scores_gamma_beta))<block_end>prob_of_anomaly=np.min(prob_of_anomaly)<block_end><else_stmt><block_start>current_anomaly_score=self._distance_function(data=adjusted_execution_data 
called_for="scoring" baseline=baseline)<line_sep>prob_of_anomaly=st.gamma.cdf(current_anomaly_score anomaly_scores_gamma_alpha anomaly_scores_gamma_loc anomaly_scores_gamma_beta)<block_end><if_stmt>1-prob_of_anomaly<l>self.sig_level<block_start>is_anomaly=<true><block_end><block_end># Sign test based anomaly detection <elif_stmt>detection_method<eq>"sign_test"# If last window is the baseline, we perform the Wilcoxon sign rank test for means and levene # test for variance to detect anomalies <block_start><if_stmt>baseline_type<eq>"last_window"<block_start>test_stat_wilcoxon,pvalue_wilcoxon=st.wilcoxon(execution_data baseline)<line_sep>test_stat_levene,pvalue_levene=st.levene(execution_data baseline)<if_stmt>pvalue_wilcoxon<l>self.sig_level<or>pvalue_levene<l>self.sig_level<block_start>is_anomaly=<true><block_end>prob_of_anomaly=1-min(pvalue_wilcoxon pvalue_levene)<block_end># If aggregated is the baseline, we perform the Wilcoxon sign rank test for means and gamma distribution # based test for the past standard deviations to detect anomalies <elif_stmt>baseline_type<eq>"aggregated"<block_start>baseline_sds=np.array(baseline).std(1).tolist()<if_stmt>detrend_order<eq>0# crearing a 2d list to make it easy to loop through in the following for loop <block_start>adjusted_execution_data=[adjusted_execution_data]<block_end><for_stmt>current_adjusted_data adjusted_execution_data<block_start>baseline_execution_data=copy.copy(baseline)<line_sep>baseline_execution_data.append(current_adjusted_data)<line_sep>pca=PCA()<line_sep>scores=pca.fit_transform(StandardScaler().fit_transform(baseline_execution_data))<line_sep>robust_cov=MinCovDet().fit(scores[: :3])<line_sep>mahalanobis_distance=robust_cov.mahalanobis(scores[: :3])# getting the top 3 dimensions pvalue_mahalanobis=1-st.chi2.cdf(mahalanobis_distance[-1] np.array(baseline_execution_data).shape[1])<line_sep>gamma_alpha,gamma_loc,gamma_beta=st.gamma.fit(baseline_sds)<line_sep>pvalue_gamma=1-st.gamma.cdf(np.std(current_adjusted_data) gamma_alpha gamma_loc gamma_beta)<if_stmt>pvalue_mahalanobis<l>self.sig_level<or>pvalue_gamma<l>self.sig_level<block_start>is_anomaly=<true><block_end>prob_of_anomaly.append(1-min(pvalue_mahalanobis pvalue_gamma))<block_end>prob_of_anomaly=np.min(prob_of_anomaly)<block_end><block_end><return>is_anomaly prob_of_anomaly<block_end><def_stmt>score self data **kwargs<block_start>""" Function scores input series for anomalies :param pandas.DataFrame data: Input time series to score :return: Output dictionary with scoring summary. 
:rtype: dict >>> data raw interpolated index 2018-10-11 00:00:00 204800 204800 2018-10-11 01:00:00 222218 222218 2018-10-11 02:00:00 218903 218903 2018-10-11 03:00:00 190639 190639 2018-10-11 04:00:00 148214 148214 2018-10-11 05:00:00 106358 106358 2018-10-11 06:00:00 70081 70081 2018-10-11 07:00:00 47748 47748 2018-10-11 08:00:00 36837 36837 2018-10-11 09:00:00 33023 33023 2018-10-11 10:00:00 44432 44432 2018-10-11 11:00:00 72773 72773 2018-10-11 12:00:00 115180 115180 2018-10-11 13:00:00 157568 157568 2018-10-11 14:00:00 180174 180174 2018-10-11 15:00:00 190048 190048 2018-10-11 16:00:00 188391 188391 2018-10-11 17:00:00 189233 189233 2018-10-11 18:00:00 191703 191703 2018-10-11 19:00:00 189848 189848 2018-10-11 20:00:00 192685 192685 2018-10-11 21:00:00 196743 196743 2018-10-11 22:00:00 193016 193016 2018-10-11 23:00:00 196441 196441 >>> model <luminaire.model.window_density.WindowDensityModel object at 0x7fcaab72fdd8> >>> model.score(data) {'Success': True, 'ConfLevel': 99.9, 'IsAnomaly': False, 'AnomalyProbability': 0.6963188902776808} """<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<line_sep>is_log_transformed=self._params['is_log_transformed']<line_sep>detrend_method=self._params['detrend_method']<line_sep>target_metric='raw'<line_sep>imputed_metric='interpolated'<line_sep>detection_method=self._params['detection_method']<line_sep># We want to make sure the time series does not contain any negatives in case of log transformation <if_stmt>is_log_transformed<block_start>neg_flag=<true><if><not>data[data[target_metric]<l>0].empty<else><false><line_sep>data[imputed_metric]=data[imputed_metric]<if>neg_flag<else>np.log(data[imputed_metric]+1)<block_end>model_timestamps=list(self._params['AnomalyScoresGammaAlpha'].keys())<line_sep>scoring_start=data.index[0]<line_sep>current_min_timedelta=pd.Timedelta('10D')<for_stmt>timestamp model_timestamps<block_start>current_datetime=pd.Timestamp(str(scoring_start.date())+' '+timestamp)<line_sep>temp_timedelta=scoring_start-current_datetime<line_sep>temp_timedelta=pd.Timedelta('1D')+temp_timedelta<if>temp_timedelta<l>pd.Timedelta(0)<else>temp_timedelta<if_stmt>temp_timedelta<l>current_min_timedelta<block_start>opt_timestamp=timestamp<line_sep>current_min_timedelta=temp_timedelta<block_end><block_end>anomaly_scores_gamma_alpha=self._params['AnomalyScoresGammaAlpha'][opt_timestamp]<line_sep>anomaly_scores_gamma_loc=self._params['AnomalyScoresGammaLoc'][opt_timestamp]<line_sep>anomaly_scores_gamma_beta=self._params['AnomalyScoresGammaBeta'][opt_timestamp]<line_sep>baseline=self._params['Baseline'][opt_timestamp]<line_sep>detrend_order=self._params['NonStationarityOrder'][opt_timestamp]<line_sep>agg_data_model=self._params['AggregatedDataModel'][opt_timestamp]<line_sep>agg_data=self._params['AggregatedData'][opt_timestamp]<line_sep>is_anomaly,prob_of_anomaly,attributes=self._call_scoring(df=data target_metric=target_metric anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha anomaly_scores_gamma_loc=anomaly_scores_gamma_loc anomaly_scores_gamma_beta=anomaly_scores_gamma_beta baseline=baseline detrend_order=detrend_order detrend_method=detrend_method agg_data_model=agg_data_model detection_method=detection_method agg_data=agg_data)<line_sep>result={'Success':<true> 'ConfLevel':float(1.0-self.sig_level)<times>100 'IsAnomaly':is_anomaly 'AnomalyProbability':float(prob_of_anomaly) }<line_sep><return>result data.reset_index().values.tolist()<block_end><def_stmt>_anomalous_region_detection self input_df=<none> window_length=<none> value_column=<none> 
called_for=<none> anomaly_scores_gamma_alpha=<none> anomaly_scores_gamma_loc=<none> anomaly_scores_gamma_beta=<none> detrend_order=<none> baseline=<none> detrend_method=<none> agg_data_model=<none> past_model=<none> detection_method=<none> agg_data=<none><block_start>""" This function detects anomaly given a training and a scoring window. :param pandas.DataFrame input_df: Input data containing the training and the scoring data. :param int window_length: The length of a training sub-window / scoring window. :param str value_column: A string identifying the value column from the input dataframe :param str called_for: A flag to specify whether this function is called for training or scoring. :param float anomaly_scores_gamma_alpha: Gamma fit alpha parameter. :param float anomaly_scores_gamma_loc: Gamma fit location parameter. :param float anomaly_scores_gamma_beta: Gamma fit beta parameter. :param int detrend_order: Number of differencing for the scoring data. Only required if called for scoring. :param list baseline: The baseline for the scoring. only required if called for scoring. :param str detrend_method: Selects between "modeling" or "diff" detrend method. :param luminaire.model.lad_structural.LADStructuralModel agg_data_model: Prediction model for aggregated data. :param luminaire.model.window_density.WindowDensityModel past_model: Past stored window density model. :param str detection_method: Selects between "kldiv" or "sign_test" distance method. :param agg_data: Aggregated Data per day. :return: Anomaly flag with the corresponding probability of anomaly. :rtype: tuple(bool, float) """<line_sep>baseline_type=self._params['baseline_type']<line_sep>input_df.fillna(0 inplace=<true>)<line_sep># The function can be called for either training or scoring <if_stmt>called_for<eq>"training"<block_start><return>self._get_model(input_df=input_df window_length=window_length value_column=value_column detrend_method=detrend_method baseline_type=baseline_type detection_method=detection_method past_model=past_model)<block_end><elif_stmt>called_for<eq>"scoring"<block_start><return>self._get_result(input_df=input_df detrend_order=detrend_order agg_data_model=agg_data_model value_column=value_column detrend_method=detrend_method baseline_type=baseline_type detection_method=detection_method baseline=baseline anomaly_scores_gamma_alpha=anomaly_scores_gamma_alpha anomaly_scores_gamma_loc=anomaly_scores_gamma_loc anomaly_scores_gamma_beta=anomaly_scores_gamma_beta agg_data=agg_data)<block_end><block_end><block_end>
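# Illustrative scoring sketch for the window density scorer above: a minimal example of
# how a trained model might score one window of data, following the doctest in
# WindowDensityModel.score. The `model` object and the flat series values are
# placeholders for illustration only, not taken from the source.
import pandas as pd

idx = pd.date_range("2018-10-11", periods=24, freq="H")
window = pd.DataFrame({"raw": [200000] * 24, "interpolated": [200000] * 24}, index=idx)
# result = model.score(window)   # per the docstring: {'Success': True, 'ConfLevel': 99.9, 'IsAnomaly': ..., 'AnomalyProbability': ...}
# --- end of illustrative sketch ---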
<import_from_stmt>PyObjCTools.TestSupport *<import_stmt>objc<import_from_stmt>Foundation *<class_stmt>TestNSSetInteraction(TestCase)<block_start><def_stmt>__testRepeatedAllocInit self<block_start><for_stmt>i range(1 1000)<block_start>a=NSSet.alloc().init()<block_end><block_end><def_stmt>__testContains self<block_start>x=NSSet.setWithArray_(["foo" "bar" "baz"])<line_sep>self.assert_("foo"<in>x)<line_sep>self.assert_("notfoo"<not><in>x)<block_end><def_stmt>__testIteration self<block_start>x=NSSet.setWithArray_(["foo" "bar" "baz"])<for_stmt>i x<block_start>self.assert_(i<in>x)<line_sep>self.assert_(x.containsObject_(i))<block_end><block_end><def_stmt>test_varargsConstruction self<block_start>w=NSSet.setWithObjects_(0 1 2 3 <none>)<line_sep>x=NSSet.alloc().initWithObjects_(0 1 2 3 <none>)<line_sep>y=NSSet.setWithObjects_count_(range(10) 4)<line_sep>z=NSSet.alloc().initWithObjects_count_(range(10) 4)<line_sep>#a = NSSet.alloc().initWithObjects_count_(range(4), None) self.assert_(len(w)<eq>4)<line_sep>self.assert_(len(x)<eq>4)<line_sep>self.assert_(len(y)<eq>4)<line_sep>self.assert_(len(z)<eq>4)<line_sep>#self.assert_(len(a) == 4) self.assert_(0<in>w)<line_sep>self.assert_(1<in>x)<line_sep>self.assert_(2<in>y)<line_sep>self.assert_(3<in>z)<line_sep>#self.assert_(3 in a) <block_end><def_stmt>test_varargsConstruction2 self<block_start>w=NSMutableSet.setWithObjects_(0 1 2 3 <none>)<line_sep>x=NSMutableSet.alloc().initWithObjects_(0 1 2 3 <none>)<line_sep>y=NSMutableSet.setWithObjects_count_(range(10) 4)<line_sep>z=NSMutableSet.alloc().initWithObjects_count_(range(10) 4)<line_sep>self.assert_(len(w)<eq>4)<line_sep>self.assert_(len(x)<eq>4)<line_sep>self.assert_(len(y)<eq>4)<line_sep>self.assert_(len(z)<eq>4)<line_sep>self.assert_(0<in>w)<line_sep>self.assert_(1<in>x)<line_sep>self.assert_(2<in>y)<line_sep>self.assert_(3<in>z)<block_end><block_end><class_stmt>TestVariadic(TestCase)<block_start><def_stmt>testSetWithObjects self<block_start>o=NSSet.setWithObjects_()<line_sep>self.assertEqual(len(o) 0)<line_sep>self.assert_(isinstance(o NSSet))<line_sep>o=NSSet.setWithObjects_(1 2 3)<line_sep>self.assertEqual(len(o) 3)<line_sep>self.assert_(isinstance(o NSSet))<line_sep>self.assert_(1<in>o)<line_sep>self.assert_(2<in>o)<line_sep>self.assert_(3<in>o)<line_sep>o=NSMutableSet.setWithObjects_()<line_sep>self.assertEqual(len(o) 0)<line_sep>self.assert_(isinstance(o NSMutableSet))<line_sep>o=NSMutableSet.setWithObjects_(1 2 3)<line_sep>self.assertEqual(len(o) 3)<line_sep>self.assert_(isinstance(o NSMutableSet))<line_sep>self.assert_(1<in>o)<line_sep>self.assert_(2<in>o)<line_sep>self.assert_(3<in>o)<block_end><def_stmt>testInitWithObjects self<block_start>o=NSSet.alloc().initWithObjects_()<line_sep>self.assertEqual(len(o) 0)<line_sep>self.assert_(isinstance(o NSSet))<line_sep>o=NSSet.alloc().initWithObjects_(1 2 3)<line_sep>self.assertEqual(len(o) 3)<line_sep>self.assert_(isinstance(o NSSet))<line_sep>self.assert_(1<in>o)<line_sep>self.assert_(2<in>o)<line_sep>self.assert_(3<in>o)<line_sep>o=NSMutableSet.alloc().initWithObjects_()<line_sep>self.assertEqual(len(o) 0)<line_sep>self.assert_(isinstance(o NSMutableSet))<line_sep>o=NSMutableSet.alloc().initWithObjects_(1 2 3)<line_sep>self.assertEqual(len(o) 3)<line_sep>self.assert_(isinstance(o NSMutableSet))<line_sep>self.assert_(1<in>o)<line_sep>self.assert_(2<in>o)<line_sep>self.assert_(3<in>o)<block_end><def_stmt>testSetWithObjectsCount self<block_start>o=NSSet.setWithObjects_count_([1 2 3] 3)<line_sep>self.assertEqual(len(o) 
3)<line_sep>self.assert_(isinstance(o NSSet))<line_sep>self.assert_(1<in>o)<line_sep>self.assert_(2<in>o)<line_sep>self.assert_(3<in>o)<line_sep>self.assert_(4<not><in>o)<line_sep>o=NSSet.setWithObjects_count_([1 2 3] 0)<line_sep>self.assertEqual(len(o) 0)<line_sep>self.assert_(isinstance(o NSSet))<line_sep>o=NSMutableSet.setWithObjects_count_([1 2 3] 3)<line_sep>self.assertEqual(len(o) 3)<line_sep>self.assert_(isinstance(o NSMutableSet))<line_sep>self.assert_(1<in>o)<line_sep>self.assert_(2<in>o)<line_sep>self.assert_(3<in>o)<line_sep>o=NSMutableSet.setWithObjects_count_([1 2 3] 0)<line_sep>self.assertEqual(len(o) 0)<line_sep>self.assert_(isinstance(o NSMutableSet))<block_end><def_stmt>testInitWithObjectsCount self<block_start>o=NSSet.alloc().initWithObjects_count_([1 2 3] 3)<line_sep>self.assertEqual(len(o) 3)<line_sep>self.assert_(isinstance(o NSSet))<line_sep>self.assert_(1<in>o)<line_sep>self.assert_(2<in>o)<line_sep>self.assert_(3<in>o)<line_sep>self.assert_(4<not><in>o)<line_sep>o=NSSet.alloc().initWithObjects_count_([1 2 3] 0)<line_sep>self.assertEqual(len(o) 0)<line_sep>self.assert_(isinstance(o NSSet))<line_sep>o=NSMutableSet.alloc().initWithObjects_count_([1 2 3] 3)<line_sep>self.assertEqual(len(o) 3)<line_sep>self.assert_(isinstance(o NSMutableSet))<line_sep>self.assert_(1<in>o)<line_sep>self.assert_(2<in>o)<line_sep>self.assert_(3<in>o)<line_sep>o=NSMutableSet.alloc().initWithObjects_count_([1 2 3] 0)<line_sep>self.assertEqual(len(o) 0)<line_sep>self.assert_(isinstance(o NSMutableSet))<block_end><def_stmt>testMethods self<block_start>self.assertResultIsBOOL(NSSet.containsObject_)<line_sep>self.assertResultIsBOOL(NSSet.intersectsSet_)<line_sep>self.assertResultIsBOOL(NSSet.isEqualToSet_)<line_sep>self.assertResultIsBOOL(NSSet.isSubsetOfSet_)<line_sep>self.assertArgIsIn(NSSet.setWithObjects_count_ 0)<line_sep>self.assertArgSizeInArg(NSSet.setWithObjects_count_ 0 1)<line_sep>self.assertArgIsIn(NSSet.initWithObjects_count_ 0)<line_sep>self.assertArgSizeInArg(NSSet.initWithObjects_count_ 0 1)<line_sep>self.assertArgIsBOOL(NSSet.initWithSet_copyItems_ 1)<block_end>@min_os_level('10.6')<def_stmt>testMethods10_6 self<block_start>self.assertArgIsBlock(NSSet.enumerateObjectsUsingBlock_ 0 b'v@o^'+objc._C_NSBOOL)<line_sep>self.assertArgIsBlock(NSSet.enumerateObjectsWithOptions_usingBlock_ 1 b'v@o^'+objc._C_NSBOOL)<line_sep>self.assertArgIsBlock(NSSet.objectsPassingTest_ 0 objc._C_NSBOOL+b'@o^'+objc._C_NSBOOL)<line_sep>self.assertArgIsBlock(NSSet.objectsWithOptions_passingTest_ 1 objc._C_NSBOOL+b'@o^'+objc._C_NSBOOL)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
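# Illustrative sketch mirroring the containment/iteration tests above: the bridged
# NSSet behaves like a Python container. The sample values are placeholders.
from Foundation import NSSet

s = NSSet.setWithArray_(["foo", "bar", "baz"])
assert "foo" in s and "qux" not in s
for item in s:
    assert s.containsObject_(item)
# --- end of illustrative sketch ---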
<import_stmt>doublemetaphone<def_stmt>match value1 value2<block_start>value1metaphone=doublemetaphone.doublemetaphone(value1)<line_sep>value2metaphone=doublemetaphone.doublemetaphone(value2)<line_sep>possibilities=[value1metaphone[0]<eq>value2metaphone[0] value1metaphone[0]<eq>value2metaphone[1] value1metaphone[1]<eq>value2metaphone[0] value1metaphone[1]<eq>value2metaphone[1]<ne>'']<line_sep><return>1.0<if><true><in>possibilities<else>0.0<block_end>
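# Illustrative sketch of the phonetic matcher defined above: two spellings that share
# a Double Metaphone encoding score 1.0, anything else 0.0. The sample names and the
# printed codes are illustrative; `match` refers to the function defined above.
import doublemetaphone

print(doublemetaphone.doublemetaphone("Smith"))  # primary/secondary codes, e.g. ('SM0', 'XMT')
print(match("Smith", "Smyth"))                   # 1.0 when any pair of codes agrees
print(match("Smith", "Jones"))                   # 0.0 otherwise
# --- end of illustrative sketch ---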
''' Helper function that imports a set of unlabeled images into the database. Works recursively (i.e., with images in nested folders) and different file formats and extensions (.jpg, .JPEG, .png, etc.). Skips images that have already been added to the database. Using this script requires the following steps: 1. Make sure your images are of common format and readable by the web server (i.e., convert camera RAW images first). 2. Copy your image folder into the FileServer's root file directory (i.e., corresponding to the path under "staticfiles_dir" in the configuration *.ini file). 3. Call the script from the AIDE code base on the FileServer instance. 2019-21 <NAME> '''<import_stmt>os<import_stmt>argparse<import_from_stmt>psycopg2 sql<import_from_stmt>util.helpers VALID_IMAGE_EXTENSIONS listDirectory<if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser(description='Import images into database.')<line_sep>parser.add_argument('--project' type=str help='Shortname of the project to insert the images into.')<line_sep>parser.add_argument('--settings_filepath' type=str default='config/settings.ini' const=1 nargs='?' help='Manual specification of the directory of the settings.ini file; only considered if environment variable unset (default: "config/settings.ini").')<line_sep>args=parser.parse_args()<line_sep># setup print('Setup...')<if_stmt><not>'AIDE_CONFIG_PATH'<in>os.environ<block_start>os.environ['AIDE_CONFIG_PATH']=str(args.settings_filepath)<block_end><import_from_stmt>tqdm tqdm<import_stmt>datetime<import_from_stmt>util.configDef Config<import_from_stmt>modules Database<line_sep>currentDT=datetime.datetime.now()<line_sep>currentDT='{}-{}-{} {}:{}:{}'.format(currentDT.year currentDT.month currentDT.day currentDT.hour currentDT.minute currentDT.second)<line_sep>config=Config()<line_sep>dbConn=Database(config)<if_stmt><not>dbConn.canConnect()<block_start><raise>Exception('Error connecting to database.')<block_end>project=args.project<line_sep># check if running on file server imgBaseDir=config.getProperty('FileServer' 'staticfiles_dir')<if_stmt><not>os.path.isdir(imgBaseDir)<block_start><raise>Exception(f'"{imgBaseDir}" is not a valid directory on this machine. 
Are you running the script from the file server?')<block_end><if_stmt><not>imgBaseDir.endswith(os.sep)<block_start>imgBaseDir<augadd>os.sep<block_end># locate all images and their base names print('Locating image paths...')<line_sep>imgs=set()<line_sep>imgFiles=listDirectory(imgBaseDir recursive=<true>)#glob.glob(os.path.join(imgBaseDir, '**'), recursive=True) #TODO: check if correct imgFiles=list(imgFiles)<for_stmt>i tqdm(imgFiles)<block_start><if_stmt>os.path.isdir(i)<block_start><continue><block_end>_,ext=os.path.splitext(i)<if_stmt>ext.lower()<not><in>VALID_IMAGE_EXTENSIONS<block_start><continue><block_end>baseName=i.replace(imgBaseDir '')<line_sep>imgs.add(baseName)<block_end># ignore images that are already in database print('Filter images already in database...')<line_sep>imgs_existing=dbConn.execute(sql.SQL(''' SELECT filename FROM {}; ''').format(sql.Identifier(project 'image')) <none> 'all')<if_stmt>imgs_existing<is><not><none><block_start>imgs_existing=set([i['filename']<for>i imgs_existing])<block_end><else_stmt><block_start>imgs_existing=set()<block_end>imgs=list(imgs.difference(imgs_existing))<line_sep>imgs=[(i )<for>i imgs]<line_sep># push image to database print('Adding to database...')<line_sep>dbConn.insert(sql.SQL(''' INSERT INTO {} (filename) VALUES %s; ''').format(sql.Identifier(project 'image')) imgs)<line_sep>print('Done.')<block_end>
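# Illustrative invocation of the import helper above, run on the FileServer host after
# copying the images under "staticfiles_dir". The script filename and project shortname
# are placeholders, not taken from the source:
#
#   export AIDE_CONFIG_PATH=config/settings.ini
#   python import_images.py --project my_project
# --- end of illustrative note ---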
<import_stmt>atexit<import_from_stmt>mock MagicMock PropertyMock<import_from_stmt>nose.tools raises assert_equals<import_stmt>sparkmagic.utils.configuration<as>conf<import_from_stmt>sparkmagic.livyclientlib.exceptions SessionManagementException<import_from_stmt>sparkmagic.livyclientlib.sessionmanager SessionManager<line_sep>@raises(SessionManagementException)<def_stmt>test_get_client_throws_when_client_not_exists <block_start>manager=get_session_manager()<line_sep>manager.get_session("name")<block_end><def_stmt>test_get_client <block_start>client=MagicMock()<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name" client)<line_sep>assert_equals(client manager.get_session("name"))<block_end>@raises(SessionManagementException)<def_stmt>test_delete_client <block_start>client=MagicMock()<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name" client)<line_sep>manager.delete_client("name")<line_sep>manager.get_session("name")<block_end>@raises(SessionManagementException)<def_stmt>test_delete_client_throws_when_client_not_exists <block_start>manager=get_session_manager()<line_sep>manager.delete_client("name")<block_end>@raises(SessionManagementException)<def_stmt>test_add_client_throws_when_client_exists <block_start>client=MagicMock()<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name" client)<line_sep>manager.add_session("name" client)<block_end><def_stmt>test_client_names_returned <block_start>client=MagicMock()<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name0" client)<line_sep>manager.add_session("name1" client)<line_sep>assert_equals({"name0" "name1"} set(manager.get_sessions_list()))<block_end><def_stmt>test_get_any_client <block_start>client=MagicMock()<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name" client)<line_sep>assert_equals(client manager.get_any_session())<block_end>@raises(SessionManagementException)<def_stmt>test_get_any_client_raises_exception_with_no_client <block_start>manager=get_session_manager()<line_sep>manager.get_any_session()<block_end>@raises(SessionManagementException)<def_stmt>test_get_any_client_raises_exception_with_two_clients <block_start>client=MagicMock()<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name0" client)<line_sep>manager.add_session("name1" client)<line_sep>manager.get_any_session()<block_end><def_stmt>test_clean_up <block_start>client0=MagicMock()<line_sep>client1=MagicMock()<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name0" client0)<line_sep>manager.add_session("name1" client1)<line_sep>manager.clean_up_all()<line_sep>client0.delete.assert_called_once_with()<line_sep>client1.delete.assert_called_once_with()<block_end><def_stmt>test_cleanup_all_sessions_on_exit <block_start>conf.override(conf.cleanup_all_sessions_on_exit.__name__ <true>)<line_sep>client0=MagicMock()<line_sep>client1=MagicMock()<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name0" client0)<line_sep>manager.add_session("name1" client1)<line_sep>atexit._run_exitfuncs()<line_sep>client0.delete.assert_called_once_with()<line_sep>client1.delete.assert_called_once_with()<line_sep>manager.ipython_display.writeln.assert_called_once_with(u"Cleaning up livy sessions on exit is enabled")<block_end><def_stmt>test_cleanup_all_sessions_on_exit_fails <block_start>""" Cleanup on exit is best effort only. When cleanup fails, exception is caught and error is logged. 
"""<line_sep>conf.override(conf.cleanup_all_sessions_on_exit.__name__ <true>)<line_sep>client0=MagicMock()<line_sep>client1=MagicMock()<line_sep>client0.delete.side_effect=Exception('Mocked exception for client1.delete')<line_sep>manager=get_session_manager()<line_sep>manager.add_session("name0" client0)<line_sep>manager.add_session("name1" client1)<line_sep>atexit._run_exitfuncs()<line_sep>client0.delete.assert_called_once_with()<line_sep>client1.delete.assert_not_called()<block_end><def_stmt>test_get_session_id_for_client <block_start>manager=get_session_manager()<line_sep>manager.get_sessions_list=MagicMock(return_value=["name"])<line_sep>manager._sessions["name"]=MagicMock()<line_sep>id=manager.get_session_id_for_client("name")<assert_stmt>id<is><not><none><block_end><def_stmt>test_get_session_name_by_id_endpoint <block_start>manager=get_session_manager()<line_sep>id_to_search="0"<line_sep>endpoint_to_search="endpoint"<line_sep>name_to_search="name"<line_sep>name=manager.get_session_name_by_id_endpoint(id_to_search endpoint_to_search)<line_sep>assert_equals(<none> name)<line_sep>session=MagicMock()<line_sep>type(session).id=PropertyMock(return_value=int(id_to_search))<line_sep>session.endpoint=endpoint_to_search<line_sep>manager.add_session(name_to_search session)<line_sep>name=manager.get_session_name_by_id_endpoint(id_to_search endpoint_to_search)<line_sep>assert_equals(name_to_search name)<block_end><def_stmt>test_get_session_id_for_client_not_there <block_start>manager=get_session_manager()<line_sep>manager.get_sessions_list=MagicMock(return_value=[])<line_sep>id=manager.get_session_id_for_client("name")<assert_stmt>id<is><none><block_end><def_stmt>get_session_manager <block_start>ipython_display=MagicMock()<line_sep><return>SessionManager(ipython_display)<block_end>
<import_from_stmt>persimmon.view.pins.circularbutton CircularButton# MYPY HACK <import_from_stmt>persimmon.view.util Type AbstractWidget Connection<import_from_stmt>kivy.properties ObjectProperty<import_from_stmt>kivy.lang Builder<import_from_stmt>kivy.graphics Color Ellipse Line<import_from_stmt>kivy.input MotionEvent<import_from_stmt>abc abstractmethod<line_sep>Builder.load_file('persimmon/view/pins/pin.kv')<class_stmt>Pin(CircularButton metaclass=AbstractWidget)<block_start>val=ObjectProperty(<none> force_dispatch=<true>)<line_sep>block=ObjectProperty()<line_sep>type_=ObjectProperty(Type.ANY)<line_sep>@abstractmethod<def_stmt>on_touch_down self touch:MotionEvent<arrow>bool<block_start><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>on_touch_up self touch:MotionEvent<arrow>bool<block_start><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>on_connection_delete self connection:Connection<block_start><raise>NotImplementedError<block_end>@abstractmethod<def_stmt>connect_pin self connection:Connection<block_start><raise>NotImplementedError<block_end><def_stmt>typesafe self other:'Pin'<arrow>bool<block_start>""" Tells if a relation between two pins is typesafe. """<if_stmt>self.block<eq>other.block<or>self.__class__<eq>other.__class__<block_start><return><false><block_end><elif_stmt>self.type_<eq>Type.ANY<or>other.type_<eq>Type.ANY<block_start><return><true># Anything is possible with ANY <block_end><else_stmt><block_start><return>self.type_<eq>other.type_<block_end><block_end># Hack <def_stmt>on_type_ self instance:'Pin' value:Type<block_start>""" If the kv lang was a bit smarter this would not be needed """<line_sep>self.color=value.value<block_end><block_end>
<import_from_stmt>widgets *<line_sep>
""" .. module:: Multi :platform: Unix, Windows :synopsis: Provides container classes for spline geoemtries .. moduleauthor:: <NAME> <<EMAIL>> """<import_stmt>abc<import_stmt>warnings<import_from_stmt>functools partial<import_from_stmt>multiprocessing Value Lock<import_from_stmt>. abstract<import_from_stmt>. vis<import_from_stmt>. voxelize<import_from_stmt>. utilities<import_from_stmt>. tessellate<import_from_stmt>. _utilities<as>utl<import_from_stmt>.exceptions GeomdlException<line_sep>@utl.add_metaclass(abc.ABCMeta)<class_stmt>AbstractContainer(abstract.GeomdlBase)<block_start>""" Abstract class for geometry containers. This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in a for loop. This class provides the following properties: * :py:attr:`type` = container * :py:attr:`id` * :py:attr:`name` * :py:attr:`dimension` * :py:attr:`opt` * :py:attr:`pdimension` * :py:attr:`evalpts` * :py:attr:`bbox` * :py:attr:`vis` * :py:attr:`delta` * :py:attr:`sample_size` """<def_stmt>__init__ self *args **kwargs<block_start>self._pdim=0<if><not>hasattr(self '_pdim')<else>self._pdim# number of parametric dimensions self._dinit=0.01<if><not>hasattr(self '_dinit')<else>self._dinit# delta initialization value super(AbstractContainer self).__init__(**kwargs)<line_sep>self._geometry_type="container"<line_sep>self._name=self._geometry_type<line_sep>self._delta=[float(self._dinit)<for>_ range(self._pdim)]# evaluation delta self._elements=[]# list of elements contained self._vis_component=<none># visualization component self._cache['evalpts']=[]<block_end><def_stmt>__iter__ self<block_start>self._iter_index=0<line_sep><return>self<block_end><def_stmt>next self<block_start><return>self.__next__()<block_end><def_stmt>__next__ self<block_start><try_stmt><block_start>result=self._elements[self._iter_index]<block_end><except_stmt>IndexError<block_start><raise>StopIteration<block_end>self._iter_index<augadd>1<line_sep><return>result<block_end><def_stmt>__reversed__ self<block_start><return>reversed(self._elements)<block_end><def_stmt>__getitem__ self index<block_start><return>self._elements[index]<block_end><def_stmt>__len__ self<block_start><return>len(self._elements)<block_end><def_stmt>__add__ self other<block_start><if_stmt><not>isinstance(other self.__class__)<block_start><raise>GeomdlException("Cannot add non-matching container types")<block_end>self.add(other)<line_sep><return>self<block_end>@property<def_stmt>pdimension self<block_start>""" Parametric dimension. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the parametric dimension :type: int """<line_sep><return>self._pdim<block_end>@property<def_stmt>evalpts self<block_start>""" Evaluated points. Since there are multiple geometry objects contained in the multi objects, the evaluated points will be returned in the format of list of individual evaluated points which is also a list of Cartesian coordinates. The following code example illustrates these details: .. code-block:: python :linenos: multi_obj = multi.SurfaceContainer() # it can also be multi.CurveContainer() # Add geometries to multi_obj via multi_obj.add() method # Then, the following loop will print all the evaluated points of the Multi object for idx, mpt in enumerate(multi_obj.evalpts): print("Shape", idx+1, "contains", len(mpt), "points. 
These points are:") for pt in mpt: line = ", ".join([str(p) for p in pt]) print(line) Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the evaluated points of all contained geometries """<if_stmt><not>self._cache['evalpts']<block_start><for_stmt>elem self._elements<block_start>elem.delta=self._delta[0]<if>self._pdim<eq>1<else>self._delta<line_sep>evalpts=elem.evalpts<line_sep>self._cache['evalpts']<augadd>evalpts<block_end><block_end><return>self._cache['evalpts']<block_end>@property<def_stmt>bbox self<block_start>""" Bounding box. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the bounding box of all contained geometries """<line_sep>all_box=[]<for_stmt>elem self._elements<block_start>all_box<augadd>list(elem.bbox)<block_end><return>utilities.evaluate_bounding_box(all_box)<block_end>@property<def_stmt>vis self<block_start>""" Visualization component. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the visualization component :setter: Sets the visualization component """<line_sep><return>self._vis_component<block_end>@vis.setter<def_stmt>vis self value<block_start><if_stmt><not>isinstance(value vis.VisAbstract)<block_start>warnings.warn("Visualization component is NOT an instance of the vis.VisAbstract class")<line_sep><return><block_end>self._vis_component=value<block_end>@property<def_stmt>delta self<block_start>""" Evaluation delta (for all parametric directions). Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points. Therefore; smaller the delta value, smoother the shape. The following figure illustrates the working principles of the delta property: .. math:: \\left[{{u_{start}},{u_{start}} + \\delta ,({u_{start}} + \\delta ) + \\delta , \\ldots ,{u_{end}}} \\right] Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the delta value :setter: Sets the delta value """<line_sep><return>self._delta[0]<if>self._pdim<eq>1<else>self._delta<block_end>@delta.setter<def_stmt>delta self value<block_start><if_stmt>self._pdim<eq>1<and>isinstance(value (int float))<block_start>delta_vals=[value]<block_end><else_stmt><block_start><if_stmt>isinstance(value (list tuple))<block_start><if_stmt>len(value)<ne>self._pdim<block_start><raise>ValueError("The input must be a list of a tuple with a length of "+str(self._pdim))<block_end>delta_vals=value<block_end><elif_stmt>isinstance(value (int float))<block_start>delta_vals=[value<for>_ range(self._pdim)]<block_end><else_stmt><block_start><raise>TypeError("Unsupported input type for evaluation delta. Use float, list or tuple")<block_end><block_end># Set delta values <for_stmt>idx,dval enumerate(delta_vals)<block_start>self._delta_setter_common(idx dval)<block_end># Reset the cache self.reset()<block_end><def_stmt>_delta_setter_common self idx value# Check and set the delta value corresponding to the idx-th parametric dimension <block_start><if_stmt>float(value)<le>0<or>float(value)<ge>1<block_start><raise>ValueError("Evaluation delta should be between 0.0 and 1.0. 
You are trying to set it to "+str(value)+" for the "+str(idx+1)+"st parametric dimension.")<block_end>self._delta[idx]=float(value)<block_end>@property<def_stmt>sample_size self<block_start>""" Sample size (for all parametric directions). Sample size defines the number of points to evaluate. It also sets the ``delta`` property. The following figure illustrates the working principles of sample size property: .. math:: \\underbrace {\\left[ {{u_{start}}, \\ldots ,{u_{end}}} \\right]}_{{n_{sample}}} Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets sample size :setter: Sets sample size """<line_sep>ssz=[self._sample_size_getter_common(idx)<for>idx range(self._pdim)]<line_sep><return>ssz[0]<if>self._pdim<eq>1<else>ssz<block_end>@sample_size.setter<def_stmt>sample_size self value<block_start><if_stmt>self._pdim<eq>1<and>isinstance(value (int float))<block_start>ssz=[value]<block_end><else_stmt><block_start><if_stmt>isinstance(value (list tuple))<block_start><if_stmt>len(value)<ne>self._pdim<block_start><raise>ValueError("The input must be a list of a tuple with a length of "+str(self._pdim))<block_end>ssz=value<block_end><elif_stmt>isinstance(value (int float))<block_start>ssz=[value<for>_ range(self._pdim)]<block_end><else_stmt><block_start><raise>TypeError("Unsupported input type for sample size. Use float, list or tuple")<block_end><block_end># Set sample size <for_stmt>idx,sval enumerate(ssz)<block_start>self._sample_size_setter_common(idx sval)<block_end># Reset the cache self.reset()<block_end><def_stmt>_sample_size_getter_common self idx<block_start><return>int(1/self._delta[idx])+1<block_end><def_stmt>_sample_size_setter_common self idx value# Check and set the delta value corresponding to the idx-th parametric dimension <block_start><if_stmt><not>isinstance(value int)<block_start><raise>GeomdlException("Sample size must be an integer value bigger than 2")<block_end><if_stmt>value<l>2<block_start><raise>GeomdlException("Sample size must be an integer value bigger than 2")<block_end>self._delta[idx]=1.0/float(value-1)<block_end>@property<def_stmt>data self<block_start>""" Returns a dict which contains the geometry data. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. """<line_sep><return>tuple([e.data<for>e self._elements])<block_end><def_stmt>add self element<block_start>""" Adds geometry objects to the container. The input can be a single geometry, a list of geometry objects or a geometry container object. :param element: geometry object """<if_stmt>isinstance(element (self.__class__ list tuple))<block_start><for_stmt>elem element<block_start>self.add(elem)<block_end><block_end><elif_stmt>hasattr(self '_pdim')<block_start><if_stmt>element.pdimension<eq>self.pdimension<block_start><if_stmt>self.dimension<eq>0<block_start>self._dimension=element.dimension<block_end><else_stmt><block_start><if_stmt>self.dimension<ne>element.dimension<block_start><raise>GeomdlException("The spatial dimensions of the container and the input must be the same")<block_end><block_end>self._elements.append(element)<block_end><block_end><else_stmt><block_start><raise>GeomdlException("Cannot add the element to the container")<block_end># Reset the cache self.reset()<block_end># Make container look like a list append=add<def_stmt>reset self<block_start>""" Resets the cache. 
"""<line_sep>self._cache['evalpts'][:]=[]<block_end># Runs visualization component to render the surface @abc.abstractmethod<def_stmt>render self **kwargs<block_start>""" Renders plots using the visualization component. .. note:: This is an abstract method and it must be implemented in the subclass. """<line_sep><pass><block_end><block_end>@utl.export<class_stmt>CurveContainer(AbstractContainer)<block_start>""" Container class for storing multiple curves. This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in a for loop. This class provides the following properties: * :py:attr:`type` = container * :py:attr:`id` * :py:attr:`name` * :py:attr:`dimension` * :py:attr:`opt` * :py:attr:`pdimension` * :py:attr:`evalpts` * :py:attr:`bbox` * :py:attr:`vis` * :py:attr:`delta` * :py:attr:`sample_size` The following code example illustrates the usage of the Python properties: .. code-block:: python # Create a multi-curve container instance mcrv = multi.CurveContainer() # Add single or multi curves to the multi container using mcrv.add() command # Addition operator, e.g. mcrv1 + mcrv2, also works # Set the evaluation delta of the multi-curve mcrv.delta = 0.05 # Get the evaluated points curve_points = mcrv.evalpts """<def_stmt>__init__ self *args **kwargs<block_start>self._pdim=1# number of parametric dimensions self._dinit=0.01# evaluation delta super(CurveContainer self).__init__(*args **kwargs)<for_stmt>arg args<block_start>self.add(arg)<block_end><block_end><def_stmt>render self **kwargs<block_start>""" Renders the curves. The visualization component must be set using :py:attr:`~vis` property before calling this method. Keyword Arguments: * ``cpcolor``: sets the color of the control points grid * ``evalcolor``: sets the color of the surface * ``filename``: saves the plot with the input name * ``plot``: controls plot window visibility. *Default: True* * ``animate``: activates animation (if supported). *Default: False* * ``delta``: if True, the evaluation delta of the container object will be used. *Default: True* * ``reset_names``: resets the name of the curves inside the container. *Default: False* The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows customization over the color values. If none provided, a random color will be selected. The ``plot`` argument is useful when you would like to work on the command line without any window context. If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the configuration class. 
"""<if_stmt><not>self._vis_component<block_start>warnings.warn("No visualization component has set")<line_sep><return><block_end># Get the color values from keyword arguments cpcolor=kwargs.get('cpcolor')<line_sep>evalcolor=kwargs.get('evalcolor')<line_sep>filename=kwargs.get('filename' <none>)<line_sep>plot_visible=kwargs.get('plot' <true>)<line_sep>animate_plot=kwargs.get('animate' <false>)<line_sep># Flag to control evaluation delta updates update_delta=kwargs.get('delta' <true>)<line_sep>reset_names=kwargs.get('reset_names' <false>)<line_sep># Check if the input list sizes are equal <if_stmt>isinstance(cpcolor (list tuple))<block_start><if_stmt>len(cpcolor)<l>len(self._elements)<block_start><raise>ValueError("The number of color values in 'cpcolor' ("+str(len(cpcolor))+") cannot be less than the number of geometries contained ("+str(len(self._elements))+")")<block_end><block_end><if_stmt>isinstance(evalcolor (list tuple))<block_start><if_stmt>len(evalcolor)<l>len(self._elements)<block_start><raise>ValueError("The number of color values in 'evalcolor' ("+str(len(evalcolor))+") cannot be less than the number of geometries contained ("+str(len(self._elements))+")")<block_end><block_end># Run the visualization component self._vis_component.clear()<for_stmt>idx,elem enumerate(self._elements)<block_start><if_stmt>update_delta<block_start>elem.delta=self.delta<block_end>elem.evaluate()<line_sep># Reset element name <if_stmt>reset_names<block_start>elem.name="curve"<block_end># Fix element name <if_stmt>elem.name<eq>"curve"<block_start>elem.name=elem.name+" "+str(idx)<block_end># Color selection color=select_color(cpcolor evalcolor idx=idx)<line_sep>self._vis_component.add(ptsarr=elem.ctrlpts name=(elem.name "(CP)") color=color[0] plot_type='ctrlpts' idx=idx)<line_sep>self._vis_component.add(ptsarr=elem.evalpts name=elem.name color=color[1] plot_type='evalpts' idx=idx)<block_end># Display the figures <if_stmt>animate_plot<block_start>self._vis_component.animate(fig_save_as=filename display_plot=plot_visible)<block_end><else_stmt><block_start>self._vis_component.render(fig_save_as=filename display_plot=plot_visible)<block_end><block_end><block_end>@utl.export<class_stmt>SurfaceContainer(AbstractContainer)<block_start>""" Container class for storing multiple surfaces. This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in a for loop. This class provides the following properties: * :py:attr:`type` = container * :py:attr:`id` * :py:attr:`name` * :py:attr:`dimension` * :py:attr:`opt` * :py:attr:`pdimension` * :py:attr:`evalpts` * :py:attr:`bbox` * :py:attr:`vis` * :py:attr:`delta` * :py:attr:`delta_u` * :py:attr:`delta_v` * :py:attr:`sample_size` * :py:attr:`sample_size_u` * :py:attr:`sample_size_v` * :py:attr:`tessellator` * :py:attr:`vertices` * :py:attr:`faces` The following code example illustrates the usage of these Python properties: .. code-block:: python # Create a multi-surface container instance msurf = multi.SurfaceContainer() # Add single or multi surfaces to the multi container using msurf.add() command # Addition operator, e.g. 
msurf1 + msurf2, also works # Set the evaluation delta of the multi-surface msurf.delta = 0.05 # Get the evaluated points surface_points = msurf.evalpts """<def_stmt>__init__ self *args **kwargs<block_start>self._pdim=2# number of parametric dimensions self._dinit=0.05# evaluation delta super(SurfaceContainer self).__init__(*args **kwargs)<line_sep>self._cache['vertices']=[]<line_sep>self._cache['faces']=[]<for_stmt>arg args<block_start>self.add(arg)<block_end><block_end>@property<def_stmt>delta_u self<block_start>""" Evaluation delta for the u-direction. Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points. Therefore; smaller the delta, smoother the shape. Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the delta value for the u-direction :setter: Sets the delta value for the u-direction :type: float """<line_sep><return>self._delta[0]<block_end>@delta_u.setter<def_stmt>delta_u self value<block_start>self._delta_setter_common(0 value)<block_end>@property<def_stmt>delta_v self<block_start>""" Evaluation delta for the v-direction. Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points. Therefore; smaller the delta, smoother the shape. Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the delta value for the v-direction :setter: Sets the delta value for the v-direction :type: float """<line_sep><return>self._delta[1]<block_end>@delta_v.setter<def_stmt>delta_v self value<block_start>self._delta_setter_common(1 value)<block_end>@property<def_stmt>sample_size_u self<block_start>""" Sample size for the u-direction. Sample size defines the number of points to evaluate. It also sets the ``delta_u`` property. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets sample size for the u-direction :setter: Sets sample size for the u-direction :type: int """<line_sep><return>self._sample_size_getter_common(0)<block_end>@sample_size_u.setter<def_stmt>sample_size_u self value<block_start>self._sample_size_setter_common(0 value)<block_end>@property<def_stmt>sample_size_v self<block_start>""" Sample size for the v-direction. Sample size defines the number of points to evaluate. It also sets the ``delta_v`` property. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets sample size for the v-direction :setter: Sets sample size for the v-direction :type: int """<line_sep><return>self._sample_size_getter_common(1)<block_end>@sample_size_v.setter<def_stmt>sample_size_v self value<block_start>self._sample_size_setter_common(1 value)<block_end>@property<def_stmt>tessellator self<block_start>""" Tessellation component of the surfaces inside the container. 
Please refer to :doc:`Tessellation <module_tessellate>` documentation for details. .. code-block:: python :linenos: from geomdl import multi from geomdl import tessellate # Create the surface container surf_container = multi.SurfaceContainer(surf_list) # Set tessellator component surf_container.tessellator = tessellate.TrimTessellate() :getter: gets the tessellation component :setter: sets the tessellation component """<line_sep>tsl_comps=[]<for_stmt>idx range(len(self._elements))<block_start>tsl_comps.append(self._elements[idx].tessellator)<block_end><return>tsl_comps<block_end>@tessellator.setter<def_stmt>tessellator self value# Set tessellation component <block_start><for_stmt>idx range(len(self._elements))<block_start>self._elements[idx].tessellator=value.__class__()<block_end><block_end>@property<def_stmt>vertices self<block_start>""" Vertices generated by the tessellation operation. If the tessellation component is set to None, the result will be an empty list. :getter: Gets the vertices """<if_stmt><not>self._cache['vertices']<block_start>self.tessellate()<block_end><return>self._cache['vertices']<block_end>@property<def_stmt>faces self<block_start>""" Faces (triangles, quads, etc.) generated by the tessellation operation. If the tessellation component is set to None, the result will be an empty list. :getter: Gets the faces """<if_stmt><not>self._cache['faces']<block_start>self.tessellate()<block_end><return>self._cache['faces']<block_end><def_stmt>tessellate self **kwargs<block_start>""" Tessellates the surfaces inside the container. Keyword arguments are directly passed to the tessellation component. The following code snippet illustrates getting the vertices and faces of the surfaces inside the container: .. code-block:: python :linenos: # Tessellate the surfaces inside the container surf_container.tessellate() # Vertices and faces are stored inside the tessellator component tsl = surf_container.tessellator # Loop through all tessellator components for t in tsl: # Get the vertices vertices = t.tessellator.vertices # Get the faces (triangles, quads, etc.) faces = t.tessellator.faces Keyword Arguments: * ``num_procs``: number of concurrent processes for tessellating the surfaces. *Default: 1* * ``delta``: if True, the evaluation delta of the container object will be used. *Default: True* * ``force``: flag to force tessellation. 
*Default: False* """<line_sep># Keyword arguments force_tsl=kwargs.get('force' <false>)<line_sep>update_delta=kwargs.pop('delta' <true>)<line_sep># Don't re-tessellate if everything is in place <if_stmt>all((self._cache['vertices'] self._cache['faces']))<and><not>force_tsl<block_start><return><block_end># Tessellate the surfaces in the container num_procs=kwargs.pop('num_procs' 1)<line_sep>new_elems=[]<if_stmt>num_procs<g>1<block_start><with_stmt>utl.pool_context(processes=num_procs)<as>pool<block_start>tmp_elem=pool.map(partial(process_tessellate delta=self.delta update_delta=update_delta **kwargs) self._elements)<line_sep>new_elems<augadd>tmp_elem<block_end><block_end><else_stmt><block_start><for_stmt>idx range(len(self._elements))<block_start>tmp_elem=process_tessellate(self._elements[idx] delta=self.delta update_delta=update_delta **kwargs)<line_sep>new_elems.append(tmp_elem)<block_end><block_end>self._elements=new_elems<line_sep># Update caches verts=[]<line_sep>faces=[]<line_sep>v_offset=0<line_sep>f_offset=0<for_stmt>elem self._elements<block_start>v=elem.vertices<for_stmt>i range(len(v))<block_start>v[i].id<augadd>v_offset<block_end>verts<augadd>v<line_sep>f=elem.faces<for_stmt>i range(len(f))<block_start>f[i].id<augadd>f_offset<line_sep># for j in range(len(f[i]._data)): # f[i]._data[j].id += v_offset <block_end>faces<augadd>f<line_sep>v_offset<augadd>len(v)<line_sep>f_offset<augadd>len(f)<block_end>self._cache['vertices']=verts<line_sep>self._cache['faces']=faces<block_end><def_stmt>reset self<block_start>""" Resets the cache. """<line_sep>super(SurfaceContainer self).reset()<line_sep>self._cache['vertices'][:]=[]<line_sep>self._cache['faces'][:]=[]<block_end><def_stmt>render self **kwargs<block_start>""" Renders the surfaces. The visualization component must be set using :py:attr:`~vis` property before calling this method. Keyword Arguments: * ``cpcolor``: sets the color of the control points grids * ``evalcolor``: sets the color of the surface * ``filename``: saves the plot with the input name * ``plot``: controls plot window visibility. *Default: True* * ``animate``: activates animation (if supported). *Default: False* * ``colormap``: sets the colormap of the surfaces * ``delta``: if True, the evaluation delta of the container object will be used. *Default: True* * ``reset_names``: resets the name of the surfaces inside the container. *Default: False* * ``num_procs``: number of concurrent processes for rendering the surfaces. *Default: 1* The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows customization over the color values. If none provided, a random color will be selected. The ``plot`` argument is useful when you would like to work on the command line without any window context. If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables plot window popping out. If you don't provide a file name, the name of the image file will be pulled from the configuration class. Please note that ``colormap`` argument can only work with visualization classes that support colormaps. As an example, please see :py:class:`.VisMPL.VisSurfTriangle()` class documentation. 
This method expects multiple colormap inputs as a list or tuple, preferable the input list size is the same as the number of surfaces contained in the class. In the case of number of surfaces is bigger than number of input colormaps, this method will automatically assign a random color for the remaining surfaces. """<line_sep># Validation <if_stmt><not>self._vis_component<block_start>warnings.warn("No visualization component has been set")<line_sep><return><block_end># Get the color values from keyword arguments cpcolor=kwargs.get('cpcolor')<line_sep>evalcolor=kwargs.get('evalcolor')<line_sep>trimcolor=kwargs.get('trimcolor' 'black')<line_sep>filename=kwargs.get('filename' <none>)<line_sep>plot_visible=kwargs.get('plot' <true>)<line_sep>animate_plot=kwargs.get('animate' <false>)<line_sep># Flag to control evaluation delta updates update_delta=kwargs.get('delta' <true>)<line_sep>reset_names=kwargs.get('reset_names' <false>)<line_sep># Number of parallel processes num_procs=kwargs.get('num_procs' 1)<line_sep>force_tsl=bool(kwargs.pop('force' <false>))# flag to force re-tessellation # Check if the input list sizes are equal <if_stmt>isinstance(cpcolor (list tuple))<block_start><if_stmt>len(cpcolor)<ne>len(self._elements)<block_start><raise>ValueError("The number of colors in 'cpcolor' ("+str(len(cpcolor))+") cannot be less than the number of geometries contained("+str(len(self._elements))+")")<block_end><block_end><if_stmt>isinstance(evalcolor (list tuple))<block_start><if_stmt>len(evalcolor)<ne>len(self._elements)<block_start><raise>ValueError("The number of colors in 'evalcolor' ("+str(len(evalcolor))+") cannot be less than the number of geometries contained ("+str(len(self._elements))+")")<block_end><block_end># Get colormaps as a list surf_cmaps=kwargs.get('colormap' [])<if_stmt><not>isinstance(surf_cmaps (list tuple))<block_start>warnings.warn("Expecting a list of colormap values, not "+str(type(surf_cmaps)))<line_sep>surf_cmaps=[]<block_end># Run the visualization component self._vis_component.clear()<line_sep>vis_list=[]<if_stmt>num_procs<g>1<block_start>mp_lock=Lock()<line_sep>mp_val=Value('i' 0)<with_stmt>utl.pool_context(initializer=mp_init initargs=(mp_lock mp_val) processes=num_procs)<as>pool<block_start>tmp=pool.map(partial(process_elements_surface mconf=self._vis_component.mconf colorval=(cpcolor evalcolor trimcolor) idx=-1 force_tsl=force_tsl update_delta=update_delta delta=self.delta reset_names=reset_names) self._elements)<line_sep>vis_list<augadd>tmp<block_end><block_end><else_stmt><block_start><for_stmt>idx,elem enumerate(self._elements)<block_start>tmp=process_elements_surface(elem self._vis_component.mconf (cpcolor evalcolor trimcolor) idx force_tsl update_delta self.delta reset_names)<line_sep>vis_list<augadd>tmp<block_end><block_end><for_stmt>vl vis_list<block_start><if_stmt>isinstance(vl dict)<block_start>self._vis_component.add(**vl)<block_end><else_stmt><block_start><for_stmt>v vl<block_start>self._vis_component.add(**v)<block_end><block_end><block_end># Display the figures <if_stmt>animate_plot<block_start>self._vis_component.animate(fig_save_as=filename display_plot=plot_visible colormap=surf_cmaps)<block_end><else_stmt><block_start>self._vis_component.render(fig_save_as=filename display_plot=plot_visible colormap=surf_cmaps)<block_end><block_end><block_end>@utl.export<class_stmt>VolumeContainer(AbstractContainer)<block_start>""" Container class for storing multiple volumes. 
This class implements Python Iterator Protocol and therefore any instance of this class can be directly used in a for loop. This class provides the following properties: * :py:attr:`type` * :py:attr:`id` * :py:attr:`name` * :py:attr:`dimension` * :py:attr:`opt` * :py:attr:`pdimension` * :py:attr:`evalpts` * :py:attr:`bbox` * :py:attr:`vis` * :py:attr:`delta` * :py:attr:`delta_u` * :py:attr:`delta_v` * :py:attr:`delta_w` * :py:attr:`sample_size` * :py:attr:`sample_size_u` * :py:attr:`sample_size_v` * :py:attr:`sample_size_w` The following code example illustrates the usage of these Python properties: .. code-block:: python # Create a multi-volume container instance mvol = multi.VolumeContainer() # Add single or multi volumes to the multi container using mvol.add() command # Addition operator, e.g. mvol1 + mvol2, also works # Set the evaluation delta of the multi-volume mvol.delta = 0.05 # Get the evaluated points volume_points = mvol.evalpts """<def_stmt>__init__ self *args **kwargs<block_start>self._pdim=3# number of parametric dimensions self._dinit=0.1# evaluation delta super(VolumeContainer self).__init__()<for_stmt>arg args<block_start>self.add(arg)<block_end><block_end>@property<def_stmt>delta_u self<block_start>""" Evaluation delta for the u-direction. Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points. Therefore; smaller the delta, smoother the shape. Please note that ``delta_u`` and ``sample_size_u`` properties correspond to the same variable with different descriptions. Therefore, setting ``delta_u`` will also set ``sample_size_u``. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the delta value for the u-direction :setter: Sets the delta value for the u-direction :type: float """<line_sep><return>self._delta[0]<block_end>@delta_u.setter<def_stmt>delta_u self value<block_start>self._delta_setter_common(0 value)<block_end>@property<def_stmt>delta_v self<block_start>""" Evaluation delta for the v-direction. Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points. Therefore; smaller the delta, smoother the shape. Please note that ``delta_v`` and ``sample_size_v`` properties correspond to the same variable with different descriptions. Therefore, setting ``delta_v`` will also set ``sample_size_v``. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets the delta value for the v-direction :setter: Sets the delta value for the v-direction :type: float """<line_sep><return>self._delta[1]<block_end>@delta_v.setter<def_stmt>delta_v self value<block_start>self._delta_setter_common(1 value)<block_end>@property<def_stmt>delta_w self<block_start>""" Evaluation delta for the w-direction. Evaluation delta corresponds to the *step size*. Decreasing the step size results in evaluation of more points. Therefore; smaller the delta, smoother the shape. Please note that ``delta_w`` and ``sample_size_w`` properties correspond to the same variable with different descriptions. Therefore, setting ``delta_w`` will also set ``sample_size_w``. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. 
:getter: Gets the delta value for the w-direction :setter: Sets the delta value for the w-direction :type: float """<line_sep><return>self._delta[2]<block_end>@delta_w.setter<def_stmt>delta_w self value<block_start>self._delta_setter_common(2 value)<block_end>@property<def_stmt>sample_size_u self<block_start>""" Sample size for the u-direction. Sample size defines the number of points to evaluate. It also sets the ``delta_u`` property. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets sample size for the u-direction :setter: Sets sample size for the u-direction :type: int """<line_sep><return>self._sample_size_getter_common(0)<block_end>@sample_size_u.setter<def_stmt>sample_size_u self value<block_start>self._sample_size_setter_common(0 value)<block_end>@property<def_stmt>sample_size_v self<block_start>""" Sample size for the v-direction. Sample size defines the number of points to evaluate. It also sets the ``delta_v`` property. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets sample size for the v-direction :setter: Sets sample size for the v-direction :type: int """<line_sep><return>self._sample_size_getter_common(1)<block_end>@sample_size_v.setter<def_stmt>sample_size_v self value<block_start>self._sample_size_setter_common(1 value)<block_end>@property<def_stmt>sample_size_w self<block_start>""" Sample size for the w-direction. Sample size defines the number of points to evaluate. It also sets the ``delta_w`` property. Please refer to the `wiki <https://github.com/orbingol/NURBS-Python/wiki/Using-Python-Properties>`_ for details on using this class member. :getter: Gets sample size for the w-direction :setter: Sets sample size for the w-direction :type: int """<line_sep><return>self._sample_size_getter_common(2)<block_end>@sample_size_w.setter<def_stmt>sample_size_w self value<block_start>self._sample_size_setter_common(2 value)<block_end><def_stmt>render self **kwargs<block_start>""" Renders the volumes. The visualization component must be set using :py:attr:`~vis` property before calling this method. Keyword Arguments: * ``cpcolor``: sets the color of the control points plot * ``evalcolor``: sets the color of the volume * ``filename``: saves the plot with the input name * ``plot``: controls plot window visibility. *Default: True* * ``animate``: activates animation (if supported). *Default: False* * ``delta``: if True, the evaluation delta of the container object will be used. *Default: True* * ``reset_names``: resets the name of the volumes inside the container. *Default: False* * ``grid_size``: grid size for voxelization. *Default: (16, 16, 16)* * ``num_procs``: number of concurrent processes for voxelization. *Default: 1* The ``cpcolor`` and ``evalcolor`` arguments can be a string or a list of strings corresponding to the color values. Both arguments are processed separately, e.g. ``cpcolor`` can be a string whereas ``evalcolor`` can be a list or a tuple, or vice versa. A single string value sets the color to the same value. List input allows customization over the color values. If none provided, a random color will be selected. The ``plot`` argument is useful when you would like to work on the command line without any window context. If ``plot`` flag is False, this method saves the plot as an image file (.png file where possible) and disables plot window popping out. 
If you don't provide a file name, the name of the image file will be pulled from the configuration class. """<if_stmt><not>self._vis_component<block_start>warnings.warn("No visualization component has been set")<line_sep><return><block_end>cpcolor=kwargs.pop('cpcolor' <none>)<line_sep>evalcolor=kwargs.pop('evalcolor' <none>)<line_sep>filename=kwargs.pop('filename' <none>)<line_sep>plot_visible=kwargs.pop('plot' <true>)<line_sep>animate_plot=kwargs.pop('animate' <false>)<line_sep># Flag to control evaluation delta updates update_delta=kwargs.pop('delta' <true>)<line_sep>reset_names=kwargs.get('reset_names' <false>)<line_sep># Check if the input list sizes are equal <if_stmt>isinstance(cpcolor (list tuple))<block_start><if_stmt>len(cpcolor)<ne>len(self._elements)<block_start><raise>ValueError("The number of colors in 'cpcolor' ("+str(len(cpcolor))+") cannot be less than the number of geometries contained("+str(len(self._elements))+")")<block_end><block_end><if_stmt>isinstance(evalcolor (list tuple))<block_start><if_stmt>len(evalcolor)<ne>len(self._elements)<block_start><raise>ValueError("The number of colors in 'evalcolor' ("+str(len(evalcolor))+") cannot be less than the number of geometries contained ("+str(len(self._elements))+")")<block_end><block_end># Run the visualization component self._vis_component.clear()<for_stmt>idx,elem enumerate(self._elements)<block_start><if_stmt>update_delta<block_start>elem.delta=self.delta<block_end>elem.evaluate()<line_sep># Reset element name <if_stmt>reset_names<block_start>elem.name="volume"<block_end># Fix element name <if_stmt>elem.name<eq>"volume"<block_start>elem.name=elem.name+" "+str(idx)<block_end># Color selection color=select_color(cpcolor evalcolor idx=idx)<line_sep># Add control points <if_stmt>self._vis_component.mconf['ctrlpts']<eq>'points'<block_start>self._vis_component.add(ptsarr=elem.ctrlpts name=(elem.name "(CP)") color=color[0] plot_type='ctrlpts' idx=idx)<block_end># Add evaluated points <if_stmt>self._vis_component.mconf['evalpts']<eq>'points'<block_start>self._vis_component.add(ptsarr=elem.evalpts name=elem.name color=color[1] plot_type='evalpts' idx=idx)<block_end># Add evaluated points as voxels <if_stmt>self._vis_component.mconf['evalpts']<eq>'voxels'<block_start>grid,filled=voxelize.voxelize(elem **kwargs)<line_sep>polygrid=voxelize.convert_bb_to_faces(grid)<line_sep>self._vis_component.add(ptsarr=[polygrid filled] name=elem.name color=color[1] plot_type='evalpts' idx=idx)<block_end><block_end># Display the figures <if_stmt>animate_plot<block_start>self._vis_component.animate(fig_save_as=filename display_plot=plot_visible)<block_end><else_stmt><block_start>self._vis_component.render(fig_save_as=filename display_plot=plot_visible)<block_end><block_end><block_end><def_stmt>select_color cpcolor evalcolor idx=0<block_start>""" Selects item color for plotting. 
:param cpcolor: color for control points grid item :type cpcolor: str, list, tuple :param evalcolor: color for evaluated points grid item :type evalcolor: str, list, tuple :param idx: index of the current geometry object :type idx: int :return: a list of color values :rtype: list """<line_sep># Random colors by default color=utilities.color_generator()<line_sep># Constant color for control points grid <if_stmt>isinstance(cpcolor str)<block_start>color[0]=cpcolor<block_end># User-defined color for control points grid <if_stmt>isinstance(cpcolor (list tuple))<block_start>color[0]=cpcolor[idx]<block_end># Constant color for evaluated points grid <if_stmt>isinstance(evalcolor str)<block_start>color[1]=evalcolor<block_end># User-defined color for evaluated points grid <if_stmt>isinstance(evalcolor (list tuple))<block_start>color[1]=evalcolor[idx]<block_end><return>color<block_end><def_stmt>process_tessellate elem update_delta delta **kwargs<block_start>""" Tessellates surfaces. .. note:: Helper function required for ``multiprocessing`` :param elem: surface :type elem: abstract.Surface :param update_delta: flag to control evaluation delta updates :type update_delta: bool :param delta: evaluation delta :type delta: list, tuple :return: updated surface :rtype: abstract.Surface """<if_stmt>update_delta<block_start>elem.delta=delta<line_sep>elem.evaluate()<block_end>elem.tessellate(**kwargs)<line_sep><return>elem<block_end><def_stmt>process_elements_surface elem mconf colorval idx force_tsl update_delta delta reset_names<block_start>""" Processes visualization elements for surfaces. .. note:: Helper function required for ``multiprocessing`` :param elem: surface :type elem: abstract.Surface :param mconf: visualization module configuration :type mconf: dict :param colorval: color values :type colorval: tuple :param idx: index of the surface :type idx: int :param force_tsl: flag to force re-tessellation :type force_tsl: bool :param update_delta: flag to update surface delta :type update_delta: bool :param delta: new surface evaluation delta :type delta: list, tuple :param reset_names: flag to reset names :type reset_names: bool :return: visualization element (as a dict) :rtype: list """<if_stmt>idx<l>0<block_start>lock.acquire()<line_sep>idx=counter.value<line_sep>counter.value<augadd>1<line_sep>lock.release()<block_end><if_stmt>update_delta<block_start>elem.delta=delta<block_end>elem.evaluate()<line_sep># Reset element name <if_stmt>reset_names<block_start>elem.name="surface"<block_end># Fix element name <if_stmt>elem.name<eq>"surface"<and>idx<ge>0<block_start>elem.name=elem.name+" "+str(idx)<block_end># Color selection color=select_color(colorval[0] colorval[1] idx=idx)<line_sep># Initialize the return list rl=[]<line_sep># Add control points <if_stmt>mconf['ctrlpts']<eq>'points'<block_start>ret=dict(ptsarr=elem.ctrlpts name=(elem.name "(CP)") color=color[0] plot_type='ctrlpts' idx=idx)<line_sep>rl.append(ret)<block_end># Add control points as quads <if_stmt>mconf['ctrlpts']<eq>'quads'<block_start>qtsl=tessellate.QuadTessellate()<line_sep>qtsl.tessellate(elem.ctrlpts size_u=elem.ctrlpts_size_u size_v=elem.ctrlpts_size_v)<line_sep>ret=dict(ptsarr=[qtsl.vertices qtsl.faces] name=(elem.name "(CP)") color=color[0] plot_type='ctrlpts' idx=idx)<line_sep>rl.append(ret)<block_end># Add surface points <if_stmt>mconf['evalpts']<eq>'points'<block_start>ret=dict(ptsarr=elem.evalpts name=(elem.name idx) color=color[1] plot_type='evalpts' idx=idx)<line_sep>rl.append(ret)<block_end># Add surface points as quads 
<if_stmt>mconf['evalpts']<eq>'quads'<block_start>qtsl=tessellate.QuadTessellate()<line_sep>qtsl.tessellate(elem.evalpts size_u=elem.sample_size_u size_v=elem.sample_size_v)<line_sep>ret=dict(ptsarr=[qtsl.vertices qtsl.faces] name=elem.name color=color[1] plot_type='evalpts' idx=idx)<line_sep>rl.append(ret)<block_end># Add surface points as vertices and triangles <if_stmt>mconf['evalpts']<eq>'triangles'<block_start>elem.tessellate(force=force_tsl)<line_sep>ret=dict(ptsarr=[elem.tessellator.vertices elem.tessellator.faces] name=elem.name color=color[1] plot_type='evalpts' idx=idx)<line_sep>rl.append(ret)<block_end># Add the trim curves <for_stmt>itc,trim enumerate(elem.trims)<block_start>ret=dict(ptsarr=elem.evaluate_list(trim.evalpts) name=("trim" itc) color=colorval[2] plot_type='trimcurve' idx=idx)<line_sep>rl.append(ret)<block_end># Return the list <return>rl<block_end><def_stmt>mp_init l c<block_start>""" Initialization function for the multiprocessing worker pool. :param l: multiprocessing lock shared by the workers :param c: shared counter value used to assign geometry indices """<line_sep><global>lock<line_sep><global>counter<line_sep>lock=l<line_sep>counter=c<block_end>
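# A minimal usage sketch of the container rendering API documented above. This is
# hedged: the VisMPL import path and the VisVoxel class are assumptions about the
# available visualization backend, and vol1/vol2 stand in for previously built
# volume geometries.
#
#   from geomdl import multi
#   from geomdl.visualization import VisMPL
#
#   mvol = multi.VolumeContainer(vol1, vol2)
#   mvol.delta = 0.05
#   mvol.vis = VisMPL.VisVoxel()
#   # One evalcolor entry per volume; a random color is generated where none is given
#   mvol.render(evalcolor=["steelblue", "tomato"], grid_size=(8, 8, 8),
#               num_procs=2, plot=False, filename="volumes.png")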
# model model=Model()<line_sep>i0=Input("op_shape" "TENSOR_INT32" "{4}")<line_sep>weights=Parameter("ker" "TENSOR_FLOAT32" "{1, 3, 3, 1}" [1.0 2.0 3.0 4.0 5.0 6.0 7.0 8.0 9.0])<line_sep>i1=Input("in" "TENSOR_FLOAT32" "{1, 4, 4, 1}")<line_sep>pad=Int32Scalar("pad_same" 1)<line_sep>s_x=Int32Scalar("stride_x" 1)<line_sep>s_y=Int32Scalar("stride_y" 1)<line_sep>i2=Output("op" "TENSOR_FLOAT32" "{1, 4, 4, 1}")<line_sep>model=model.Operation("TRANSPOSE_CONV_EX" i0 weights i1 pad s_x s_y).To(i2)<line_sep># Example 1. Input in operand 0, input0={i0:# output shape [1 4 4 1] i1:# input 0 [1.0 2.0 3.0 4.0 5.0 6.0 7.0 8.0 9.0 10.0 11.0 12.0 13.0 14.0 15.0 16.0]}<line_sep>output0={i2:# output 0 [29.0 62.0 83.0 75.0 99.0 192.0 237.0 198.0 207.0 372.0 417.0 330.0 263.0 446.0 485.0 365.0]}<line_sep># Instantiate an example Example((input0 output0))<line_sep>
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- <import_from_stmt>._account_operations_async AccountOperations<import_from_stmt>._consumer_invitation_operations_async ConsumerInvitationOperations<import_from_stmt>._data_set_operations_async DataSetOperations<import_from_stmt>._data_set_mapping_operations_async DataSetMappingOperations<import_from_stmt>._invitation_operations_async InvitationOperations<import_from_stmt>._operation_operations_async OperationOperations<import_from_stmt>._share_operations_async ShareOperations<import_from_stmt>._provider_share_subscription_operations_async ProviderShareSubscriptionOperations<import_from_stmt>._share_subscription_operations_async ShareSubscriptionOperations<import_from_stmt>._consumer_source_data_set_operations_async ConsumerSourceDataSetOperations<import_from_stmt>._synchronization_setting_operations_async SynchronizationSettingOperations<import_from_stmt>._trigger_operations_async TriggerOperations<line_sep>__all__=['AccountOperations' 'ConsumerInvitationOperations' 'DataSetOperations' 'DataSetMappingOperations' 'InvitationOperations' 'OperationOperations' 'ShareOperations' 'ProviderShareSubscriptionOperations' 'ShareSubscriptionOperations' 'ConsumerSourceDataSetOperations' 'SynchronizationSettingOperations' 'TriggerOperations' ]<line_sep>
""" Message editor with a wheel zoom functionality """<line_sep># pylint: disable=bad-continuation <import_from_stmt>PyQt4 QtCore QtGui<class_stmt>MessageCompose(QtGui.QTextEdit)<block_start>"""Editor class with wheel zoom functionality"""<def_stmt>__init__ self parent=0<block_start>super(MessageCompose self).__init__(parent)<line_sep>self.setAcceptRichText(<false>)<line_sep>self.defaultFontPointSize=self.currentFont().pointSize()<block_end><def_stmt>wheelEvent self event<block_start>"""Mouse wheel scroll event handler"""<if_stmt>(QtGui.QApplication.queryKeyboardModifiers()&QtCore.Qt.ControlModifier)<eq>QtCore.Qt.ControlModifier<and>event.orientation()<eq>QtCore.Qt.Vertical<block_start><if_stmt>event.delta()<g>0<block_start>self.zoomIn(1)<block_end><else_stmt><block_start>self.zoomOut(1)<block_end>zoom=self.currentFont().pointSize()<times>100/self.defaultFontPointSize<line_sep>QtGui.QApplication.activeWindow().statusBar().showMessage(QtGui.QApplication.translate("MainWindow" "Zoom level %1%").arg(str(zoom)))<block_end><else_stmt># in QTextEdit, super does not zoom, only scroll <block_start>super(MessageCompose self).wheelEvent(event)<block_end><block_end><def_stmt>reset self<block_start>"""Clear the edit content"""<line_sep>self.setText('')<block_end><block_end>
# Generated by Django 2.1.7 on 2019-04-17 09:25 <import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[migrations.swappable_dependency(settings.AUTH_USER_MODEL) ('engine' '0017_db_redesign_20190221') ]<line_sep>operations=[migrations.CreateModel(name='JobCommit' fields=[('id' models.BigAutoField(primary_key=<true> serialize=<false>)) ('version' models.PositiveIntegerField(default=0)) ('timestamp' models.DateTimeField(auto_now=<true>)) ('message' models.CharField(default='' max_length=4096)) ('author' models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL to=settings.AUTH_USER_MODEL)) ('job' models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name='commits' to='engine.Job')) ] options={'abstract':<false> 'default_permissions':() } ) ]<block_end>
# Copyright 2015 The Shaderc Authors. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>expect<import_stmt>os.path<import_from_stmt>glslc_test_framework inside_glslc_testsuite<import_from_stmt>placeholder FileShader StdinShader TempFileName<line_sep>@inside_glslc_testsuite('File')<class_stmt>SimpleFileCompiled(expect.ValidObjectFile)<block_start>"""Tests whether or not a simple glsl file compiles."""<line_sep>shader=FileShader('#version 310 es\nvoid main() {}' '.frag')<line_sep>glslc_args=['-c' shader]<block_end>@inside_glslc_testsuite('File')<class_stmt>NotSpecifyingOutputName(expect.SuccessfulReturn expect.CorrectObjectFilePreamble)<block_start>"""Tests that when there is no -o and -E/-S/-c specified, output as a.spv."""<line_sep>shader=FileShader('#version 140\nvoid main() {}' '.frag')<line_sep>glslc_args=[shader]<def_stmt>check_output_a_spv self status<block_start>output_name=os.path.join(status.directory 'a.spv')<line_sep><return>self.verify_object_file_preamble(output_name)<block_end><block_end>@inside_glslc_testsuite('Parameters')<class_stmt>HelpParameters(expect.ReturnCodeIsZero expect.StdoutMatch expect.StderrMatch)<block_start>"""Tests the --help flag outputs correctly and does not produce and error."""<line_sep>glslc_args=['--help']<line_sep>expected_stdout='''glslc - Compile shaders into SPIR-V Usage: glslc [options] file... An input file of - represents standard input. Options: -c Only run preprocess, compile, and assemble steps. -Dmacro[=defn] Add an implicit macro definition. -E Outputs only the results of the preprocessing step. Output defaults to standard output. -fshader-stage=<stage> Treat subsequent input files as having stage <stage>. Valid stages are vertex, fragment, tesscontrol, tesseval, geometry, and compute. -g Generate source-level debug information. Currently this option has no effect. --help Display available options. --version Display compiler version information. -I <value> Add directory to include search path. -o <file> Write output to <file>. A file name of '-' represents standard output. -std=<value> Version and profile for input files. Possible values are concatenations of version and profile, e.g. 310es, 450core, etc. -M Generate make dependencies. Implies -E and -w. -MM An alias for -M. -MD Generate make dependencies and compile. -MF <file> Write dependency output to the given file. -MT <target> Specify the target of the rule emitted by dependency generation. -S Only run preprocess and compilation steps. --target-env=<environment> Set the target shader environment, and the semantics of warnings and errors. Valid values are 'opengl', 'opengl_compat' and 'vulkan'. The default value is 'vulkan'. -w Suppresses all warning messages. -Werror Treat all warnings as errors. -x <language> Treat subsequent input files as having type <language>. The only supported language is glsl. 
'''<line_sep>expected_stderr=''<block_end>@inside_glslc_testsuite('Parameters')<class_stmt>HelpIsNotTooWide(expect.StdoutNoWiderThan80Columns)<block_start>"""Tests that --help output is not too wide."""<line_sep>glslc_args=['--help']<block_end>@inside_glslc_testsuite('Parameters')<class_stmt>UnknownSingleLetterArgument(expect.ErrorMessage)<block_start>"""Tests that an unknown argument triggers an error message."""<line_sep>glslc_args=['-a']<line_sep>expected_error=["glslc: error: unknown argument: '-a'\n"]<block_end>@inside_glslc_testsuite('Parameters')<class_stmt>UnknownMultiLetterArgument(expect.ErrorMessage)<block_start>"""Tests that an unknown argument triggers an error message."""<line_sep>glslc_args=['-zzz']<line_sep>expected_error=["glslc: error: unknown argument: '-zzz'\n"]<block_end>@inside_glslc_testsuite('Parameters')<class_stmt>UnsupportedOption(expect.ErrorMessage)<block_start>"""Tests that an unsupported option triggers an error message."""<line_sep>glslc_args=['--unsupported-option']<line_sep>expected_error=["glslc: error: unsupported option: '--unsupported-option'\n"]<block_end>@inside_glslc_testsuite('File')<class_stmt>FileNotFound(expect.ErrorMessage)<block_start>"""Tests the error message if a file cannot be found."""<line_sep>blabla_file=TempFileName('blabla.frag')<line_sep>glslc_args=[blabla_file]<line_sep>expected_error=["glslc: error: cannot open input file: '" blabla_file "': No such file or directory\n"]<block_end>@inside_glslc_testsuite('Unsupported')<class_stmt>LinkingNotSupported(expect.ErrorMessage)<block_start>"""Tests the error message generated by linking not supported yet."""<line_sep>shader1=FileShader('#version 140\nvoid main() {}' '.vert')<line_sep>shader2=FileShader('#version 140\nvoid main() {}' '.frag')<line_sep>glslc_args=[shader1 shader2]<line_sep>expected_error=['glslc: error: linking multiple files is not supported yet. ' 'Use -c to compile files individually.\n']<block_end>@inside_glslc_testsuite('Unsupported')<class_stmt>MultipleStdinUnsupported(expect.ErrorMessage)<block_start>"""Tests the error message generated by having more than one - input."""<line_sep>glslc_args=['-c' '-fshader-stage=vertex' '-' '-']<line_sep>expected_error=['glslc: error: specifying standard input "-" as input more'<concat>' than once is not allowed.\n']<block_end>@inside_glslc_testsuite('Parameters')<class_stmt>StdinWithoutShaderStage(expect.StdoutMatch expect.StderrMatch)<block_start>"""Tests that you must use -fshader-stage when specifying - as input."""<line_sep>shader=StdinShader("""#version 140 int a() { } void main() { int x = a(); } """)<line_sep>glslc_args=[shader]<line_sep>expected_stdout=''<line_sep>expected_stderr=["glslc: error: '-': -fshader-stage required when input is from "<concat>'standard input "-"\n']<block_end>
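# A hedged sketch of how a further case would slot into the test framework above;
# the option exercised (--version) appears in the help text, but the expectations
# would need to be verified against real glslc behaviour before committing.
#
#   @inside_glslc_testsuite('Parameters')
#   class VersionParameter(expect.ReturnCodeIsZero):
#       """Tests that --version runs and exits successfully."""
#       glslc_args = ['--version']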
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. <import_from_stmt>uuid UUID<import_from_stmt>botframework.streaming.payload_transport PayloadSender<import_from_stmt>botframework.streaming.payloads.models Header<class_stmt>CancelDisassembler<block_start><def_stmt>__init__ self * sender:PayloadSender identifier:UUID type:str<block_start>self._sender=sender<line_sep>self._identifier=identifier<line_sep>self._type=type<block_end><async_keyword><def_stmt>disassemble self<block_start>header=Header(type=self._type id=self._identifier end=<true>)<line_sep>header.payload_length=0<line_sep>self._sender.send_payload(header <none> <true> <none>)<line_sep><return><block_end><block_end>
<import_stmt>ast<import_stmt>csv<import_stmt>os<import_stmt>sys<import_from_stmt>pickle dump<import_stmt>numpy<as>np<import_from_stmt>tfsnippet.utils makedirs<line_sep>output_folder='processed'<line_sep>makedirs(output_folder exist_ok=<true>)<def_stmt>load_and_save category filename dataset dataset_folder<block_start>temp=np.genfromtxt(os.path.join(dataset_folder category filename) dtype=np.float32 delimiter=',')<line_sep>print(dataset category filename temp.shape)<with_stmt>open(os.path.join(output_folder dataset+"_"+category+".pkl") "wb")<as>file<block_start>dump(temp file)<block_end><block_end><def_stmt>load_data dataset<block_start><if_stmt>dataset<eq>'SMD'<block_start>dataset_folder='ServerMachineDataset'<line_sep>file_list=os.listdir(os.path.join(dataset_folder "train"))<for_stmt>filename file_list<block_start><if_stmt>filename.endswith('.txt')<block_start>load_and_save('train' filename filename.strip('.txt') dataset_folder)<line_sep>load_and_save('test' filename filename.strip('.txt') dataset_folder)<line_sep>load_and_save('test_label' filename filename.strip('.txt') dataset_folder)<block_end><block_end><block_end><elif_stmt>dataset<eq>'SMAP'<or>dataset<eq>'MSL'<block_start>dataset_folder='data'<with_stmt>open(os.path.join(dataset_folder 'labeled_anomalies.csv') 'r')<as>file<block_start>csv_reader=csv.reader(file delimiter=',')<line_sep>res=[row<for>row csv_reader][1:]<block_end>res=sorted(res key=<lambda>k:k[0])<line_sep>label_folder=os.path.join(dataset_folder 'test_label')<line_sep>makedirs(label_folder exist_ok=<true>)<line_sep>data_info=[row<for>row res<if>row[1]<eq>dataset<and>row[0]<ne>'P-2']<line_sep>labels=[]<for_stmt>row data_info<block_start>anomalies=ast.literal_eval(row[2])<line_sep>length=int(row[-1])<line_sep>label=np.zeros([length] dtype=np.bool)<for_stmt>anomaly anomalies<block_start>label[anomaly[0]:anomaly[1]+1]=<true><block_end>labels.extend(label)<block_end>labels=np.asarray(labels)<line_sep>print(dataset 'test_label' labels.shape)<with_stmt>open(os.path.join(output_folder dataset+"_"+'test_label'+".pkl") "wb")<as>file<block_start>dump(labels file)<block_end><def_stmt>concatenate_and_save category<block_start>data=[]<for_stmt>row data_info<block_start>filename=row[0]<line_sep>temp=np.load(os.path.join(dataset_folder category filename+'.npy'))<line_sep>data.extend(temp)<block_end>data=np.asarray(data)<line_sep>print(dataset category data.shape)<with_stmt>open(os.path.join(output_folder dataset+"_"+category+".pkl") "wb")<as>file<block_start>dump(data file)<block_end><block_end><for_stmt>c ['train' 'test']<block_start>concatenate_and_save(c)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>datasets=['SMD' 'SMAP' 'MSL']<line_sep>commands=sys.argv[1:]<line_sep>load=[]<if_stmt>len(commands)<g>0<block_start><for_stmt>d commands<block_start><if_stmt>d<in>datasets<block_start>load_data(d)<block_end><block_end><block_end><else_stmt><block_start>print(""" Usage: python data_preprocess.py <datasets> where <datasets> should be one of ['SMD', 'SMAP', 'MSL'] """)<block_end><block_end>
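# For reference, the directory layout assumed by the preprocessing script above,
# summarized from the code (the <machine> / <chan_id> file names are illustrative):
#
#   ServerMachineDataset/
#       train/<machine>.txt        # comma-separated float matrix per machine (SMD)
#       test/<machine>.txt
#       test_label/<machine>.txt
#   data/
#       labeled_anomalies.csv      # anomaly spans for the SMAP / MSL channels
#       train/<chan_id>.npy
#       test/<chan_id>.npy
#
# All outputs are pickled arrays written to processed/<dataset>_<category>.pkl.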
<import_from_stmt>unittest.mock MagicMock Mock patch<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>chitra.visualization.metrics cm_accuracy detect_multilabel plot_confusion_matrix <def_stmt>test_detect_multilabel <block_start><with_stmt>pytest.raises(UserWarning)<block_start>detect_multilabel({"label1":"this will raise UserWarning"})<block_end><assert_stmt>detect_multilabel([1 2 3 4])<assert_stmt><not>detect_multilabel([0 1 1 0])<block_end><def_stmt>test_cm_accuracy <block_start>x=np.asarray([[1 2] [1 2]])<assert_stmt>cm_accuracy(x)<eq>0.5<block_end>@patch("chitra.visualization.metrics.plt")<def_stmt>test_plot_confusion_matrix mock_plt:Mock<block_start>mock_plt.show=MagicMock()<line_sep>y_pred=[1 1 0 1]<line_sep>y_true=[0 1 0 1]<assert_stmt>plot_confusion_matrix(y_pred y_true)<is><none><line_sep>mock_plt.show.assert_called_once()<block_end>
"""Defines the class that handles a node's cleanup"""<import_from_future_stmt> unicode_literals<import_stmt>logging<import_from_stmt>job.execution.tasks.cleanup_task CleanupTask<import_from_stmt>scheduler.manager scheduler_mgr<line_sep>JOB_EXES_WARNING_THRESHOLD=100<line_sep>MAX_JOB_EXES_PER_CLEANUP=25<line_sep>logger=logging.getLogger(__name__)<class_stmt>NodeCleanup(object)<block_start>"""This class manages all of the cleanup for a node."""<def_stmt>__init__ self<block_start>"""Constructor """<line_sep>self._job_exes={}<block_end># {Job Exe ID: RunningJobExecution} <def_stmt>add_job_execution self job_exe<block_start>"""Adds a job execution that needs to be cleaned up :param job_exe: The job execution to add :type job_exe: :class:`job.execution.job_exe.RunningJobExecution` """<line_sep>self._job_exes[job_exe.id]=job_exe<block_end><def_stmt>delete_job_executions self job_exes<block_start>"""Deletes the given job executions since they have been cleaned up :param job_exes: The job executions to delete :type job_exes: [:class:`job.execution.job_exe.RunningJobExecution`] """<for_stmt>job_exe job_exes<block_start><if_stmt>job_exe.id<in>self._job_exes<block_start><del_stmt>self._job_exes[job_exe.id]<block_end><block_end><block_end><def_stmt>create_next_task self agent_id hostname is_initial_cleanup_completed<block_start>"""Creates and returns the next cleanup task that needs to be run, possibly None :param agent_id: The node's agent ID :type agent_id: string :param hostname: The node's hostname :type hostname: string :param is_initial_cleanup_completed: Indicates if node's initial cleanup is completed :type is_initial_cleanup_completed: bool :returns: The next cleanup task, possibly None :rtype: :class:`job.tasks.base_task.Task` """<line_sep>total_job_exes=self._job_exes.values()<line_sep>count=len(total_job_exes)<if_stmt>count<g>JOB_EXES_WARNING_THRESHOLD<block_start>logger.warning('Node %s has %d job executions waiting to be cleaned up' hostname count)<block_end>cleanup_job_exes=[]<if_stmt>is_initial_cleanup_completed<block_start><if_stmt>count<eq>0# No job executions to clean, so no task <block_start><return><none><block_end><for_stmt>job_exe total_job_exes<block_start>cleanup_job_exes.append(job_exe)<if_stmt>len(cleanup_job_exes)<ge>MAX_JOB_EXES_PER_CLEANUP<block_start><break><block_end><block_end><block_end><return>CleanupTask(scheduler_mgr.framework_id agent_id cleanup_job_exes)<block_end><def_stmt>get_num_job_exes self<block_start>"""Returns the number of job executions waiting to be cleaned up :returns: The number of job executions waiting to be cleaned up :rtype: int """<line_sep><return>len(self._job_exes.values())<block_end><block_end>
<import_stmt>itertools<import_stmt>os<import_stmt>vlcp.service.sdn.ofpportmanager<as>ofpportmanager<import_stmt>vlcp.service.kvdb.objectdb<as>objectdb<import_stmt>vlcp.service.sdn.ioprocessing<as>iop<import_from_stmt>vlcp.service.sdn.flowbase FlowBase<import_from_stmt>vlcp.server.module depend call_api<import_from_stmt>vlcp.config.config defaultconfig<import_from_stmt>vlcp.event.runnable RoutineContainer<import_from_stmt>vlcp.service.sdn.ofpmanager FlowInitialize<import_from_stmt>vlcp.utils.ethernet mac_addr_bytes ip4_addr_bytes ip4_icmp_payload ethernet_l7 ip4_packet_l7 ip4_payload ICMP_ECHOREPLY icmp_bestparse icmp_echo ip_frag<import_from_stmt>vlcp.utils.flowupdater FlowUpdater<import_from_stmt>vlcp.protocol.openflow.openflow OpenflowConnectionStateEvent OpenflowAsyncMessageEvent<import_from_stmt>vlcp.utils.networkmodel SubNet RouterPort<import_from_stmt>namedstruct.stdprim uint16<import_from_stmt>vlcp.event.event M_<class_stmt>ICMPResponderUpdater(FlowUpdater)<block_start><def_stmt>__init__ self connection parent<block_start>super(ICMPResponderUpdater self).__init__(connection () ('icmpresponderupdate' connection) parent._logger)<line_sep>self.parent=parent<line_sep>self._lastlognets=()<line_sep>self._lastlogports=()<line_sep>self._lastsubnetsinfo=dict()<line_sep>self._orig_initialkeys=()<block_end><async_keyword><def_stmt>main self<block_start><try_stmt><block_start>self.subroutine(self._update_handler() <true> "update_handler_routine")<line_sep># use controller to reply icmp ping ,so start routine handler packet in <if_stmt><not>self.parent.prepush<block_start>self.subroutine(self._icmp_packetin_handler() <true> "icmp_packetin_handler_routine")<block_end><await>FlowUpdater.main(self)<block_end><finally_stmt><block_start><if_stmt>hasattr(self "update_handler_routine")<block_start>self.update_handler_routine.close()<block_end><if_stmt>hasattr(self "icmp_packetin_handler_routine")<block_start>self.icmp_packetin_handler_routine.close()<block_end><block_end><block_end><async_keyword><def_stmt>_icmp_packetin_handler self<block_start>conn=self._connection<line_sep>ofdef=self._connection.openflowdef<line_sep>l3input=self.parent._gettableindex("l3input" self._connection.protocol.vhost)<line_sep>transactid=uint16.create(os.urandom(2))<async_keyword><def_stmt>send_packet_out portid packet<block_start><await>self.execute_commands(conn [ofdef.ofp_packet_out(buffer_id=ofdef.OFP_NO_BUFFER in_port=ofdef.OFPP_CONTROLLER actions=[ofdef.ofp_action_output(port=portid max_len=ofdef.OFPCML_NO_BUFFER)] data=packet._tobytes())])<block_end>icmp_packetin_matcher=OpenflowAsyncMessageEvent.createMatcher(ofdef.OFPT_PACKET_IN <none> <none> l3input 2 self._connection self._connection.connmark)<while_stmt><true><block_start>ev=<await>icmp_packetin_matcher<line_sep>msg=ev.message<line_sep>inport=ofdef.ofp_port_no.create(ofdef.get_oxm(msg.match.oxm_fields ofdef.OXM_OF_IN_PORT))<line_sep># it must be icmp packet ... 
icmp_packet=ethernet_l7.create(msg.data)<if_stmt>ip_frag(icmp_packet)<ne>0# ignore fragmented packets <block_start><continue><block_end>transactid=(transactid+1)&0xffff<line_sep>reply_packet=ip4_packet_l7((ip4_payload ip4_icmp_payload) (icmp_bestparse icmp_echo) dl_src=icmp_packet.dl_dst dl_dst=icmp_packet.dl_src ip_src=icmp_packet.ip_dst ip_dst=icmp_packet.ip_src frag_off=0 ttl=128 identifier=transactid icmp_type=ICMP_ECHOREPLY icmp_code=icmp_packet.icmp_code icmp_id=icmp_packet.icmp_id icmp_seq=icmp_packet.icmp_seq data=icmp_packet.data)<line_sep>self.subroutine(send_packet_out(inport reply_packet))<block_end><block_end><async_keyword><def_stmt>_update_handler self# when lgport,lgnet,phyport,phynet object change , receive this event from ioprocessing module <block_start>dataobjectchange=iop.DataObjectChanged.createMatcher(<none> <none> self._connection)<while_stmt><true><block_start>ev=<await>dataobjectchange<line_sep># save to instance attr , us in other method self._lastlogports,_,self._lastlognets,_=ev.current<line_sep>self._update_walk()<block_end><block_end><def_stmt>_walk_lgport self key value walk save<block_start><if_stmt>value<is><not><none><block_start>save(key)<if_stmt>hasattr(value 'subnet')<block_start><try_stmt><block_start>subnetobj=walk(value.subnet.getkey())<block_end><except_stmt>KeyError<block_start><pass><block_end><else_stmt><block_start>save(value.subnet.getkey())<if_stmt>subnetobj<is><not><none><and>hasattr(subnetobj "router")<block_start><try_stmt><block_start>_=walk(subnetobj.router.getkey())<block_end><except_stmt>KeyError<block_start><pass><block_end><else_stmt><block_start>save(subnetobj.router.getkey())<block_end><block_end><block_end><block_end><block_end><block_end><def_stmt>_walk_lgnet self key value walk save<block_start>save(key)<line_sep># if value is None, also save its key # means watch key, when created , we will recv event <block_end><def_stmt>_update_walk self<block_start>lgportkeys=[p.getkey()<for>p,_ self._lastlogports]<line_sep>lgnetkeys=[p.getkey()<for>p,_ self._lastlognets]<line_sep>self._initialkeys=lgportkeys+lgnetkeys<line_sep>self._orig_initialkeys=lgportkeys+lgnetkeys<line_sep>self._walkerdict=dict(itertools.chain(((p self._walk_lgport)<for>p lgportkeys) ((n self._walk_lgnet)<for>n lgnetkeys)))<line_sep>self.subroutine(self.restart_walk() <false>)<block_end><def_stmt>reset_initialkeys self keys values# walk map logicalport --> subnet ---> routerport # we get subnet object, add keys to initialkeys, # when subnet update, it will restart walk ,, after we will get new routerport <block_start>subnetkeys=[k<for>k,v zip(keys values)<if>v<is><not><none><and><not>v.isdeleted()<and>v.isinstance(SubNet)]<line_sep>self._initialkeys=tuple(itertools.chain(self._orig_initialkeys subnetkeys))<block_end><async_keyword><def_stmt>updateflow self connection addvalues removevalues updatedvalues<block_start><try_stmt><block_start>allobjects=set(o<for>o self._savedresult<if>o<is><not><none><and><not>o.isdeleted())<line_sep>lastsubnetsinfo=self._lastsubnetsinfo<line_sep>currentlognetsinfo=dict((n id)<for>n,id self._lastlognets<if>n<in>allobjects)<line_sep>currentrouterportsinfo=dict((o.subnet o)<for>o allobjects<if>o.isinstance(RouterPort))<line_sep>currentsubnetsinfo=dict((o (getattr(currentrouterportsinfo[o] "ip_address" getattr(o "gateway" <none>)) self.parent.inroutermac o.network.id currentlognetsinfo[o.network]))<for>o allobjects<if>o.isinstance(SubNet)<and>hasattr(o 
"router")<and>o<in>currentrouterportsinfo<and>o.network<in>currentlognetsinfo<and>(hasattr(currentrouterportsinfo[o] "ip_address")<or>hasattr(o "gateway"))<and>(<not>hasattr(o "isexternal")<or>o.isexternal<eq><false>))<line_sep>self._lastsubnetsinfo=currentsubnetsinfo<line_sep>ofdef=connection.openflowdef<line_sep>vhost=connection.protocol.vhost<line_sep>l3input=self.parent._gettableindex("l3input" vhost)<line_sep>cmds=[]<if_stmt>connection.protocol.disablenxext<block_start><def_stmt>match_network nid<block_start><return>ofdef.create_oxm(ofdef.OXM_OF_METADATA_W (nid&0xffff)<lshift>32 b'\x00\x00\xff\xff\x00\x00\x00\x00')<block_end><block_end><else_stmt><block_start><def_stmt>match_network nid<block_start><return>ofdef.create_oxm(ofdef.NXM_NX_REG4 nid)<block_end><block_end># prepush or not ,, it is same , so .. <def_stmt>_deleteicmpflows ipaddress macaddress networkid<block_start><return>[ofdef.ofp_flow_mod(cookie=0x2 cookie_mask=0xffffffffffffffff table_id=l3input command=ofdef.OFPFC_DELETE priority=ofdef.OFP_DEFAULT_PRIORITY+1 buffer_id=ofdef.OFP_NO_BUFFER out_port=ofdef.OFPP_ANY out_group=ofdef.OFPG_ANY match=ofdef.ofp_match_oxm(oxm_fields=[ofdef.create_oxm(ofdef.NXM_NX_REG4 networkid) ofdef.create_oxm(ofdef.OXM_OF_ETH_DST mac_addr_bytes(macaddress)) ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE ofdef.ETHERTYPE_IP) ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST ip4_addr_bytes(ipaddress)) ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO ofdef.IPPROTO_ICMP) ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE 8) ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE 0)]))]<block_end><if_stmt><not>self.parent.prepush<block_start><def_stmt>_createicmpflows ipaddress macaddress networkid<block_start><return>[ofdef.ofp_flow_mod(cookie=0x2 cookie_mask=0xffffffffffffffff table_id=l3input command=ofdef.OFPFC_ADD # icmp to router matcher same as ip forward to router # so priority + 1 priority=ofdef.OFP_DEFAULT_PRIORITY+1 buffer_id=ofdef.OFP_NO_BUFFER out_port=ofdef.OFPP_ANY out_group=ofdef.OFPG_ANY match=ofdef.ofp_match_oxm(oxm_fields=[match_network(networkid) ofdef.create_oxm(ofdef.OXM_OF_ETH_DST mac_addr_bytes(macaddress)) ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE ofdef.ETHERTYPE_IP) ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST ip4_addr_bytes(ipaddress)) ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO ofdef.IPPROTO_ICMP) ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE 8) ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE 0)]) instructions=[ofdef.ofp_instruction_actions(actions=[ofdef.ofp_action_output(port=ofdef.OFPP_CONTROLLER max_len=ofdef.OFPCML_NO_BUFFER)])])]<block_end><block_end><else_stmt><block_start><def_stmt>_createicmpflows ipaddress macaddress networkid<block_start><return>[ofdef.ofp_flow_mod(cookie=0x2 cookie_mask=0xffffffffffffffff table_id=l3input command=ofdef.OFPFC_ADD # icmp to router matcher same as ip forward to router # so priority + 1 priority=ofdef.OFP_DEFAULT_PRIORITY+1 buffer_id=ofdef.OFP_NO_BUFFER out_port=ofdef.OFPP_ANY out_group=ofdef.OFPG_ANY match=ofdef.ofp_match_oxm(oxm_fields=[match_network(networkid) ofdef.create_oxm(ofdef.OXM_OF_ETH_DST mac_addr_bytes(macaddress)) ofdef.create_oxm(ofdef.OXM_OF_ETH_TYPE ofdef.ETHERTYPE_IP) ofdef.create_oxm(ofdef.OXM_OF_IPV4_DST ip4_addr_bytes(ipaddress)) ofdef.create_oxm(ofdef.OXM_OF_IP_PROTO ofdef.IPPROTO_ICMP) ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE 8) ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_CODE 0)]) instructions=[ofdef.ofp_instruction_actions(actions=[ofdef.nx_action_reg_move(n_bits=48 src=ofdef.OXM_OF_ETH_SRC dst=ofdef.OXM_OF_ETH_DST) ofdef.ofp_action_set_field(field=ofdef.create_oxm(ofdef.OXM_OF_ETH_SRC 
ofdef.mac_addr(macaddress))) ofdef.nx_action_reg_move(n_bits=32 src=ofdef.OXM_OF_IPV4_SRC dst=ofdef.OXM_OF_IPV4_DST) ofdef.ofp_action_set_field(field=ofdef.create_oxm(ofdef.OXM_OF_IPV4_SRC ofdef.ip4_addr(ipaddress))) ofdef.ofp_action_set_field(field=ofdef.create_oxm(ofdef.OXM_OF_ICMPV4_TYPE ICMP_ECHOREPLY)) ofdef.ofp_action_nw_ttl(nw_ttl=128) ofdef.ofp_action_output(port=ofdef.OFPP_IN_PORT)])])]<block_end><block_end><for_stmt>subnet lastsubnetsinfo.keys()<block_start><if_stmt>subnet<not><in>currentsubnetsinfo<or>(subnet<in>currentsubnetsinfo<and>lastsubnetsinfo[subnet]<ne>currentsubnetsinfo[subnet])# subnet remove or subnet info changed , remove flow info <block_start>ip_address,mac_address,networkid,nid=lastsubnetsinfo[subnet]<line_sep>remove_arp={(ip_address mac_address networkid <true>) }<line_sep><await>call_api(self 'arpresponder' 'removeproxyarp' {'connection':connection 'arpentries':remove_arp})<line_sep>cmds.extend(_deleteicmpflows(ip_address mac_address nid))<block_end><block_end><await>self.execute_commands(connection cmds)<for_stmt>subnet currentsubnetsinfo.keys()<block_start><if_stmt>subnet<not><in>lastsubnetsinfo<or>(subnet<in>lastsubnetsinfo<and>lastsubnetsinfo[subnet]<ne>currentsubnetsinfo[subnet])<block_start>ip_address,mac_address,networkid,nid=currentsubnetsinfo[subnet]<line_sep>add_arp={(ip_address mac_address networkid <true>) }<line_sep><await>call_api(self 'arpresponder' 'createproxyarp' {'connection':connection 'arpentries':add_arp})<line_sep>cmds.extend(_createicmpflows(ip_address mac_address nid))<block_end><block_end><await>self.execute_commands(connection cmds)<block_end><except_stmt>Exception<block_start>self._logger.warning("Unexpected exception in icmp_flow_updater, ignore it! Continue" exc_info=<true>)<block_end><block_end><block_end>@defaultconfig@depend(ofpportmanager.OpenflowPortManager objectdb.ObjectDB)<class_stmt>ICMPResponder(FlowBase)<block_start>""" Respond ICMP echo (ping) requests to the gateway """<line_sep>_tablerequest=(("l3input" ("l2input" ) "") ("l2output" ("l3input" ) ""))<line_sep># True : reply icmp ping with flow # False: reply icmp ping with controller PACKET_IN/PACKET_OUT # # Must use prepush=True with OpenvSwitch 2.5+ # _default_prepush=<false><line_sep># "Gateway" responds with this MAC address _default_inroutermac='1a:23:67:59:63:33'<def_stmt>__init__ self server<block_start>super(ICMPResponder self).__init__(server)<line_sep>self.app_routine=RoutineContainer(self.scheduler)<line_sep>self.app_routine.main=self._main<line_sep>self.routines.append(self.app_routine)<line_sep>self._flowupdater=dict()<block_end><async_keyword><def_stmt>_main self<block_start>flowinit=FlowInitialize.createMatcher(_ismatch=<lambda>x:self.vhostbind<is><none><or>x.vhost<in>self.vhostbind)<line_sep>conndown=OpenflowConnectionStateEvent.createMatcher(state=OpenflowConnectionStateEvent.CONNECTION_DOWN _ismatch=<lambda>x:self.vhostbind<is><none><or>x.createby.vhost<in>self.vhostbind)<while_stmt><true><block_start>ev,m=<await>M_(flowinit conndown)<if_stmt>m<is>flowinit<block_start>c=ev.connection<line_sep>self.app_routine.subroutine(self._init_conn(c))<block_end><if_stmt>m<is>conndown<block_start>c=ev.connection<line_sep>self.app_routine.subroutine(self._remove_conn(c))<block_end><block_end><block_end><async_keyword><def_stmt>_init_conn self conn<block_start><if_stmt>conn<in>self._flowupdater<block_start>updater=self._flowupdater.pop(conn)<line_sep>updater.close()<block_end>updater=ICMPResponderUpdater(conn 
self)<line_sep>self._flowupdater[conn]=updater<line_sep>updater.start()<block_end><async_keyword><def_stmt>_remove_conn self conn<block_start><if_stmt>conn<in>self._flowupdater<block_start>updater=self._flowupdater.pop(conn)<line_sep>updater.close()<block_end><block_end><block_end>
<import_from_future_stmt> print_function<import_stmt>boto3<import_stmt>base64<import_stmt>os<line_sep>SNS_TOPIC_ARN=os.environ["SNS_TOPIC_ARN"]<line_sep>sns=boto3.client("sns")<line_sep>print("Loading function")<def_stmt>lambda_handler event context<block_start>output=[]<line_sep>success=0<line_sep>failure=0<line_sep>highest_score=0<line_sep>print("event: {}".format(event))<line_sep>r=event["records"]<line_sep>print("records: {}".format(r))<line_sep>print("type_records: {}".format(type(r)))<for_stmt>record event["records"]<block_start><try_stmt># Uncomment the below line to publish the decoded data to the SNS topic. <block_start>payload=base64.b64decode(record["data"])<line_sep>print("payload: {}".format(payload))<line_sep>text=payload.decode("utf-8")<line_sep>print("text: {}".format(text))<line_sep>score=float(text)<if_stmt>(score<ne>0)<and>(score<g>highest_score)<block_start>highest_score=score<line_sep>print("New highest_score: {}".format(highest_score))<line_sep># sns.publish(TopicArn=SNS_TOPIC_ARN, Message='New anomaly score: {}'.format(text), Subject='New Reviews Anomaly Score Detected') output.append({"recordId":record["recordId"] "result":"Ok"})<line_sep>success<augadd>1<block_end><block_end><except_stmt>Exception<as>e<block_start>print(e)<line_sep>output.append({"recordId":record["recordId"] "result":"DeliveryFailed"})<line_sep>failure<augadd>1<block_end><block_end><if_stmt>highest_score<ne>0<block_start>sns.publish(TopicArn=SNS_TOPIC_ARN Message="New anomaly score: {}".format(str(highest_score)) Subject="New Reviews Anomaly Score Detected" )<block_end>print("Successfully delivered {0} records, failed to deliver {1} records".format(success failure))<line_sep><return>{"records":output}<block_end>
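# A hedged sketch of the record-transformation event this handler expects
# (Kinesis Data Analytics / Firehose output style; record ids and scores are
# made-up values):
#
#   import base64
#   event = {"records": [
#       {"recordId": "rec-1", "data": base64.b64encode(b"0.0").decode()},
#       {"recordId": "rec-2", "data": base64.b64encode(b"2.73").decode()},
#   ]}
#   # lambda_handler(event, None) marks both records "Ok" and publishes a single
#   # SNS message reporting the highest non-zero anomaly score (2.73 here).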
<import_from_stmt>.OutputValidator OutputValidator<class_stmt>SegfaultValidator(OutputValidator)<block_start>""" Validate that the target did not crash with a segmentation fault. """<def_stmt>validate self<block_start><return><true><block_end><block_end>
# Copyright 2019 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """Consolidated evaluator factory module. This module consolidates the creation of specific evaluator combinators, used throughout Pinpoint to evaluate task graphs we support. """<import_from_future_stmt> print_function<import_from_future_stmt> division<import_from_future_stmt> absolute_import<import_from_stmt>dashboard.pinpoint.models evaluators<import_from_stmt>dashboard.pinpoint.models.tasks find_isolate<import_from_stmt>dashboard.pinpoint.models.tasks performance_bisection<import_from_stmt>dashboard.pinpoint.models.tasks read_value<import_from_stmt>dashboard.pinpoint.models.tasks run_test<line_sep>EXCLUDED_PAYLOAD_KEYS={'commits' 'swarming_request_body'}<class_stmt>ExecutionEngine(evaluators.SequenceEvaluator)<block_start><def_stmt>__init__ self job# We gather all the evaluators from the modules we know. <block_start>super(ExecutionEngine self).__init__(evaluators=[evaluators.DispatchByTaskType({'find_isolate':find_isolate.Evaluator(job) 'find_culprit':performance_bisection.Evaluator(job) 'read_value':read_value.Evaluator(job) 'run_test':run_test.Evaluator(job) }) # We then always lift the task payload up, skipping some of the # larger objects that we know we are not going to need when deciding # what the end result is. evaluators.TaskPayloadLiftingEvaluator(exclude_keys=EXCLUDED_PAYLOAD_KEYS)])<block_end><block_end>
# ------------------------------------------------------------------------ # Copyright 2015 Intel Corporation # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ------------------------------------------------------------------------ <class_stmt>Configuration<block_start>"""Compiler-specific configuration abstract base class"""<def_stmt>__init__ self context<block_start>""" Initialize the Configuration object Arguments: context -- the scons configure context """<if_stmt>type(self)<is>Configuration<block_start><raise>TypeError('abstract class cannot be instantiated')<block_end>self._context=context# scons configure context self._env=context.env<block_end># scons environment <def_stmt>check_c99_flags self<block_start>""" Check if command line flag is required to enable C99 support. Returns 1 if no flag is required, 0 if no flag was found, and the actual flag if one was found. CFLAGS will be updated with appropriate C99 flag, accordingly. """<line_sep><return>self._check_flags(self._c99_flags() self._c99_test_program() '.c' 'CFLAGS')<block_end><def_stmt>check_cxx11_flags self<block_start>""" Check if command line flag is required to enable C++11 support. Returns 1 if no flag is required, 0 if no flag was found, and the actual flag if one was found. CXXFLAGS will be updated with appropriate C++11 flag, accordingly. """<line_sep><return>self._check_flags(self._cxx11_flags() self._cxx11_test_program() '.cpp' 'CXXFLAGS')<block_end><def_stmt>has_pthreads_support self<block_start>""" Check if PThreads are supported by this system Returns 1 if this system DOES support pthreads, 0 otherwise """<line_sep><return>self._context.TryCompile(self._pthreads_test_program() '.c')<block_end># -------------------------------------------------------------- # Check if flag is required to build the given test program. # # Arguments: # test_flags -- list of flags that may be needed to build # test_program # test_program -- program used used to determine if one of the # given flags is required to for a successful # build # test_extension -- file extension associated with the test # program, e.g. '.cpp' for C++ and '.c' for C # flags_key -- key used to retrieve compiler flags that may # be updated by this check from the SCons # environment # -------------------------------------------------------------- <def_stmt>_check_flags self test_flags test_program test_extension flags_key# Check if no additional flags are required. <block_start>ret=self._context.TryCompile(test_program test_extension)<if_stmt>ret<is>0# Try flags known to enable compiler features needed by # the test program. <block_start>last_flags=self._env[flags_key]<for_stmt>flag test_flags<block_start>self._env.Append(**{flags_key:flag})<line_sep>ret=self._context.TryCompile(test_program test_extension)<if_stmt>ret# Found a flag! <block_start><return>flag<block_end><else_stmt># Restore original compiler flags for next flag # test. 
<block_start>self._env.Replace(**{flags_key:last_flags})<block_end><block_end><block_end><return>ret<block_end># ------------------------------------------------------------ # Return test program to be used when checking for basic C99 # support. # # Subclasses should implement this template method or use the # default test program found in the DefaultConfiguration class # through composition. # ------------------------------------------------------------ <def_stmt>_c99_test_program self<block_start><raise>NotImplementedError('unimplemented method')<block_end># -------------------------------------------------------------- # Get list of flags that could potentially enable C99 support. # # Subclasses should implement this template method if flags are # needed to enable C99 support. # -------------------------------------------------------------- <def_stmt>_c99_flags self<block_start><raise>NotImplementedError('unimplemented method')<block_end># ------------------------------------------------------------ # Return test program to be used when checking for basic C++11 # support. # # Subclasses should implement this template method or use the # default test program found in the DefaultConfiguration class # through composition. # ------------------------------------------------------------ <def_stmt>_cxx11_test_program self<block_start><raise>NotImplementedError('unimplemented method')<block_end># -------------------------------------------------------------- # Get list of flags that could potentially enable C++11 support. # # Subclasses should implement this template method if flags are # needed to enable C++11 support. # -------------------------------------------------------------- <def_stmt>_cxx11_flags self<block_start><raise>NotImplementedError('unimplemented method')<block_end># -------------------------------------------------------------- # Return a test program to be used when checking for PThreads # support # # -------------------------------------------------------------- <def_stmt>_pthreads_test_program self<block_start><return>""" #include <unistd.h> #include <pthread.h> int main() { #ifndef _POSIX_THREADS # error POSIX Threads support not available #endif return 0; } """<block_end><block_end>
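# A hedged sketch of what a concrete subclass of the abstract Configuration class
# could look like; the flag lists and test programs below are illustrative of a
# GCC-like toolchain and are not the project's real compiler configurations.
#
#   class GccLikeConfiguration(Configuration):
#       def _c99_flags(self):
#           return ['-std=c99', '-std=gnu99']
#       def _c99_test_program(self):
#           return 'int main(void) { for (int i = 0; i < 1; ++i) {} return 0; }'
#       def _cxx11_flags(self):
#           return ['-std=c++11', '-std=c++0x']
#       def _cxx11_test_program(self):
#           return 'int main() { auto x = 0; return x; }'
#
#   # Usage with a SCons configure context:
#   #   cfg = GccLikeConfiguration(conf)
#   #   flag = cfg.check_c99_flags()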
# Copyright 2019 <NAME> # License: Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0) <import_stmt>math<import_stmt>numpy<as>np<import_stmt>torch<as>th<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>tf<import_stmt>librosa.filters<as>filters<import_from_stmt>aps.const EPSILON TORCH_VERSION<import_from_stmt>typing Optional Tuple<import_from_stmt>distutils.version LooseVersion<if_stmt>TORCH_VERSION<ge>LooseVersion("1.7")<block_start><import_from_stmt>torch.fft fft<as>fft_func<block_end><else_stmt><block_start><pass><block_end><def_stmt>export_jit transform:nn.Module<arrow>nn.Module<block_start>""" Export transform module for inference """<line_sep>export_out=[module<for>module transform<if>module.exportable()]<line_sep><return>nn.Sequential(*export_out)<block_end><def_stmt>init_window wnd:str frame_len:int device:th.device="cpu"<arrow>th.Tensor<block_start>""" Return window coefficient Args: wnd: window name frame_len: length of the frame """<def_stmt>sqrthann frame_len periodic=<true><block_start><return>th.hann_window(frame_len periodic=periodic)<power>0.5<block_end><if_stmt>wnd<not><in>["bartlett" "hann" "hamm" "blackman" "rect" "sqrthann"]<block_start><raise>RuntimeError(f"Unknown window type: {wnd}")<block_end>wnd_tpl={"sqrthann":sqrthann "hann":th.hann_window "hamm":th.hamming_window "blackman":th.blackman_window "bartlett":th.bartlett_window "rect":th.ones}<if_stmt>wnd<ne>"rect"# match with librosa <block_start>c=wnd_tpl[wnd](frame_len periodic=<true>)<block_end><else_stmt><block_start>c=wnd_tpl[wnd](frame_len)<block_end><return>c.to(device)<block_end><def_stmt>init_kernel frame_len:int frame_hop:int window:th.Tensor round_pow_of_two:bool=<true> normalized:bool=<false> inverse:bool=<false> mode:str="librosa"<arrow>Tuple[th.Tensor th.Tensor]<block_start>""" Return STFT kernels Args: frame_len: length of the frame frame_hop: hop size between frames window: window tensor round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized: return normalized DFT matrix inverse: return iDFT matrix mode: framing mode (librosa or kaldi) """<if_stmt>mode<not><in>["librosa" "kaldi"]<block_start><raise>ValueError(f"Unsupported mode: {mode}")<block_end># FFT size: B <if_stmt>round_pow_of_two<or>mode<eq>"kaldi"<block_start>fft_size=2<power>math.ceil(math.log2(frame_len))<block_end><else_stmt><block_start>fft_size=frame_len<block_end># center padding window if needed <if_stmt>mode<eq>"librosa"<and>fft_size<ne>frame_len<block_start>lpad=(fft_size-frame_len)<floordiv>2<line_sep>window=tf.pad(window (lpad fft_size-frame_len-lpad))<block_end><if_stmt>normalized# make K^H * K = I <block_start>S=fft_size<power>0.5<block_end><else_stmt><block_start>S=1<block_end># W x B x 2 <if_stmt>TORCH_VERSION<ge>LooseVersion("1.7")<block_start>K=fft_func(th.eye(fft_size)/S dim=-1)<line_sep>K=th.stack([K.real K.imag] dim=-1)<block_end><else_stmt><block_start>I=th.stack([th.eye(fft_size) th.zeros(fft_size fft_size)] dim=-1)<line_sep>K=th.fft(I/S 1)<block_end><if_stmt>mode<eq>"kaldi"<block_start>K=K[:frame_len]<block_end><if_stmt>inverse<and><not>normalized# to make K^H * K = I <block_start>K=K/fft_size<block_end># 2 x B x W K=th.transpose(K 0 2)<line_sep># 2B x 1 x W K=th.reshape(K (fft_size<times>2 1 K.shape[-1]))<line_sep><return>K.to(window.device) window<block_end><def_stmt>mel_filter frame_len:int round_pow_of_two:bool=<true> num_bins:Optional[int]=<none> sr:int=16000 num_mels:int=80 fmin:float=0.0 fmax:Optional[float]=<none> norm:bool=<false><arrow>th.Tensor<block_start>""" Return 
mel filter coefficients Args: frame_len: length of the frame round_pow_of_two: if true, choose round(#power_of_two) as the FFT size num_bins: number of the frequency bins produced by STFT num_mels: number of the mel bands fmin: lowest frequency (in Hz) fmax: highest frequency (in Hz) norm: normalize the mel filter coefficients """<line_sep># FFT points <if_stmt>num_bins<is><none><block_start>N=2<power>math.ceil(math.log2(frame_len))<if>round_pow_of_two<else>frame_len<block_end><else_stmt><block_start>N=(num_bins-1)<times>2<block_end># fmin & fmax freq_upper=sr<floordiv>2<if_stmt>fmax<is><none><block_start>fmax=freq_upper<block_end><else_stmt><block_start>fmax=min(fmax+freq_upper<if>fmax<l>0<else>fmax freq_upper)<block_end>fmin=max(0 fmin)<line_sep># mel filter coefficients mel=filters.mel(sr N n_mels=num_mels fmax=fmax fmin=fmin htk=<true> norm="slaney"<if>norm<else><none>)<line_sep># num_mels x (N // 2 + 1) <return>th.tensor(mel dtype=th.float32)<block_end><def_stmt>speed_perturb_filter src_sr:int dst_sr:int cutoff_ratio:float=0.95 num_zeros:int=64<arrow>th.Tensor<block_start>""" Return speed perturb filters, reference: https://github.com/danpovey/filtering/blob/master/lilfilter/resampler.py Args: src_sr: sample rate of the source signal dst_sr: sample rate of the target signal Return: weight (Tensor): coefficients of the filter """<if_stmt>src_sr<eq>dst_sr<block_start><raise>ValueError(f"src_sr should not be equal to dst_sr: {src_sr}/{dst_sr}")<block_end>gcd=math.gcd(src_sr dst_sr)<line_sep>src_sr=src_sr<floordiv>gcd<line_sep>dst_sr=dst_sr<floordiv>gcd<if_stmt>src_sr<eq>1<or>dst_sr<eq>1<block_start><raise>ValueError("do not support integer downsample/upsample")<block_end>zeros_per_block=min(src_sr dst_sr)<times>cutoff_ratio<line_sep>padding=1+int(num_zeros/zeros_per_block)<line_sep># dst_sr x src_sr x K times=(np.arange(dst_sr)[: <none> <none>]/float(dst_sr)-np.arange(src_sr)[<none> : <none>]/float(src_sr)-np.arange(2<times>padding+1)[<none> <none> :]+padding)<line_sep>window=np.heaviside(1-np.abs(times/padding) 0.0)<times>(0.5+0.5<times>np.cos(times/padding<times>math.pi))<line_sep>weight=np.sinc(times<times>zeros_per_block)<times>window<times>zeros_per_block/float(src_sr)<line_sep><return>th.tensor(weight dtype=th.float32)<block_end><def_stmt>splice_feature feats:th.Tensor lctx:int=1 rctx:int=1 op:str="cat"<arrow>th.Tensor<block_start>""" Splice feature Args: feats (Tensor): N x ... x T x F, original feature lctx: left context rctx: right context op: operator on feature context Return: splice (Tensor): feature with context padded """<if_stmt>lctx+rctx<eq>0<block_start><return>feats<block_end><if_stmt>op<not><in>["cat" "stack"]<block_start><raise>ValueError(f"Unknown op for feature splicing: {op}")<block_end># [N x ... x T x F, ...] ctx=[]<line_sep>T=feats.shape[-2]<for_stmt>c range(-lctx rctx+1)<block_start>idx=th.arange(c c+T device=feats.device dtype=th.int64)<line_sep>idx=th.clamp(idx min=0 max=T-1)<line_sep>ctx.append(th.index_select(feats -2 idx))<block_end><if_stmt>op<eq>"cat"# N x ... x T x FD <block_start>splice=th.cat(ctx -1)<block_end><else_stmt># N x ... 
x T x F x D <block_start>splice=th.stack(ctx -1)<block_end><return>splice<block_end><def_stmt>_forward_stft wav:th.Tensor kernel:th.Tensor window:th.Tensor return_polar:bool=<false> pre_emphasis:float=0 frame_hop:int=256 onesided:bool=<false> center:bool=<false> eps:float=EPSILON<arrow>th.Tensor<block_start>""" STFT function implemented by conv1d (not efficient, but we don't care during training) Args: wav (Tensor): N x (C) x S kernel (Tensor): STFT transform kernels, from init_kernel(...) return_polar: return [magnitude; phase] Tensor or [real; imag] Tensor pre_emphasis: factor of preemphasis frame_hop: frame hop size in number samples onesided: return half FFT bins center: if true, we assumed to have centered frames Return: transform (Tensor): STFT transform results """<line_sep>wav_dim=wav.dim()<if_stmt>wav_dim<not><in>[2 3]<block_start><raise>RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")<block_end># if N x S, reshape N x 1 x S # else: reshape NC x 1 x S N,S=wav.shape[0] wav.shape[-1]<line_sep>wav=wav.view(-1 1 S)<line_sep># NC x 1 x S+2P <if_stmt>center<block_start>pad=kernel.shape[-1]<floordiv>2<line_sep># NOTE: match with librosa wav=tf.pad(wav (pad pad) mode="reflect")<block_end># STFT kernel=kernel<times>window<if_stmt>pre_emphasis<g>0# NC x W x T <block_start>frames=tf.unfold(wav[: <none>] (1 kernel.shape[-1]) stride=frame_hop padding=0)<line_sep># follow Kaldi's Preemphasize frames[: 1:]=frames[: 1:]-pre_emphasis<times>frames[: :-1]<line_sep>frames[: 0]<augmul>(1-pre_emphasis)<line_sep># 1 x 2B x W, NC x W x T, NC x 2B x T packed=th.matmul(kernel[: 0][<none> <ellipsis>] frames)<block_end><else_stmt><block_start>packed=tf.conv1d(wav kernel stride=frame_hop padding=0)<block_end># NC x 2B x T => N x C x 2B x T <if_stmt>wav_dim<eq>3<block_start>packed=packed.view(N -1 packed.shape[-2] packed.shape[-1])<block_end># N x (C) x B x T real,imag=th.chunk(packed 2 dim=-2)<line_sep># N x (C) x B/2+1 x T <if_stmt>onesided<block_start>num_bins=kernel.shape[0]<floordiv>4+1<line_sep>real=real[<ellipsis> :num_bins :]<line_sep>imag=imag[<ellipsis> :num_bins :]<block_end><if_stmt>return_polar<block_start>mag=(real<power>2+imag<power>2+eps)<power>0.5<line_sep>pha=th.atan2(imag real)<line_sep><return>th.stack([mag pha] dim=-1)<block_end><else_stmt><block_start><return>th.stack([real imag] dim=-1)<block_end><block_end><def_stmt>_inverse_stft transform:th.Tensor kernel:th.Tensor window:th.Tensor return_polar:bool=<false> frame_hop:int=256 onesided:bool=<false> center:bool=<false> eps:float=EPSILON<arrow>th.Tensor<block_start>""" iSTFT function implemented by conv1d Args: transform (Tensor): STFT transform results kernel (Tensor): STFT transform kernels, from init_kernel(...) 
return_polar (bool): keep same with the one in _forward_stft frame_hop: frame hop size in number samples onesided: return half FFT bins center: used in _forward_stft Return: wav (Tensor), N x S """<line_sep># (N) x F x T x 2 transform_dim=transform.dim()<line_sep># if F x T x 2, reshape 1 x F x T x 2 <if_stmt>transform_dim<eq>3<block_start>transform=th.unsqueeze(transform 0)<block_end><if_stmt>transform_dim<ne>4<block_start><raise>RuntimeError(f"Expect 4D tensor, but got {transform_dim}D")<block_end><if_stmt>return_polar<block_start>real=transform[<ellipsis> 0]<times>th.cos(transform[<ellipsis> 1])<line_sep>imag=transform[<ellipsis> 0]<times>th.sin(transform[<ellipsis> 1])<block_end><else_stmt><block_start>real,imag=transform[<ellipsis> 0] transform[<ellipsis> 1]<block_end><if_stmt>onesided# [self.num_bins - 2, ..., 1] <block_start>reverse=range(kernel.shape[0]<floordiv>4-1 0 -1)<line_sep># extend matrix: N x B x T real=th.cat([real real[: reverse]] 1)<line_sep>imag=th.cat([imag -imag[: reverse]] 1)<block_end># pack: N x 2B x T packed=th.cat([real imag] dim=1)<line_sep># N x 1 x T wav=tf.conv_transpose1d(packed kernel<times>window stride=frame_hop padding=0)<line_sep># normalized audio samples # refer: https://github.com/pytorch/audio/blob/2ebbbf511fb1e6c47b59fd32ad7e66023fa0dff1/torchaudio/functional.py#L171 num_frames=packed.shape[-1]<line_sep>win_length=window.shape[0]<line_sep># W x T win=th.repeat_interleave(window[<ellipsis> <none>]<power>2 num_frames dim=-1)<line_sep># Do OLA on windows # v1) I=th.eye(win_length device=win.device)[: <none>]<line_sep>denorm=tf.conv_transpose1d(win[<none> <ellipsis>] I stride=frame_hop padding=0)<line_sep># v2) # num_samples = (num_frames - 1) * frame_hop + win_length # denorm = tf.fold(win[None, ...], (num_samples, 1), (win_length, 1), # stride=frame_hop)[..., 0] <if_stmt>center<block_start>pad=kernel.shape[-1]<floordiv>2<line_sep>wav=wav[<ellipsis> pad:-pad]<line_sep>denorm=denorm[<ellipsis> pad:-pad]<block_end>wav=wav/(denorm+eps)<line_sep># N x S <return>wav.squeeze(1)<block_end><def_stmt>_pytorch_stft wav:th.Tensor frame_len:int frame_hop:int n_fft:int=512 return_polar:bool=<false> window:str="sqrthann" normalized:bool=<false> onesided:bool=<true> center:bool=<false> eps:float=EPSILON<arrow>th.Tensor<block_start>""" Wrapper of PyTorch STFT function Args: wav (Tensor): source audio signal frame_len: length of the frame frame_hop: hop size between frames n_fft: number of the FFT size return_polar: return the results in polar coordinate window: window tensor center: same definition with the parameter in librosa.stft normalized: use normalized DFT kernel onesided: output onesided STFT Return: transform (Tensor), STFT transform results """<if_stmt>TORCH_VERSION<l>LooseVersion("1.7")<block_start><raise>RuntimeError("Can not use this function as TORCH_VERSION < 1.7")<block_end>wav_dim=wav.dim()<if_stmt>wav_dim<not><in>[2 3]<block_start><raise>RuntimeError(f"STFT expect 2D/3D tensor, but got {wav_dim:d}D")<block_end># if N x C x S, reshape NC x S wav=wav.view(-1 wav.shape[-1])<line_sep># STFT: N x F x T x 2 stft=th.stft(wav n_fft hop_length=frame_hop win_length=window.shape[-1] window=window center=center normalized=normalized onesided=onesided return_complex=<false>)<if_stmt>wav_dim<eq>3<block_start>N,F,T,_=stft.shape<line_sep>stft=stft.view(N -1 F T 2)<block_end># N x (C) x F x T x 2 <if_stmt><not>return_polar<block_start><return>stft<block_end># N x (C) x F x T real,imag=stft[<ellipsis> 0] stft[<ellipsis> 
1]<line_sep>mag=(real<power>2+imag<power>2+eps)<power>0.5<line_sep>pha=th.atan2(imag real)<line_sep><return>th.stack([mag pha] dim=-1)<block_end><def_stmt>_pytorch_istft transform:th.Tensor frame_len:int frame_hop:int window:th.Tensor n_fft:int=512 return_polar:bool=<false> normalized:bool=<false> onesided:bool=<true> center:bool=<false> eps:float=EPSILON<arrow>th.Tensor<block_start>""" Wrapper of PyTorch iSTFT function Args: transform (Tensor): results of STFT frame_len: length of the frame frame_hop: hop size between frames window: window tensor n_fft: number of the FFT size return_polar: keep same with _pytorch_stft center: same definition with the parameter in librosa.stft normalized: use normalized DFT kernel onesided: output onesided STFT Return: wav (Tensor): synthetic audio """<if_stmt>TORCH_VERSION<l>LooseVersion("1.7")<block_start><raise>RuntimeError("Can not use this function as TORCH_VERSION < 1.7")<block_end>transform_dim=transform.dim()<line_sep># if F x T x 2, reshape 1 x F x T x 2 <if_stmt>transform_dim<eq>3<block_start>transform=th.unsqueeze(transform 0)<block_end><if_stmt>transform_dim<ne>4<block_start><raise>RuntimeError(f"Expect 4D tensor, but got {transform_dim}D")<block_end><if_stmt>return_polar<block_start>real=transform[<ellipsis> 0]<times>th.cos(transform[<ellipsis> 1])<line_sep>imag=transform[<ellipsis> 0]<times>th.sin(transform[<ellipsis> 1])<line_sep>transform=th.stack([real imag] -1)<block_end># stft is a complex tensor of PyTorch stft=th.view_as_complex(transform)<line_sep># (N) x S wav=th.istft(stft n_fft hop_length=frame_hop win_length=window.shape[-1] window=window center=center normalized=normalized onesided=onesided return_complex=<false>)<line_sep><return>wav<block_end><def_stmt>forward_stft wav:th.Tensor frame_len:int frame_hop:int window:str="sqrthann" round_pow_of_two:bool=<true> return_polar:bool=<false> pre_emphasis:float=0 normalized:bool=<false> onesided:bool=<true> center:bool=<false> mode:str="librosa" eps:float=EPSILON<arrow>th.Tensor<block_start>""" STFT function implementation, equals to STFT layer Args: wav: source audio signal frame_len: length of the frame frame_hop: hop size between frames return_polar: return [magnitude; phase] Tensor or [real; imag] Tensor window: window name center: center flag (similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size pre_emphasis: factor of preemphasis normalized: use normalized DFT kernel onesided: output onesided STFT inverse: using iDFT kernel (for iSTFT) mode: STFT mode, "kaldi" or "librosa" or "torch" Return: transform: results of STFT """<line_sep>window=init_window(window frame_len device=wav.device)<if_stmt>mode<eq>"torch"<block_start>n_fft=2<power>math.ceil(math.log2(frame_len))<if>round_pow_of_two<else>frame_len<line_sep><return>_pytorch_stft(wav frame_len frame_hop n_fft=n_fft return_polar=return_polar window=window normalized=normalized onesided=onesided center=center eps=eps)<block_end><else_stmt><block_start>kernel,window=init_kernel(frame_len frame_hop window=window round_pow_of_two=round_pow_of_two normalized=normalized inverse=<false> mode=mode)<line_sep><return>_forward_stft(wav kernel window return_polar=return_polar frame_hop=frame_hop pre_emphasis=pre_emphasis onesided=onesided center=center eps=eps)<block_end><block_end><def_stmt>inverse_stft transform:th.Tensor frame_len:int frame_hop:int return_polar:bool=<false> window:str="sqrthann" round_pow_of_two:bool=<true> normalized:bool=<false> onesided:bool=<true> center:bool=<false> 
mode:str="librosa" eps:float=EPSILON<arrow>th.Tensor<block_start>""" iSTFT function implementation, equals to iSTFT layer Args: transform: results of STFT frame_len: length of the frame frame_hop: hop size between frames return_polar: keep same with function forward_stft(...) window: window name center: center flag (similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized: use normalized DFT kernel onesided: output onesided STFT mode: STFT mode, "kaldi" or "librosa" or "torch" Return: wav: synthetic signals """<line_sep>window=init_window(window frame_len device=transform.device)<if_stmt>mode<eq>"torch"<block_start>n_fft=2<power>math.ceil(math.log2(frame_len))<if>round_pow_of_two<else>frame_len<line_sep><return>_pytorch_istft(transform frame_len frame_hop n_fft=n_fft return_polar=return_polar window=window normalized=normalized onesided=onesided center=center eps=eps)<block_end><else_stmt><block_start>kernel,window=init_kernel(frame_len frame_hop window round_pow_of_two=round_pow_of_two normalized=normalized inverse=<true> mode=mode)<line_sep><return>_inverse_stft(transform kernel window return_polar=return_polar frame_hop=frame_hop onesided=onesided center=center eps=eps)<block_end><block_end><class_stmt>STFTBase(nn.Module)<block_start>""" Base layer for (i)STFT Args: frame_len: length of the frame frame_hop: hop size between frames window: window name center: center flag (similar with that in librosa.stft) round_pow_of_two: if true, choose round(#power_of_two) as the FFT size normalized: use normalized DFT kernel pre_emphasis: factor of preemphasis mode: STFT mode, "kaldi" or "librosa" or "torch" onesided: output onesided STFT inverse: using iDFT kernel (for iSTFT) """<def_stmt>__init__ self frame_len:int frame_hop:int window:str="sqrthann" round_pow_of_two:bool=<true> normalized:bool=<false> pre_emphasis:float=0 onesided:bool=<true> inverse:bool=<false> center:bool=<false> mode:str="librosa"<arrow><none><block_start>super(STFTBase self).__init__()<if_stmt>mode<ne>"torch"<block_start>K,w=init_kernel(frame_len frame_hop init_window(window frame_len) round_pow_of_two=round_pow_of_two normalized=normalized inverse=inverse mode=mode)<line_sep>self.K=nn.Parameter(K requires_grad=<false>)<line_sep>self.w=nn.Parameter(w requires_grad=<false>)<line_sep>self.num_bins=self.K.shape[0]<floordiv>4+1<line_sep>self.pre_emphasis=pre_emphasis<line_sep>self.win_length=self.K.shape[2]<block_end><else_stmt><block_start>self.K=<none><line_sep>w=init_window(window frame_len)<line_sep>self.w=nn.Parameter(w requires_grad=<false>)<line_sep>fft_size=2<power>math.ceil(math.log2(frame_len))<if>round_pow_of_two<else>frame_len<line_sep>self.num_bins=fft_size<floordiv>2+1<line_sep>self.pre_emphasis=0<line_sep>self.win_length=fft_size<block_end>self.frame_len=frame_len<line_sep>self.frame_hop=frame_hop<line_sep>self.window=window<line_sep>self.normalized=normalized<line_sep>self.onesided=onesided<line_sep>self.center=center<line_sep>self.mode=mode<block_end><def_stmt>num_frames self wav_len:th.Tensor<arrow>th.Tensor<block_start>""" Compute number of the frames """<assert_stmt>th.sum(wav_len<le>self.win_length)<eq>0<if_stmt>self.center<block_start>wav_len<augadd>self.win_length<block_end><return>th.div(wav_len-self.win_length self.frame_hop rounding_mode="trunc")+1<block_end><def_stmt>extra_repr self<arrow>str<block_start>str_repr=(f"num_bins={self.num_bins}, win_length={self.win_length}, "+f"stride={self.frame_hop}, window={self.window}, 
"+f"center={self.center}, mode={self.mode}")<if_stmt><not>self.onesided<block_start>str_repr<augadd>f", onesided={self.onesided}"<block_end><if_stmt>self.pre_emphasis<g>0<block_start>str_repr<augadd>f", pre_emphasis={self.pre_emphasis}"<block_end><if_stmt>self.normalized<block_start>str_repr<augadd>f", normalized={self.normalized}"<block_end><return>str_repr<block_end><block_end><class_stmt>STFT(STFTBase)<block_start>""" Short-time Fourier Transform as a Layer """<def_stmt>__init__ self *args **kwargs<block_start>super(STFT self).__init__(*args inverse=<false> **kwargs)<block_end><def_stmt>forward self wav:th.Tensor return_polar:bool=<false> eps:float=EPSILON<arrow>th.Tensor<block_start>""" Accept (single or multiple channel) raw waveform and output magnitude and phase Args wav (Tensor) input signal, N x (C) x S Return transform (Tensor), N x (C) x F x T x 2 """<if_stmt>self.mode<eq>"torch"<block_start><return>_pytorch_stft(wav self.frame_len self.frame_hop n_fft=(self.num_bins-1)<times>2 return_polar=return_polar window=self.w normalized=self.normalized onesided=self.onesided center=self.center eps=eps)<block_end><else_stmt><block_start><return>_forward_stft(wav self.K self.w return_polar=return_polar frame_hop=self.frame_hop pre_emphasis=self.pre_emphasis onesided=self.onesided center=self.center eps=eps)<block_end><block_end><block_end><class_stmt>iSTFT(STFTBase)<block_start>""" Inverse Short-time Fourier Transform as a Layer """<def_stmt>__init__ self *args **kwargs<block_start>super(iSTFT self).__init__(*args inverse=<true> **kwargs)<block_end><def_stmt>forward self transform:th.Tensor return_polar:bool=<false> eps:float=EPSILON<arrow>th.Tensor<block_start>""" Accept phase & magnitude and output raw waveform Args transform (Tensor): STFT output, N x F x T x 2 Return s (Tensor): N x S """<if_stmt>self.mode<eq>"torch"<block_start><return>_pytorch_istft(transform self.frame_len self.frame_hop n_fft=(self.num_bins-1)<times>2 return_polar=return_polar window=self.w normalized=self.normalized onesided=self.onesided center=self.center eps=eps)<block_end><else_stmt><block_start><return>_inverse_stft(transform self.K self.w return_polar=return_polar frame_hop=self.frame_hop onesided=self.onesided center=self.center eps=eps)<block_end><block_end><block_end>
"""Contains a Graph Attention Network v2 and associated layers."""<import_from_stmt>typing Any Callable Mapping Optional Union<import_stmt>tensorflow<as>tf<import_stmt>tensorflow_gnn<as>tfgnn<line_sep>@tf.keras.utils.register_keras_serializable(package="GNN>models>gat_v2")<class_stmt>GATv2Conv(tfgnn.keras.layers.AnyToAnyConvolutionBase)<block_start>"""The multi-head attention from Graph Attention Networks v2 (GATv2). GATv2 (https://arxiv.org/abs/2105.14491) improves upon the popular GAT architecture (https://arxiv.org/abs/1710.10903) by allowing the network to compute a more expressive "dynamic" instead of just "static" attention, each of whose heads is described by Equations (7), (3) and (4) in https://arxiv.org/abs/2105.14491. Example: GATv2-style attention on incoming edges whose result is concatenated with the old node state and passed through a Dense layer to compute the new node state. ``` dense = tf.keras.layers.Dense graph = tfgnn.keras.layers.GraphUpdate( node_sets={"paper": tfgnn.keras.layers.NodeSetUpdate( {"cites": tfgnn.keras.layers.GATv2Conv( message_dim, receiver_tag=tfgnn.TARGET)}, tfgnn.keras.layers.NextStateFromConcat(dense(node_state_dim)))} )(graph) ``` This layer implements the multi-head attention of GATv2 with the following generalizations: * This implementation of GATv2 attends only to edges that are explicitly stored in the input GraphTensor. Attention of a node to itself is enabled or disabled by storing or not storing an explicit loop in the edge set. The example above uses a separate layer to combine the old node state with the attention result to form the new node state. * Attention values can be computed from a sender node state that gets broadcast onto the edge (see arg `sender_node_feature`), from an edge feature (see arg `sender_edge_feature`), or from their concatenation (by setting both arguments). This choice is used in place of the sender node state $h_j$ in the defining equations cited above. * This layer can be used with `receiver_tag=tfgnn.CONTEXT` to perform a convolution to the context, with graph components as receivers and the containment in graph components used in lieu of edges. * An `edge_dropout` option is provided. This layer can also be configured to do attention pooling from edges to context or to receiver nodes (without regard for source nodes) by setting `sender_node_feature=None` and setting `sender_edge_feature=...` to the applicable edge feature name (e.g., `tfgnn.DEFAULT_FEATURE_NAME`). Like the Keras Dense layer, if the input features have rank greater than 2, this layer computes a point-wise attention along the last axis of the inputs. For example, if the input features have shape [num_nodes, 2, 4, 1], then it will perform an identical computation on each of the num_nodes * 2 * 4 input values. Init args: num_heads: The number of attention heads. per_head_channels: The number of channels for each attention head. This means that the final output size will be per_head_channels * num_heads. receiver_tag: one of `tfgnn.SOURCE`, `tfgnn.TARGET` or `tfgnn.CONTEXT`. The results of attention are aggregated for this graph piece. If set to `tfgnn.SOURCE` or `tfgnn.TARGET`, the layer can be called for an edge set and will aggregate results at the specified endpoint of the edges. If set to `tfgnn.CONTEXT`, the layer can be called for an edge set or node set. If left unset for init, the tag must be passed at call time. receiver_feature: Can be set to override `tfgnn.DEFAULT_FEATURE_NAME` for use as the receiver's input feature to attention. 
(The attention key is derived from this input.) sender_node_feature: Can be set to override `tfgnn.DEFAULT_FEATURE_NAME` for use as the input feature from sender nodes to attention. IMPORTANT: Must be set to `None` for use with `receiver_tag=tfgnn.CONTEXT` on an edge set, or for pooling from edges without sender node states. sender_edge_feature: Can be set to a feature name of the edge set to select it as an input feature. By default, this is set to `None`, which disables this input. IMPORTANT: Must be set for use with `receiver_tag=tfgnn.CONTEXT` on an edge set. use_bias: If true, a bias term is added to the transformations of query and value inputs. edge_dropout: Can be set to a dropout rate for edge dropout. (When pooling nodes to context, it's the node's membership in a graph component that is dropped out.) attention_activation: The nonlinearity used on the transformed inputs before multiplying with the trained weights of the attention layer. This can be specified as a Keras layer, a tf.keras.activations.* function, or a string understood by tf.keras.layers.Activation(). Defaults to "leaky_relu", which in turn defaults to a negative slope of alpha=0.2. activation: The nonlinearity applied to the final result of attention, specified in the same ways as attention_activation. kernel_initializer: Can be set to a `kernel_initializer` as understood by tf.keras.layers.Dense etc. """<def_stmt>__init__ self * num_heads:int per_head_channels:int receiver_tag:Optional[tfgnn.IncidentNodeOrContextTag]=<none> receiver_feature:tfgnn.FieldName=tfgnn.HIDDEN_STATE sender_node_feature:Optional[tfgnn.FieldName]=tfgnn.HIDDEN_STATE sender_edge_feature:Optional[tfgnn.FieldName]=<none> use_bias:bool=<true> edge_dropout:float=0. attention_activation:Union[str Callable[<ellipsis> Any]]="leaky_relu" activation:Union[str Callable[<ellipsis> Any]]="relu" kernel_initializer:Union[<none> str tf.keras.initializers.Initializer]=<none> **kwargs<block_start>kwargs.setdefault("name" "gat_v2_conv")<line_sep>super().__init__(receiver_tag=receiver_tag receiver_feature=receiver_feature sender_node_feature=sender_node_feature sender_edge_feature=sender_edge_feature extra_receiver_ops={"softmax":tfgnn.softmax} **kwargs)<if_stmt><not>self.takes_receiver_input<block_start><raise>ValueError("Receiver feature cannot be None")<block_end><if_stmt>num_heads<le>0<block_start><raise>ValueError(f"Number of heads {num_heads} must be greater than 0.")<block_end>self._num_heads=num_heads<if_stmt>per_head_channels<le>0<block_start><raise>ValueError(f"Per-head channels {per_head_channels} must be greater than 0.")<block_end>self._per_head_channels=per_head_channels<line_sep>self._use_bias=use_bias<if_stmt><not>0<le>edge_dropout<l>1<block_start><raise>ValueError(f"Edge dropout {edge_dropout} must be in [0, 1).")<block_end>self._edge_dropout=edge_dropout<line_sep>self._attention_activation=tf.keras.activations.get(attention_activation)<line_sep>self._activation=tf.keras.activations.get(activation)<line_sep>self._kernel_initializer=kernel_initializer<line_sep># Create the transformations for the query input in all heads. self._w_query=tf.keras.layers.Dense(per_head_channels<times>num_heads kernel_initializer=kernel_initializer # This bias gets added to the attention features but not the outputs. use_bias=use_bias name="query")<line_sep># Create the transformations for value input from sender nodes and edges.
<if_stmt>self.takes_sender_node_input<block_start>self._w_sender_node=tf.keras.layers.Dense(per_head_channels<times>num_heads kernel_initializer=kernel_initializer # This bias gets added to the attention features and the outputs. use_bias=use_bias name="value_node")<block_end><else_stmt><block_start>self._w_sender_node=<none><block_end><if_stmt>self.takes_sender_edge_input<block_start>self._w_sender_edge=tf.keras.layers.Dense(per_head_channels<times>num_heads kernel_initializer=kernel_initializer # This bias would be redundant with self._w_sender_node. use_bias=use_bias<and>self._w_sender_node<is><none> name="value_edge")<block_end><else_stmt><block_start>self._w_sender_edge=<none><block_end><if_stmt>self._w_sender_node<is><none><and>self._w_sender_edge<is><none><block_start><raise>ValueError("GATv2Attention initialized with no inputs.")<block_end># Create attention logits layers, one for each head. Note that we can't # use a single Dense layer that outputs `num_heads` units because we need # to apply a different attention function a_k to its corresponding # W_k-transformed features. self._attention_logits_fn=tf.keras.layers.experimental.EinsumDense("...ik,ki->...i" output_shape=(<none> num_heads 1) # TODO(b/205825425): (num_heads,) kernel_initializer=kernel_initializer name="attn_logits")<block_end><def_stmt>get_config self<block_start><return>dict(num_heads=self._num_heads per_head_channels=self._per_head_channels use_bias=self._use_bias edge_dropout=self._edge_dropout attention_activation=self._attention_activation activation=self._activation kernel_initializer=self._kernel_initializer **super().get_config())<block_end><def_stmt>convolve self * sender_node_input:Optional[tf.Tensor] sender_edge_input:Optional[tf.Tensor] receiver_input:Optional[tf.Tensor] broadcast_from_sender_node:Callable[[tf.Tensor] tf.Tensor] broadcast_from_receiver:Callable[[tf.Tensor] tf.Tensor] pool_to_receiver:Callable[<ellipsis> tf.Tensor] extra_receiver_ops:Optional[Mapping[str Callable[<ellipsis> Any]]]=<none> training:bool<arrow>tf.Tensor# Form the attention query for each head. # [num_items, *extra_dims, num_heads, channels_per_head] <block_start><assert_stmt>receiver_input<is><not><none> "__init__() should have checked this."<line_sep>query=broadcast_from_receiver(self._split_heads(self._w_query(receiver_input)))<line_sep># Form the attention value by transforming the configured inputs # and adding up the transformed values. # [num_items, *extra_dims, num_heads, channels_per_head] value_terms=[]<if_stmt>sender_node_input<is><not><none><block_start>value_terms.append(broadcast_from_sender_node(self._split_heads(self._w_sender_node(sender_node_input))))<block_end><if_stmt>sender_edge_input<is><not><none><block_start>value_terms.append(self._split_heads(self._w_sender_edge(sender_edge_input)))<block_end><assert_stmt>value_terms "Internal error: no values, __init__ should catch this."<line_sep>value=tf.add_n(value_terms)<line_sep># Compute the features from which attention logits are computed. # [num_items, *extra_dims, num_heads, channels_per_head] attention_features=self._attention_activation(query+value)<line_sep># Compute the attention logits and softmax to get the coefficients. # [num_items, *extra_dims, num_heads, 1] logits=tf.expand_dims(self._attention_logits_fn(attention_features) -1)<line_sep>attention_coefficients=extra_receiver_ops["softmax"](logits)<if_stmt>training# Apply dropout to the normalized attention coefficients, as is done in # the original GAT paper. 
This should have the same effect as edge # dropout. Also, note that tf.nn.dropout upscales the remaining values, # which should maintain the sum-up-to-1 per node in expectation. <block_start>attention_coefficients=tf.nn.dropout(attention_coefficients self._edge_dropout)<block_end># Apply the attention coefficients to the transformed query. # [num_items, *extra_dims, num_heads, per_head_channels] messages=value<times>attention_coefficients<line_sep># Take the sum of the weighted values, which equals the weighted average. # Receivers without incoming senders get the empty sum 0. # [num_receivers, *extra_dims, num_heads, per_head_channels] pooled_messages=pool_to_receiver(messages reduce_type="sum")<line_sep># Apply the nonlinearity. pooled_messages=self._activation(pooled_messages)<line_sep>pooled_messages=self._merge_heads(pooled_messages)<line_sep><return>pooled_messages<block_end># The following helpers map forth and back between tensors with... # - a separate heads dimension: shape [..., num_heads, channels_per_head], # - all heads concatenated: shape [..., num_heads * channels_per_head]. <def_stmt>_split_heads self tensor<block_start>extra_dims=tensor.shape[1:-1]# Possibly empty. <if_stmt><not>extra_dims.is_fully_defined()<block_start><raise>ValueError("GATv2Attention requires non-ragged Tensors as inputs, "<concat>"and GraphTensor requires these to have statically known "<concat>f"dimensions except the first, but got {tensor.shape}")<block_end>new_shape=(-1 *extra_dims self._num_heads self._per_head_channels)<line_sep><return>tf.reshape(tensor new_shape)<block_end><def_stmt>_merge_heads self tensor<block_start>num_merged=2<line_sep>extra_dims=tensor.shape[1:-num_merged]# Possibly empty. merged_dims=tensor.shape[-num_merged:]<if_stmt><not>extra_dims.is_fully_defined()<or><not>merged_dims.is_fully_defined()<block_start><raise>ValueError(f"Unexpected unknown dimensions in shape {tensor.shape}")<block_end>new_shape=(-1 *extra_dims merged_dims.num_elements())<line_sep><return>tf.reshape(tensor new_shape)<block_end><block_end><def_stmt>GATv2EdgePool * # To be called like a class initializer. pylint: disable=invalid-name num_heads:int per_head_channels:int receiver_tag:Optional[tfgnn.IncidentNodeOrContextTag]=<none> receiver_feature:tfgnn.FieldName=tfgnn.HIDDEN_STATE sender_feature:tfgnn.FieldName=tfgnn.HIDDEN_STATE **kwargs<block_start>"""Returns a layer for pooling edges with GATv2-style attention. When initialized with receiver_tag SOURCE or TARGET, the returned layer can be called on an edge set to compute the weighted sum of edge states at the given endpoint. The weights are computed by the method of Graph Attention Networks v2 (GATv2), except that edge states, not node states broadcast from the edges' other endpoint, are used as input values to attention. When initialized with receiver_tag CONTEXT, the returned layer can be called on an edge set to do the analogous pooling of edge states to context. NOTE: This layer cannot pool node states. For that, use GATv2Conv. Args: num_heads: The number of attention heads. per_head_channels: The number of channels for each attention head. This means that the final output size will be per_head_channels * num_heads. receiver_tag: The results of attention are aggregated for this graph piece. If set to `tfgnn.CONTEXT`, the layer can be called for an edge set or node set. If set to an IncidentNodeTag (e.g., `tfgnn.SOURCE` or `tfgnn.TARGET`), the layer can be called for an edge set and will aggregate results at the specified endpoint of the edges. 
If left unset, the tag must be passed when calling the layer. receiver_feature: By default, the default state feature of the receiver is used to compute the attention query. A different feature name can be selected by setting this argument. sender_feature: By default, the default state feature of the edge set is used to compute the attention values. A different feature name can be selected by setting this argument. **kwargs: Any other option for GATv2Conv, except sender_node_feature, which is set to None. """<if_stmt>kwargs.pop("sender_node_feature" <none>)<is><not><none><block_start><raise>TypeError("GATv2EdgePool() got an unexpected keyword argument "<concat>"'sender_node_feature'. Did you mean GATv2Conv()?")<block_end>kwargs.setdefault("name" "gat_v2_edge_pool")<line_sep><return>GATv2Conv(num_heads=num_heads per_head_channels=per_head_channels receiver_tag=receiver_tag receiver_feature=receiver_feature sender_edge_feature=sender_feature sender_node_feature=<none> **kwargs)<block_end><def_stmt>GATv2GraphUpdate * # To be called like a class initializer. pylint: disable=invalid-name num_heads:int per_head_channels:int edge_set_name:str feature_name:str=tfgnn.HIDDEN_STATE name:str="gat_v2" **kwargs<block_start>"""Returns a GraphUpdater layer with a Graph Attention Network V2 (GATv2). The returned layer performs one update step of a Graph Attention Network v2 (GATv2) from https://arxiv.org/abs/2105.14491 on an edge set of a GraphTensor. It is best suited for graphs that have just that one edge set. For heterogeneous graphs with multiple node sets and edge sets, users are advised to consider a GraphUpdate with one or more GATv2Conv objects instead. This implementation of GAT attends only to edges that are explicitly stored in the input GraphTensor. Attention of a node to itself requires having an explicit loop in the edge set. Args: num_heads: The number of attention heads. per_head_channels: The number of channels for each attention head. This means that the final output size will be per_head_channels * num_heads. edge_set_name: A GATv2 update happens on this edge set and its incident node set(s) of the input GraphTensor. feature_name: The feature name of node states; defaults to tfgnn.HIDDEN_STATE. name: Optionally, a name for the layer returned. **kwargs: Any optional arguments to GATv2Conv, see there. """<line_sep># Compat logic, remove in late 2021. <if_stmt>"output_feature_name"<in>kwargs<block_start><raise>TypeError("Argument 'output_feature_name' is no longer supported.")<block_end># Build a GraphUpdate for the target node set of the given edge_set_name. # That needs to be deferred until we see a GraphTensorSpec that tells us # the node_set_name. <def_stmt>deferred_init_callback spec:tfgnn.GraphTensorSpec<block_start>node_set_name=spec.edge_sets_spec[edge_set_name].adjacency_spec.node_set_name(tfgnn.TARGET)<line_sep>node_set_updates={node_set_name:tfgnn.keras.layers.NodeSetUpdate({edge_set_name:GATv2Conv(num_heads=num_heads per_head_channels=per_head_channels receiver_tag=tfgnn.TARGET sender_node_feature=feature_name receiver_feature=feature_name **kwargs)} next_state=NextStateForNodeSetFromSingleEdgeSetInput() node_input_feature=feature_name)}<line_sep><return>dict(node_sets=node_set_updates)<block_end><return>tfgnn.keras.layers.GraphUpdate(deferred_init_callback=deferred_init_callback name=name)<block_end># For use by GATv2GraphUpdate(). 
@tf.keras.utils.register_keras_serializable(package="GNN>models>gat_v2")<class_stmt>NextStateForNodeSetFromSingleEdgeSetInput(tf.keras.layers.Layer)<block_start><def_stmt>call self inputs<block_start>unused_node_input,edge_inputs,unused_context_input=inputs<line_sep>single_edge_set_input,=edge_inputs.values()# Unpack. <return>single_edge_set_input<block_end><block_end>
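# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). It mirrors the
# example in the GATv2Conv docstring for a homogeneous graph with a single
# "cites" edge set; the head/channel counts and the edge-set name are
# illustrative only, and `graph` is assumed to be a scalar tfgnn.GraphTensor
# carrying HIDDEN_STATE features on the receiving node set.
# ---------------------------------------------------------------------------
def apply_gatv2_update(graph: tfgnn.GraphTensor) -> tfgnn.GraphTensor:
    layer = GATv2GraphUpdate(
        num_heads=4,             # number of attention heads
        per_head_channels=16,    # output width is num_heads * per_head_channels
        edge_set_name="cites")   # the update runs on this edge set's TARGET nodes
    return layer(graph)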
<import_stmt>torch<import_from_stmt>core.network.rainbow Rainbow<def_stmt>test_rainbow_call <block_start>D_in,D_out,D_hidden=2 3 4<line_sep>N_atom=5<line_sep>noise_type="factorized"<line_sep>net=Rainbow(D_in=D_in D_out=D_out N_atom=N_atom noise_type=noise_type D_hidden=D_hidden)<line_sep>batch_size=6<line_sep>mock_input=torch.rand((batch_size D_in))<line_sep>out=net(mock_input is_train=<true>)<assert_stmt>out.shape<eq>(batch_size D_out N_atom)<block_end>
<import_from_stmt>contextlib contextmanager<import_from_stmt>m2cgen.interpreters.code_generator CodeTemplate ImperativeCodeGenerator<class_stmt>RubyCodeGenerator(ImperativeCodeGenerator)<block_start>tpl_var_declaration=CodeTemplate("")<line_sep>tpl_num_value=CodeTemplate("{value}")<line_sep>tpl_infix_expression=CodeTemplate("({left}) {op} ({right})")<line_sep>tpl_return_statement=tpl_num_value<line_sep>tpl_array_index_access=CodeTemplate("{array_name}[{index}]")<line_sep>tpl_if_statement=CodeTemplate("if {if_def}")<line_sep>tpl_else_statement=CodeTemplate("else")<line_sep>tpl_block_termination=CodeTemplate("end")<line_sep>tpl_var_assignment=CodeTemplate("{var_name} = {value}")<def_stmt>add_function_def self name args<block_start>func_def=f"def {name}({', '.join(args)})"<line_sep>self.add_code_line(func_def)<line_sep>self.increase_indent()<block_end>@contextmanager<def_stmt>function_definition self name args<block_start>self.add_function_def(name args)<line_sep><yield><line_sep>self.add_block_termination()<block_end><def_stmt>method_invocation self method_name obj args<block_start><return>f"({obj}).{method_name}({', '.join(map(str args))})"<block_end><def_stmt>vector_init self values<block_start><return>f"[{', '.join(values)}]"<block_end><block_end>
<import_stmt>glob<import_stmt>json<import_stmt>lldb<import_from_stmt>lldbsuite.test.decorators *<import_from_stmt>lldbsuite.test.lldbtest *<import_from_stmt>lldbsuite.test lldbutil<import_stmt>os<import_stmt>time<class_stmt>DebugIndexCacheTestcase(TestBase)<block_start>mydir=TestBase.compute_mydir(__file__)<def_stmt>setUp self# Call super's setUp(). <block_start>TestBase.setUp(self)<line_sep># Set the lldb module cache directory to a directory inside the build # artifacts directory so no other tests are interfered with. self.cache_dir=os.path.join(self.getBuildDir() 'lldb-module-cache')<block_end><def_stmt>get_module_cache_files self basename<block_start>module_cache_glob=os.path.join(self.cache_dir "llvmcache-*%s*dwarf-index*"%(basename))<line_sep><return>glob.glob(module_cache_glob)<block_end><def_stmt>get_stats self log_path=<none><block_start>""" Get the output of the "statistics dump" and return the JSON as a python dictionary. """<line_sep># If log_path is set, open the path and emit the output of the command # for debugging purposes. <if_stmt>log_path<is><not><none><block_start>f=open(log_path 'w')<block_end><else_stmt><block_start>f=<none><block_end>return_obj=lldb.SBCommandReturnObject()<line_sep>command="statistics dump "<if_stmt>f<block_start>f.write('(lldb) %s\n'%(command))<block_end>self.ci.HandleCommand(command return_obj <false>)<line_sep>metrics_json=return_obj.GetOutput()<if_stmt>f<block_start>f.write(metrics_json)<block_end><return>json.loads(metrics_json)<block_end><def_stmt>enable_lldb_index_cache self<block_start>self.runCmd('settings set symbols.lldb-index-cache-path "%s"'%(self.cache_dir))<line_sep>self.runCmd('settings set symbols.enable-lldb-index-cache true')<block_end>@no_debug_info_test<def_stmt>test_with_caching_enabled self<block_start>""" Test module cache functionality for debug info index caching. We test that a debug info index file is created for the debug information when caching is enabled with a file that contains at least one of each kind of DIE in ManualDWARFIndex::IndexSet. The input file has DWARF that will fill in every member of the ManualDWARFIndex::IndexSet class to ensure we can encode all of the required information. With caching enabled, we also verify that the appropriate statistics specify that the cache file was saved to the cache. 
"""<line_sep>self.enable_lldb_index_cache()<line_sep>src_dir=self.getSourceDir()<line_sep>yaml_path=os.path.join(src_dir "exe.yaml")<line_sep>yaml_base,ext=os.path.splitext(yaml_path)<line_sep>obj_path=self.getBuildArtifact("main.o")<line_sep>self.yaml2obj(yaml_path obj_path)<line_sep># Create a target with the object file we just created from YAML target=self.dbg.CreateTarget(obj_path)<line_sep>self.assertTrue(target VALID_TARGET)<line_sep>debug_index_cache_files=self.get_module_cache_files('main.o')<line_sep>self.assertEqual(len(debug_index_cache_files) 1 "make sure there is one file in the module cache directory (%s) for main.o that is a debug info cache"%(self.cache_dir))<line_sep># Verify that the module statistics have the information that specifies # if we loaded or saved the debug index and symtab to the cache stats=self.get_stats()<line_sep>module_stats=stats['modules'][0]<line_sep>self.assertFalse(module_stats['debugInfoIndexLoadedFromCache'])<line_sep>self.assertTrue(module_stats['debugInfoIndexSavedToCache'])<line_sep>self.assertFalse(module_stats['symbolTableLoadedFromCache'])<line_sep>self.assertTrue(module_stats['symbolTableSavedToCache'])<line_sep># Verify the top level stats track how many things were loaded or saved # to the cache. self.assertEqual(stats["totalDebugInfoIndexLoadedFromCache"] 0)<line_sep>self.assertEqual(stats["totalDebugInfoIndexSavedToCache"] 1)<line_sep>self.assertEqual(stats["totalSymbolTablesLoadedFromCache"] 0)<line_sep>self.assertEqual(stats["totalSymbolTablesSavedToCache"] 1)<block_end>@no_debug_info_test<def_stmt>test_with_caching_disabled self<block_start>""" Test module cache functionality for debug info index caching. We test that a debug info index file is not created for the debug information when caching is disabled with a file that contains at least one of each kind of DIE in ManualDWARFIndex::IndexSet. The input file has DWARF that will fill in every member of the ManualDWARFIndex::IndexSet class to ensure we can encode all of the required information. With caching disabled, we also verify that the appropriate statistics specify that the cache file was not saved to the cache. """<line_sep>src_dir=self.getSourceDir()<line_sep>yaml_path=os.path.join(src_dir "exe.yaml")<line_sep>yaml_base,ext=os.path.splitext(yaml_path)<line_sep>obj_path=self.getBuildArtifact("main.o")<line_sep>self.yaml2obj(yaml_path obj_path)<line_sep># Create a target with the object file we just created from YAML target=self.dbg.CreateTarget(obj_path)<line_sep>self.assertTrue(target VALID_TARGET)<line_sep>debug_index_cache_files=self.get_module_cache_files('main.o')<line_sep>self.assertEqual(len(debug_index_cache_files) 0 "make sure there is no file in the module cache directory (%s) for main.o that is a debug info cache"%(self.cache_dir))<line_sep># Verify that the module statistics have the information that specifies # if we loaded or saved the debug index and symtab to the cache stats=self.get_stats()<line_sep>module_stats=stats['modules'][0]<line_sep>self.assertFalse(module_stats['debugInfoIndexLoadedFromCache'])<line_sep>self.assertFalse(module_stats['debugInfoIndexSavedToCache'])<line_sep>self.assertFalse(module_stats['symbolTableLoadedFromCache'])<line_sep>self.assertFalse(module_stats['symbolTableSavedToCache'])<line_sep># Verify the top level stats track how many things were loaded or saved # to the cache. 
self.assertEqual(stats["totalDebugInfoIndexLoadedFromCache"] 0)<line_sep>self.assertEqual(stats["totalDebugInfoIndexSavedToCache"] 0)<line_sep>self.assertEqual(stats["totalSymbolTablesLoadedFromCache"] 0)<line_sep>self.assertEqual(stats["totalSymbolTablesSavedToCache"] 0)<block_end><block_end>
<import_from_stmt>argparse ArgumentParser<import_stmt>os<import_stmt>random<import_from_stmt>matplotlib pyplot<as>plt<import_stmt>torch<import_from_stmt>torch optim<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>torch.autograd Variable<import_from_stmt>torch.utils.data DataLoader<import_from_stmt>torchvision models<import_from_stmt>torchvision.utils save_image<import_from_stmt>data CityscapesDataset num_classes full_to_colour train_to_full<import_from_stmt>model FeatureResNet SegResNet<line_sep># Setup parser=ArgumentParser(description='Semantic segmentation')<line_sep>parser.add_argument('--seed' type=int default=42 help='Random seed')<line_sep>parser.add_argument('--workers' type=int default=8 help='Data loader workers')<line_sep>parser.add_argument('--epochs' type=int default=100 help='Training epochs')<line_sep>parser.add_argument('--crop-size' type=int default=512 help='Training crop size')<line_sep>parser.add_argument('--lr' type=float default=5e-5 help='Learning rate')<line_sep>parser.add_argument('--momentum' type=float default=0 help='Momentum')<line_sep>parser.add_argument('--weight-decay' type=float default=2e-4 help='Weight decay')<line_sep>parser.add_argument('--batch-size' type=int default=16 help='Batch size')<line_sep>args=parser.parse_args()<line_sep>random.seed(args.seed)<line_sep>torch.manual_seed(args.seed)<if_stmt><not>os.path.exists('results')<block_start>os.makedirs('results')<block_end>plt.switch_backend('agg')# Allow plotting when running remotely # Data train_dataset=CityscapesDataset(split='train' crop=args.crop_size flip=<true>)<line_sep>val_dataset=CityscapesDataset(split='val')<line_sep>train_loader=DataLoader(train_dataset batch_size=args.batch_size shuffle=<true> num_workers=args.workers pin_memory=<true>)<line_sep>val_loader=DataLoader(val_dataset batch_size=1 num_workers=args.workers pin_memory=<true>)<line_sep># Training/Testing pretrained_net=FeatureResNet()<line_sep>pretrained_net.load_state_dict(models.resnet34(pretrained=<true>).state_dict())<line_sep>net=SegResNet(num_classes pretrained_net).cuda()<line_sep>crit=nn.BCELoss().cuda()<line_sep># Construct optimiser params_dict=dict(net.named_parameters())<line_sep>params=[]<for_stmt>key,value params_dict.items()<block_start><if_stmt>'bn'<in>key# No weight decay on batch norm <block_start>params<augadd>[{'params':[value] 'weight_decay':0}]<block_end><elif_stmt>'.bias'<in>key# No weight decay plus double learning rate on biases <block_start>params<augadd>[{'params':[value] 'lr':2<times>args.lr 'weight_decay':0}]<block_end><else_stmt><block_start>params<augadd>[{'params':[value]}]<block_end><block_end>optimiser=optim.RMSprop(params lr=args.lr momentum=args.momentum weight_decay=args.weight_decay)<line_sep>scores,mean_scores=[] []<def_stmt>train e<block_start>net.train()<for_stmt>i,(input target _) enumerate(train_loader)<block_start>optimiser.zero_grad()<line_sep>input,target=Variable(input.cuda(async=<true>)) Variable(target.cuda(async=<true>))<line_sep>output=F.sigmoid(net(input))<line_sep>loss=crit(output target)<line_sep>print(e i loss.data[0])<line_sep>loss.backward()<line_sep>optimiser.step()<block_end><block_end># Calculates class intersections over unions <def_stmt>iou pred target<block_start>ious=[]<line_sep># Ignore IoU for background class <for_stmt>cls range(num_classes-1)<block_start>pred_inds=pred<eq>cls<line_sep>target_inds=target<eq>cls<line_sep>intersection=(pred_inds[target_inds]).long().sum().data.cpu()[0]# Cast to long to prevent overflows 
union=pred_inds.long().sum().data.cpu()[0]+target_inds.long().sum().data.cpu()[0]-intersection<if_stmt>union<eq>0<block_start>ious.append(float('nan'))# If there is no ground truth, do not include in evaluation <block_end><else_stmt><block_start>ious.append(intersection/max(union 1))<block_end><block_end><return>ious<block_end><def_stmt>test e<block_start>net.eval()<line_sep>total_ious=[]<for_stmt>i,(input _ target) enumerate(val_loader)<block_start>input,target=Variable(input.cuda(async=<true>) volatile=<true>) Variable(target.cuda(async=<true>) volatile=<true>)<line_sep>output=F.log_softmax(net(input))<line_sep>b,_,h,w=output.size()<line_sep>pred=output.permute(0 2 3 1).contiguous().view(-1 num_classes).max(1)[1].view(b h w)<line_sep>total_ious.append(iou(pred target))<line_sep># Save images <if_stmt>i%25<eq>0<block_start>pred=pred.data.cpu()<line_sep>pred_remapped=pred.clone()<line_sep># Convert to full labels <for_stmt>k,v train_to_full.items()<block_start>pred_remapped[pred<eq>k]=v<block_end># Convert to colour image pred=pred_remapped<line_sep>pred_colour=torch.zeros(b 3 h w)<for_stmt>k,v full_to_colour.items()<block_start>pred_r=torch.zeros(b 1 h w)<line_sep>pred_r[(pred<eq>k)]=v[0]<line_sep>pred_g=torch.zeros(b 1 h w)<line_sep>pred_g[(pred<eq>k)]=v[1]<line_sep>pred_b=torch.zeros(b 1 h w)<line_sep>pred_b[(pred<eq>k)]=v[2]<line_sep>pred_colour.add_(torch.cat((pred_r pred_g pred_b) 1))<block_end>save_image(pred_colour[0].float().div(255) os.path.join('results' str(e)+'_'+str(i)+'.png'))<block_end><block_end># Calculate average IoU total_ious=torch.Tensor(total_ious).transpose(0 1)<line_sep>ious=torch.Tensor(num_classes-1)<for_stmt>i,class_iou enumerate(total_ious)<block_start>ious[i]=class_iou[class_iou<eq>class_iou].mean()# Calculate mean, ignoring NaNs <block_end>print(ious ious.mean())<line_sep>scores.append(ious)<line_sep># Save weights and scores torch.save(net.state_dict() os.path.join('results' str(e)+'_net.pth'))<line_sep>torch.save(scores os.path.join('results' 'scores.pth'))<line_sep># Plot scores mean_scores.append(ious.mean())<line_sep>es=list(range(len(mean_scores)))<line_sep>plt.plot(es mean_scores 'b-')<line_sep>plt.xlabel('Epoch')<line_sep>plt.ylabel('Mean IoU')<line_sep>plt.savefig(os.path.join('results' 'ious.png'))<line_sep>plt.close()<block_end>test(0)<for_stmt>e range(1 args.epochs+1)<block_start>train(e)<line_sep>test(e)<block_end>
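# ---------------------------------------------------------------------------
# Editor's worked example (illustrative, not from the original script): the
# per-class intersection-over-union that iou() computes above, on made-up
# pixel counts. Classes absent from the ground truth yield NaN and are skipped
# when the per-class means are taken in test().
# ---------------------------------------------------------------------------
def iou_example():
    pred_pixels, target_pixels, overlap = 120, 100, 80   # hypothetical counts
    intersection = overlap
    union = pred_pixels + target_pixels - intersection   # 120 + 100 - 80 = 140
    return intersection / max(union, 1)                  # 80 / 140 = 0.571...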
<import_stmt>pytest<import_from_stmt>pathlib Path<import_from_stmt>dbt.tests.util run_dbt<line_sep># from `test/integration/009_data_test` # # Models # models__table_copy=""" {{ config( materialized='table' ) }} select * from {{ this.schema }}.seed """<line_sep># # Tests # tests__fail_email_is_always_null=""" select * from {{ ref('table_copy') }} where email is not null """<line_sep>tests__fail_no_ref=""" select 1 """<line_sep>tests__dotted_path_pass_id_not_null=""" {# Same as `pass_id_not_null` but with dots in its name #} select * from {{ ref('table_copy') }} where id is null """<line_sep>tests__pass_id_not_null=""" select * from {{ ref('table_copy') }} where id is null """<line_sep>tests__pass_no_ref=""" select 1 limit 0 """<class_stmt>CustomSingularTestsBase(object)<block_start>@pytest.fixture(scope="class" autouse=<true>)<def_stmt>setUp self project<block_start>"""Create seed and downstream model tests are to be run on"""<line_sep>project.run_sql_file(project.test_data_dir/Path("seed_expected.sql"))<line_sep>results=run_dbt()<assert_stmt>len(results)<eq>1<block_end>@pytest.fixture(scope="class")<def_stmt>models self<block_start><return>{"table_copy.sql":models__table_copy}<block_end><block_end><class_stmt>TestPassingTests(CustomSingularTestsBase)<block_start>@pytest.fixture(scope="class")<def_stmt>tests self<block_start><return>{"my_db.my_schema.table_copy.pass_id_not_null.sql":tests__dotted_path_pass_id_not_null "tests__pass_id_not_null.sql":tests__pass_id_not_null "tests__pass_no_ref.sql":tests__pass_no_ref }<block_end><def_stmt>test_data_tests self project tests<block_start>test_results=run_dbt(["test"])<assert_stmt>len(test_results)<eq>len(tests)<for_stmt>result test_results<block_start><assert_stmt>result.status<eq>"pass"<assert_stmt><not>result.skipped<assert_stmt>result.failures<eq>0<block_end><block_end><block_end><class_stmt>TestFailingTests(CustomSingularTestsBase)<block_start>@pytest.fixture(scope="class")<def_stmt>tests self<block_start><return>{"tests__fail_email_is_always_null.sql":tests__fail_email_is_always_null "tests__fail_no_ref.sql":tests__fail_no_ref }<block_end><def_stmt>test_data_tests self project tests<block_start>"""assert that all deliberately failing tests actually fail"""<line_sep>test_results=run_dbt(["test"] expect_pass=<false>)<assert_stmt>len(test_results)<eq>len(tests)<for_stmt>result test_results<block_start><assert_stmt>result.status<eq>"fail"<assert_stmt><not>result.skipped<assert_stmt>result.failures<g>0<block_end><block_end><block_end>
<import_from_stmt>linear_attention_transformer.linear_attention_transformer LinearAttentionTransformer LinearAttentionTransformerLM LinformerSettings LinformerContextSettings<import_from_stmt>linear_attention_transformer.autoregressive_wrapper AutoregressiveWrapper<import_from_stmt>linear_attention_transformer.images ImageLinearAttention<line_sep>
# (C) Datadog, Inc. 2018-present # All rights reserved # Licensed under a 3-clause BSD style license (see LICENSE) <import_stmt>logging<import_from_stmt>collections defaultdict<import_stmt>pytest<import_from_stmt>datadog_checks.dev.testing requires_windows<try_stmt><block_start><import_from_stmt>datadog_test_libs.win.pdh_mocks # noqa: F401 initialize_pdh_tests pdh_mocks_fixture pdh_mocks_fixture_bad_perf_strings <import_from_stmt>datadog_checks.checks.win.winpdh SINGLE_INSTANCE_KEY WinPDHCounter<block_end><except_stmt>ImportError<block_start><import_stmt>platform<if_stmt>platform.system()<ne>'Windows'<block_start><pass><block_end><block_end>logger=logging.getLogger(__file__)<line_sep>''' WinPDHCounter tests. Test specific behavior of the WinPDHCounter class, which provides the interface to the OS API. '''<line_sep>@requires_windows<def_stmt>test_winpdhcounter_bad_strings_english pdh_mocks_fixture_bad_perf_strings# noqa F811 <block_start>initialize_pdh_tests()<line_sep>counter=WinPDHCounter('System' 'Processor Queue Length' logger)<line_sep>vals=counter.get_all_values()<assert_stmt>len(vals)<eq>1# single instance key, should only have one value <assert_stmt>SINGLE_INSTANCE_KEY<in>vals<block_end>@requires_windows<def_stmt>test_winpdhcounter_throws_on_bad_input pdh_mocks_fixture# noqa F811 <block_start>initialize_pdh_tests()<with_stmt>pytest.raises(AttributeError)<block_start>WinPDHCounter('Ssystem' 'Processor Queue Length' logger)<block_end><with_stmt>pytest.raises(AttributeError)<block_start>WinPDHCounter('System' 'PProcessor Queue Length' logger)<block_end><block_end>@requires_windows<def_stmt>test_winpdhcounter_throws_on_bad_input_with_bad_strings pdh_mocks_fixture_bad_perf_strings# noqa F811 <block_start>initialize_pdh_tests()<with_stmt>pytest.raises(AttributeError)<block_start>WinPDHCounter('Ssystem' 'Processor Queue Length' logger)<block_end><with_stmt>pytest.raises(AttributeError)<block_start>WinPDHCounter('System' 'PProcessor Queue Length' logger)<block_end><block_end>@requires_windows<def_stmt>test_winpdhcounter_bad_strings_not_english pdh_mocks_fixture_bad_perf_strings# noqa F811 <block_start>WinPDHCounter._use_en_counter_names=<false><line_sep>WinPDHCounter.pdh_counter_dict=defaultdict(list)<line_sep>initialize_pdh_tests(lang="se-sv")<line_sep>''' expectation is that the initialization will fail. We attempt to fall back to english counters if the strings database isn't present; however, on non-english windows the english counters won't work '''<with_stmt>pytest.raises(AttributeError)<block_start>WinPDHCounter('System' 'Processor Queue Length' logger)<block_end><block_end>@requires_windows<def_stmt>test_winpdhcounter_non_english pdh_mocks_fixture# noqa F811 <block_start>WinPDHCounter._use_en_counter_names=<false><line_sep>WinPDHCounter.pdh_counter_dict=defaultdict(list)<line_sep>initialize_pdh_tests(lang="se-sv")<line_sep>counter=WinPDHCounter('System' 'Processor Queue Length' logger)<line_sep>vals=counter.get_all_values()<assert_stmt>len(vals)<eq>1# single instance key, should only have one value <assert_stmt>SINGLE_INSTANCE_KEY<in>vals<block_end>
# Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Utilities for loading MNIST into TensorFlow."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<line_sep># Dependency imports <import_stmt>numpy<as>np<import_stmt>tensorflow.compat.v1<as>tf<line_sep>__all__=['load_mnist_as_tensors' 'load_mnist_as_dataset' 'load_mnist_as_iterator' ]<def_stmt>load_mnist_as_tensors flatten_images=<true> dtype=tf.float32<block_start>"""Loads MNIST as Tensors. Args: flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into [784]-shaped vectors. dtype: The TF dtype to return the images as. Returns: images, labels, num_examples """<line_sep># mnist_data = tf.contrib.learn.datasets.mnist.read_data_sets( # '/tmp/mnist', reshape=flatten_images) # num_examples = len(mnist_data.train.labels) # images = mnist_data.train.images # labels = mnist_data.train.labels # # images = tf.constant(np.asarray(images, dtype=np.float32)) # labels = tf.constant(np.asarray(labels, dtype=np.int64)) # # return images, labels, num_examples (images labels),_=tf.keras.datasets.mnist.load_data()<line_sep>num_examples=images.shape[0]<if_stmt>flatten_images<block_start>images=images.reshape(images.shape[0] 28<power>2)<block_end><else_stmt><block_start>images=images.reshape(images.shape[0] 28 28 1)<block_end>images=images.astype('float64')<line_sep>labels=labels.astype('int32')<line_sep>images<augdiv>255.<line_sep>images=tf.constant(images dtype=dtype)<line_sep>labels=tf.constant(labels)<line_sep><return>images labels num_examples<block_end><def_stmt>load_mnist_as_dataset flatten_images=<true><block_start>"""Loads MNIST as a Dataset object. Args: flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into [784]-shaped vectors. Returns: dataset, num_examples, where dataset is a Dataset object containing the whole MNIST training dataset and num_examples is the number of examples in the MNIST dataset (should be 60000). """<line_sep>images,labels,num_examples=load_mnist_as_tensors(flatten_images=flatten_images)<line_sep>dataset=tf.data.Dataset.from_tensor_slices((images labels))<line_sep><return>dataset num_examples<block_end><def_stmt>load_mnist_as_iterator num_epochs batch_size use_fake_data=<false> flatten_images=<true><block_start>"""Loads MNIST dataset as an iterator Tensor. Args: num_epochs: int. Number of passes to make over the dataset. batch_size: int. Number of examples per minibatch. use_fake_data: bool. If True, generate a synthetic dataset rather than reading MNIST in. flatten_images: bool. If True, [28, 28, 1]-shaped images are flattened into [784]-shaped vectors. Returns: examples: Tensor of shape [batch_size, 784] if 'flatten_images' is True, else [batch_size, 28, 28, 1]. Each row is one example. Values in [0, 1]. labels: Tensor of shape [batch_size]. Indices of integer corresponding to each example. 
Values in {0...9}. """<if_stmt>use_fake_data<block_start>rng=np.random.RandomState(42)<line_sep>num_examples=batch_size<times>4<line_sep>images=rng.rand(num_examples 28<times>28)<if_stmt><not>flatten_images<block_start>images=np.reshape(images [num_examples 28 28 1])<block_end>labels=rng.randint(10 size=num_examples)<line_sep>dataset=tf.data.Dataset.from_tensor_slices((np.asarray(images dtype=np.float32) np.asarray(labels dtype=np.int64)))<block_end><else_stmt><block_start>dataset,num_examples=load_mnist_as_dataset(flatten_images=flatten_images)<block_end>dataset=(dataset.shuffle(num_examples).repeat(num_epochs).batch(batch_size).prefetch(5))<line_sep><return>tf.compat.v1.data.make_one_shot_iterator(dataset).get_next()<block_end>
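# ---------------------------------------------------------------------------
# Editor's usage sketch (not part of the original module). A minimal TF1-style
# loop over the iterator built above, using the synthetic-data path so no
# download is needed; the batch size and epoch count are arbitrary.
# ---------------------------------------------------------------------------
def run_fake_data_example():
    tf.disable_eager_execution()               # the helpers assume graph mode
    examples, labels = load_mnist_as_iterator(
        num_epochs=1, batch_size=32, use_fake_data=True)
    with tf.Session() as sess:
        images_np, labels_np = sess.run([examples, labels])
        print(images_np.shape, labels_np.shape)   # (32, 784) (32,)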
# encoding=utf8 <import_from_stmt>collections OrderedDict<import_stmt>json<import_stmt>nltk<import_from_stmt>datasets load_metric<def_stmt>load_entities kvret_entity_file_path<block_start>""" @param kvret_entity_file_path: the path of kvret_entities.json @return: """<line_sep>under_scored_entity_dict=OrderedDict()<with_stmt>open(kvret_entity_file_path)<as>f<block_start>entity=json.load(f)<for_stmt>sub_class_name,sub_class_entity_list entity.items()<block_start><if_stmt>sub_class_name<eq>'poi'<block_start><for_stmt>entity_item sub_class_entity_list<block_start>under_scored_entity_dict[str(entity_item['address'])]=(str(entity_item['address']).replace(" " "_"))<line_sep>under_scored_entity_dict[str(entity_item['poi'])]=(str(entity_item['poi']).replace(" " "_"))<line_sep>under_scored_entity_dict[str(entity_item['type'])]=(str(entity_item['type']).replace(" " "_"))<block_end><block_end><elif_stmt>sub_class_name<eq>"distance"<block_start><for_stmt>entity_item sub_class_entity_list<block_start>under_scored_entity_dict[str(entity_item)+" miles"]=str(entity_item)+" miles"<block_end><block_end><elif_stmt>sub_class_name<eq>"temperature"<block_start><for_stmt>entity_item sub_class_entity_list<block_start>under_scored_entity_dict[str(entity_item)+"f"]=str(entity_item)+"f"<block_end><block_end><else_stmt><block_start><for_stmt>entity_item sub_class_entity_list<block_start>under_scored_entity_dict[str(entity_item)]=(str(entity_item).replace(" " "_"))<block_end><block_end><block_end># add missing entities, missed_entities=["yoga" "tennis" "swimming" "football" " lab " "doctor" "optometrist" "dentist" "1st" "2nd" "3rd" "4th" "5th" "6th" "7th" "8th" "9th" "10th" "11th" "12th" "13th" "14th" "15th" "16th" "17th" "18th" "19th" "20th" "Jill" "Jack"]<for_stmt>missed_entity missed_entities<block_start>under_scored_entity_dict[str(missed_entity)]=(missed_entity)<block_end># special handle of "HR" <del_stmt>under_scored_entity_dict['HR']<line_sep>under_scored_entity_dict[' HR ']=' HR '<block_end><return>under_scored_entity_dict<block_end><def_stmt>postprocess_text preds responses metric_name<block_start>_preds=[pred.strip()<for>pred preds]<line_sep>_responses=[response.strip()<for>response responses]<line_sep># rougeLSum expects newline after each sentence <if_stmt>metric_name<eq>"rouge"<block_start>_preds=["\n".join(nltk.sent_tokenize(pred))<for>pred _preds]<line_sep>_responses=["\n".join(nltk.sent_tokenize(response))<for>response _responses]<block_end><elif_stmt>metric_name<eq>"sacrebleu"# sacrebleu <block_start>_responses=[[response]<for>response _responses]<block_end><elif_stmt>metric_name<eq>"bleu"<block_start>_preds=[pred.split(" ")<for>pred _preds]<line_sep>_responses=[[response.split(" ")]<for>response _responses]<block_end><else_stmt><block_start><pass><block_end><return>_preds _responses<block_end><class_stmt>EvaluateTool(object)<block_start><def_stmt>__init__ self args<block_start>self.args=args<block_end><def_stmt>evaluate self preds golds section<block_start>summary={}<assert_stmt>len(golds)<g>0<line_sep>global_entities=load_entities(golds[0]["entities_file"])<line_sep>metric_list=[]<if_stmt>section<in>["train" "dev"]<block_start>metric_list=["bleu"]<block_end><elif_stmt>section<eq>"test"<block_start>metric_list=["bleu" "metrics/kvret/response_entity_hit.py"]<block_end><for_stmt>metric_name metric_list<block_start>metric=load_metric(metric_name)<if_stmt>metric_name<eq>"metrics/kvret/response_entity_hit.py"<block_start>gold_responses=[{"response":item["seq_out"] "intents":[item["intent"]] }<for>item 
golds]<line_sep>res=metric.compute(**{"predictions":preds "references":gold_responses "global_entities":global_entities })<line_sep>summary.update(res)<block_end><else_stmt><block_start>gold_responses=[item["seq_out"]<for>item golds]<line_sep>processed_preds,processed_golds=postprocess_text(preds gold_responses metric_name)<line_sep>res=metric.compute(predictions=processed_preds references=processed_golds )<line_sep>summary[metric_name]=res[metric_name]<block_end><block_end><return>summary<block_end><block_end>
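# --- Usage sketch (added for illustration; not part of the original module) ---
# Shows the input shapes evaluate() expects: preds is a list of generated
# responses, golds is a list of dicts with "seq_out", "intent" and an
# "entities_file" path (only golds[0]["entities_file"] is read). The args
# object and the entities-file path below are placeholders.
if __name__ == "__main__":
    class _Args:
        pass  # stand-in for the experiment config normally passed in

    evaluator = EvaluateTool(_Args())
    preds = ["the nearest gas station is chevron"]
    golds = [{
        "seq_out": "the nearest gas station is chevron",
        "intent": "navigate",
        "entities_file": "data/kvret_entities.json",  # hypothetical path
    }]
    # section="dev" keeps the metric list to BLEU only
    print(evaluator.evaluate(preds, golds, section="dev"))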
# -*- coding: utf-8 -*- r""" ================================= Wasserstein unmixing with PyTorch ================================= In this example we estimate mixing parameters from distributions that minimize the Wasserstein distance. In other words we suppose that a target distribution :math:`\mu^t` can be expressed as a weighted sum of source distributions :math:`\mu^s_k` with the following model: .. math:: \mu^t = \sum_{k=1}^K w_k\mu^s_k where :math:`\mathbf{w}` is a vector of size :math:`K` and belongs in the distribution simplex :math:`\Delta_K`. In order to estimate this weight vector we propose to optimize the Wasserstein distance between the model and the observed :math:`\mu^t` with respect to the vector. This leads to the following optimization problem: .. math:: \min_{\mathbf{w}\in\Delta_K} \quad W \left(\mu^t,\sum_{k=1}^K w_k\mu^s_k\right) This minimization is done in this example with a simple projected gradient descent in PyTorch. We use the automatic backend of POT that allows us to compute the Wasserstein distance with :any:`ot.emd2` with differentiable losses. """<line_sep># Author: <NAME> <<EMAIL>> # # License: MIT License # sphinx_gallery_thumbnail_number = 2 <import_stmt>numpy<as>np<import_stmt>matplotlib.pylab<as>pl<import_stmt>ot<import_stmt>torch<line_sep>############################################################################## # Generate data # ------------- #%% Data nt=100<line_sep>nt1=10# ns1=50<line_sep>ns=2<times>ns1<line_sep>rng=np.random.RandomState(2)<line_sep>xt=rng.randn(nt 2)<times>0.2<line_sep>xt[:nt1 0]<augadd>1<line_sep>xt[nt1: 1]<augadd>1<line_sep>xs1=rng.randn(ns1 2)<times>0.2<line_sep>xs1[: 0]<augadd>1<line_sep>xs2=rng.randn(ns1 2)<times>0.2<line_sep>xs2[: 1]<augadd>1<line_sep>xs=np.concatenate((xs1 xs2))<line_sep># Sample reweighting matrix H H=np.zeros((ns 2))<line_sep>H[:ns1 0]=1/ns1<line_sep>H[ns1: 1]=1/ns1<line_sep># each columns sums to 1 and has weights only for samples form the # corresponding source distribution M=ot.dist(xs xt)<line_sep>############################################################################## # Plot data # --------- #%% plot the distributions pl.figure(1)<line_sep>pl.scatter(xt[: 0] xt[: 1] label='Target $\mu^t$' alpha=0.5)<line_sep>pl.scatter(xs1[: 0] xs1[: 1] label='Source $\mu^s_1$' alpha=0.5)<line_sep>pl.scatter(xs2[: 0] xs2[: 1] label='Source $\mu^s_2$' alpha=0.5)<line_sep>pl.title('Sources and Target distributions')<line_sep>pl.legend()<line_sep>############################################################################## # Optimization of the model wrt the Wasserstein distance # ------------------------------------------------------ #%% Weights optimization with gradient descent # convert numpy arrays to torch tensors H2=torch.tensor(H)<line_sep>M2=torch.tensor(M)<line_sep># weights for the source distributions w=torch.tensor(ot.unif(2) requires_grad=<true>)<line_sep># uniform weights for target b=torch.tensor(ot.unif(nt))<line_sep>lr=2e-3# learning rate niter=500# number of iterations losses=[]# loss along the iterations # loss for the minimal Wasserstein estimator <def_stmt>get_loss w<block_start>a=torch.mv(H2 w)# distribution reweighting <return>ot.emd2(a b M2)<block_end># squared Wasserstein 2 <for_stmt>i range(niter)<block_start>loss=get_loss(w)<line_sep>losses.append(float(loss))<line_sep>loss.backward()<with_stmt>torch.no_grad()<block_start>w<augsub>lr<times>w.grad# gradient step w[:]=ot.utils.proj_simplex(w)<block_end># projection on the simplex 
w.grad.zero_()<block_end>############################################################################## # Estimated weights and convergence of the objective # --------------------------------------------------- we=w.detach().numpy()<line_sep>print('Estimated mixture:' we)<line_sep>pl.figure(2)<line_sep>pl.semilogy(losses)<line_sep>pl.grid()<line_sep>pl.title('Wasserstein distance')<line_sep>pl.xlabel("Iterations")<line_sep>############################################################################## # Plotting the reweighted source distribution # ------------------------------------------- pl.figure(3)<line_sep># compute source weights ws=H.dot(we)<line_sep>pl.scatter(xt[: 0] xt[: 1] label='Target $\mu^t$' alpha=0.5)<line_sep>pl.scatter(xs[: 0] xs[: 1] color='C3' s=ws<times>20<times>ns label='Weighted sources $\sum_{k} w_k\mu^s_k$' alpha=0.5)<line_sep>pl.title('Target and reweighted source distributions')<line_sep>pl.legend()<line_sep>
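##############################################################################
# Added sanity check (illustrative)
# ---------------------------------
# By construction the target mixes the two source clusters with proportions
# nt1/nt and (nt - nt1)/nt, so the estimate should land close to [0.1, 0.9].

print('expected mixture (from the data construction):', [nt1 / nt, (nt - nt1) / nt])
print('estimated mixture:', we)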
# -*- coding:utf-8 -*- <import_stmt>os<import_stmt>sys<line_sep>sys.path.append(os.path.dirname(os.path.abspath(__file__)))<import_from_stmt>appUI.MainWindow main<if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_from_future_stmt> unicode_literals<import_from_stmt>..conversions *<import_from_stmt>..func_utils *<class_stmt>BooleanPrototype<block_start><def_stmt>toString this args<block_start><if_stmt>GetClass(this)<ne>'Boolean'<block_start><raise>MakeError('TypeError' 'Boolean.prototype.toString is not generic')<block_end><if_stmt>is_object(this)<block_start>this=this.value<block_end><return>u'true'<if>this<else>u'false'<block_end><def_stmt>valueOf this args<block_start><if_stmt>GetClass(this)<ne>'Boolean'<block_start><raise>MakeError('TypeError' 'Boolean.prototype.valueOf is not generic')<block_end><if_stmt>is_object(this)<block_start>this=this.value<block_end><return>this<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>math<import_from_stmt>collections OrderedDict<import_stmt>flask<import_stmt>pandas<as>pd<import_stmt>netCDF4<import_stmt>numpy<as>np<import_from_stmt>bokeh.embed components<import_from_stmt>bokeh.resources INLINE<import_from_stmt>bokeh.templates RESOURCES<import_from_stmt>bokeh.util.string encode_utf8<import_from_stmt>bokeh.models DatetimeTickFormatter ColumnDataSource HoverTool Plot Range1d<import_from_stmt>bokeh.palettes RdBu11<import_from_stmt>bokeh.models.glyphs Text Rect<import_from_stmt>bokeh.plotting figure show output_notebook hplot vplot<import_stmt>utils.world_countries<as>wc<import_from_stmt>utils.colormap RGBAColorMapper<import_from_stmt>viz2 climate_map timeseries legend title get_slice<line_sep>app=flask.Flask(__name__)<line_sep>colormap=RGBAColorMapper(-6 6 RdBu11)<line_sep>@app.route("/")<def_stmt>index # Create layout <block_start>c_map=climate_map()<line_sep>ts=timeseries()<line_sep>l=legend()<line_sep>t=title()<line_sep>map_legend=hplot(c_map l)<line_sep>layout=vplot(t map_legend ts)<line_sep>plot_resources=RESOURCES.render(js_raw=INLINE.js_raw css_raw=INLINE.css_raw js_files=INLINE.js_files css_files=INLINE.css_files )<line_sep>script,div=components(layout INLINE)<line_sep>html=flask.render_template('embed.html' plot_script=script plot_div=div plot_resources=plot_resources )<line_sep><return>encode_utf8(html)<block_end><if_stmt>__name__<eq>"__main__"<block_start>app.run(debug=<true>)<block_end>
# coding: utf-8 #------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. #-------------------------------------------------------------------------- <import_stmt>time<try_stmt><block_start><import_from_stmt>unittest mock<block_end><except_stmt>ImportError<block_start><import_stmt>mock<block_end><import_stmt>pytest<import_from_stmt>azure.common.credentials _CliCredentials<import_stmt>azure.common.credentials<class_stmt>MockCliCredentials<block_start><def_stmt>_token_retriever self<block_start><return>"NOTUSED" "TOKEN" {'expiresIn':42}<block_end><def_stmt>signed_session self session=<none><block_start><return>session<block_end><block_end><class_stmt>MockCliProfile<block_start><def_stmt>__init__ self<block_start>self.received_resource=<none><block_end><def_stmt>get_login_credentials self resource<block_start>self.received_resource=resource<line_sep><return>MockCliCredentials() "NOTUSED" "NOTUSED"<block_end><block_end><def_stmt>test_cli_credentials_mgmt <block_start>cli_profile=MockCliProfile()<line_sep>cred=_CliCredentials(cli_profile "http://resource.id")<line_sep># Mgmt scenario session=cred.signed_session("session")<assert_stmt>cli_profile.received_resource<eq>"http://resource.id"<assert_stmt>session<eq>"session"<line_sep># Trying to mock azure-core not here <with_stmt>mock.patch('azure.common.credentials._AccessToken' <none>)# Should not crash <block_start>cred.signed_session("session")<block_end><block_end><def_stmt>test_cli_credentials_accesstoken <block_start>cli_profile=MockCliProfile()<line_sep>cred=_CliCredentials(cli_profile "http://resource.id")<line_sep># Track2 scenario access_token=cred.get_token("http://resource.id/.default")<assert_stmt>cli_profile.received_resource<eq>"http://resource.id"<assert_stmt>access_token.token<eq>"TOKEN"<assert_stmt>access_token.expires_on<le>int(time.time()+42)<line_sep>access_token=cred.get_token("http://resource.newid")<assert_stmt>cli_profile.received_resource<eq>"http://resource.newid"<line_sep># Trying to mock azure-core not here <with_stmt>mock.patch('azure.common.credentials._AccessToken' <none>)<block_start><with_stmt>pytest.raises(ImportError)<block_start>cred.get_token("http://resource.yetid")<block_end><block_end><block_end>
# -*- coding: utf-8 -*- """ equip.analysis.graph.io ~~~~~~~~~~~~~~~~~~~~~~~ Outputs the graph structures :copyright: (c) 2014 by <NAME> (@rgaucher) :license: Apache 2, see LICENSE for more details. """<import_from_stmt>.graphs DiGraph Tree<line_sep>DOT_STYLE=""" rankdir=TD; ordering=out; graph[fontsize=10 fontname="Verdana"]; color="#efefef"; node[shape=box style=filled fontsize=8 fontname="Verdana" fillcolor="#efefef"]; edge[fontsize=8 fontname="Verdana"]; """<class_stmt>DotConverter(object)<block_start><def_stmt>__init__ self graph<block_start>self.g=graph<line_sep>self.buffer=''<line_sep>self.node_ids={}<block_end>@staticmethod<def_stmt>process graph<block_start>converter=DotConverter(graph)<line_sep>converter.run()<line_sep><return>converter.buffer<block_end><def_stmt>run self<block_start>self.buffer<augadd>'digraph G {'<line_sep>self.buffer<augadd>DOT_STYLE<if_stmt>isinstance(self.g DiGraph)<block_start><for_stmt>edge self.g.edges<block_start>self.add_edge(edge)<block_end><block_end><elif_stmt>isinstance(self.g Tree)<block_start>root=self.g.root<line_sep>worklist=[root]<while_stmt>worklist<block_start>current=worklist.pop(0)<if_stmt>current.has_children()<block_start>num_children=current.num_children()<line_sep>i=0<while_stmt>i<l>num_children<block_start>child=current.children[i]<if_stmt>child<is><none><block_start>i<augadd>1<line_sep><continue><block_end>self.add_tree_edge(current child)<line_sep>worklist.insert(0 child)<line_sep>i<augadd>1<block_end><block_end><else_stmt><block_start>nid=self.get_node_id(current)<block_end><block_end><block_end>self.buffer<augadd>'}\n'<block_end><def_stmt>add_edge self edge<block_start>labels=''<if_stmt>edge.kind<is><not><none><block_start>data=''<if>edge.data<is><none><else>str(edge.data)<line_sep>labels='[label="%s - %s"]'%(edge.kind data)<block_end>nid1=self.get_node_id(edge.source)<line_sep>nid2=self.get_node_id(edge.dest)<line_sep>self.buffer<augadd>'%s -> %s %s;\n'%(nid1 nid2 labels)<block_end><def_stmt>add_tree_edge self node1 node2<block_start>nid1=self.get_node_id(node1)<line_sep>nid2=self.get_node_id(node2)<line_sep>self.buffer<augadd>'%s -> %s;\n'%(nid1 nid2)<block_end><def_stmt>get_node_id self node<block_start><if_stmt>node<not><in>self.node_ids<block_start>self.node_ids[node]='node_%d'%node.gid<line_sep>self.add_node(node self.node_ids[node])<block_end><return>self.node_ids[node]<block_end><def_stmt>add_node self node node_id<block_start>label=''<if_stmt>node.data<is><not><none><block_start>node_kind=('%s - '%node.kind)<if>node.kind<is><not><none><else>''<line_sep>label='[label="Node%d - %s%s"]'%(node.gid node_kind node.data)<block_end>self.buffer<augadd>'%s %s;\n'%(node_id label)<block_end><block_end>
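# --- Usage sketch (added for illustration; not part of the original module) ---
# DotConverter.process() returns DOT source text for a DiGraph or Tree from
# this package. Building the graph itself is elided because the DiGraph/Tree
# constructors live in .graphs and are not shown here; `graph.dot` is just an
# example output path.
def write_dot(graph, path='graph.dot'):
  """Serialize `graph` (a DiGraph or Tree) to a .dot file for Graphviz."""
  dot_source = DotConverter.process(graph)
  with open(path, 'w') as fp:
    fp.write(dot_source)
  # render afterwards with e.g.: dot -Tpng graph.dot -o graph.png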
<import_stmt>importlib<import_stmt>logging<import_stmt>math<import_stmt>os<import_stmt>re<import_stmt>shutil<import_stmt>subprocess<import_stmt>sys<import_stmt>time<import_stmt>traceback<import_from_stmt>collections defaultdict<import_from_stmt>random shuffle<import_stmt>GPUtil<import_stmt>tensorflow<as>tf<import_from_stmt>ruamel.yaml YAML<import_from_stmt>ruamel.yaml.comments CommentedMap<import_from_stmt>tensorflow.contrib.training HParams<import_from_stmt>tensorflow.python.ops.image_ops_impl ResizeMethod<import_from_stmt>gpu_env APP_NAME DEVICE_ID IGNORE_PATTERNS<line_sep>millnames=['' ' K' ' M' ' BL' ' TL']<line_sep>regex_title_source=re.compile(r'^([^_\-—]*).*?[_\-—]\s?([^_\-—]+)[\s_\-—]?$')<def_stmt>set_logger model_id=<none><block_start>logger=logging.getLogger(APP_NAME)<line_sep>logger.setLevel(logging.INFO)<if_stmt>model_id<block_start>formatter=logging.Formatter('%(levelname)-.1s:'+model_id+':[%(filename).3s:%(funcName).3s:%(lineno)3d]:%(message)s' datefmt='%m-%d %H:%M:%S')<block_end><else_stmt><block_start>formatter=logging.Formatter('%(levelname)-.1s:[%(filename)s:%(lineno)d]:%(message)s' datefmt='%m-%d %H:%M:%S')<block_end>console_handler=logging.StreamHandler()<line_sep>console_handler.setLevel(logging.INFO)<line_sep>console_handler.setFormatter(formatter)<line_sep>logger.handlers=[]<line_sep>logger.addHandler(console_handler)<line_sep><return>logger<block_end><def_stmt>touch fname:str times=<none> create_dirs:bool=<false><block_start><import_stmt>os<if_stmt>create_dirs<block_start>base_dir=os.path.dirname(fname)<if_stmt><not>os.path.exists(base_dir)<block_start>os.makedirs(base_dir)<block_end><block_end><with_stmt>open(fname 'a')<block_start>os.utime(fname times)<block_end><block_end><def_stmt>touch_dir base_dir:str<arrow><none><block_start><import_stmt>os<if_stmt><not>os.path.exists(base_dir)<block_start>os.makedirs(base_dir)<block_end><block_end><def_stmt>millify n<block_start>n=float(n)<line_sep>millidx=max(0 min(len(millnames)-1 int(math.floor(0<if>n<eq>0<else>math.log10(abs(n))/3))))<line_sep><return>'{:.0f}{}'.format(n/10<power>(3<times>millidx) millnames[millidx])<block_end><def_stmt>args2hparam args vocab<block_start>params=vars(args)<line_sep>params['vocab']=vocab<line_sep>p=HParams()<for_stmt>k,v params.items()<block_start>p.add_hparam(k v)<block_end><return>p<block_end><def_stmt>runner main *done<block_start>logger=logging.getLogger(APP_NAME)<try_stmt><block_start>main()<block_end><except_stmt>(tf.errors.OutOfRangeError IndexError)<as>e<block_start>logger.warning('Data has been exhausted! 
Done!')<block_end><finally_stmt><block_start>[f()<for>f done]<block_end><block_end><def_stmt>parse_yaml yaml_path model_id<block_start><import_from_stmt>tensorflow.contrib.training HParams<import_from_stmt>ruamel.yaml YAML<line_sep>hparams=HParams()<line_sep>hparams.add_hparam('model_id' model_id)<with_stmt>open(yaml_path)<as>fp<block_start>customized=YAML().load(fp)<for_stmt>k,v customized.items()<block_start><if_stmt>k<in>hparams<block_start>hparams.set_hparam(k v)<block_end><else_stmt><block_start>hparams.add_hparam(k v)<block_end><block_end><block_end><return>hparams<block_end><def_stmt>parse_args yaml_path model_id default_set followup=<none><block_start>logger=logging.getLogger(APP_NAME)<line_sep>hparams=HParams()<line_sep>hparams.add_hparam('model_id' model_id)<with_stmt>open('default.yaml')<as>fp<block_start>configs=YAML().load(fp)<line_sep>default_cfg=configs[default_set]<line_sep>add_param_recur(hparams default_cfg)<if_stmt>yaml_path<block_start>logger.info('loading parameters...')<with_stmt>open(yaml_path)<as>fp<block_start>customized=YAML().load(fp)<for_stmt>k,v customized.items()<block_start><if_stmt>k<in>hparams<and>hparams.get(k)<ne>v<block_start>logger.info('%20s: %20s -> %20s'%(k hparams.get(k) v))<line_sep>hparams.set_hparam(k v)<block_end><elif_stmt>k<not><in>hparams# add new parameter <block_start>hparams.add_hparam(k v)<line_sep>logger.info('%30s %20s: %20s'%("[add from %s]"%yaml_path k hparams.get(k)))<block_end><block_end><block_end><block_end><block_end><if_stmt>followup# useful when changing args for prediction <block_start>logger.info('override args with follow-up args...')<for_stmt>k,v followup.items()<block_start><if_stmt>k<in>hparams<and>hparams.get(k)<ne>v<block_start>logger.info('%20s: %20s -> %20s'%(k hparams.get(k) v))<line_sep>hparams.set_hparam(k v)<block_end><elif_stmt>k<not><in>hparams<block_start>logger.warning('%s is not a valid attribute! ignore!'%k)<block_end><block_end><block_end><if_stmt>'save_dir'<not><in>hparams<block_start>hparams.add_hparam('save_dir' os.path.join(hparams.get('model_dir') hparams.get('model_id')))<block_end><if_stmt>'code_dir'<not><in>hparams<block_start>hparams.add_hparam('code_dir' os.path.join(hparams.get('save_dir') 'code'))<block_end>hparams.set_hparam('summary_dir' os.path.join(hparams.get('save_dir') 'summary'))<line_sep># reset logger model id logger=set_logger(model_id='%s:%s'%(DEVICE_ID hparams.get('model_id')))<try_stmt><block_start>shutil.copytree('./' hparams.get('code_dir') ignore=shutil.ignore_patterns(*IGNORE_PATTERNS))<line_sep>logger.info('current code base is copied to %s'%hparams.get('save_dir'))<block_end><except_stmt>FileExistsError<block_start>logger.info('code base exist, no need to copy!')<block_end># if hparams.get('model_id') != model_id: # logger.warning('model id is changed %s -> %s! 
' # 'This happens when you train a pretrained model' % ( # hparams.get('model_id'), model_id)) # hparams.set_hparam('model_id', model_id) <if_stmt>'loss_csv_file'<not><in>hparams<block_start>hparams.add_hparam('loss_csv_file' os.path.join(hparams.get('save_dir') 'loss.csv'))<block_end><if_stmt>'is_serving'<not><in>hparams<block_start>hparams.add_hparam('is_serving' <false>)<block_end>logger.info('current parameters')<for_stmt>k,v sorted(vars(hparams).items())<block_start><if_stmt><not>k.startswith('_')<block_start>logger.info('%20s = %-20s'%(k v))<block_end><block_end><return>hparams<block_end><def_stmt>add_param_recur root p_tree<block_start><for_stmt>k,v p_tree.items()<block_start><if_stmt>isinstance(v CommentedMap)<block_start>new_node=HParams()<line_sep>add_param_recur(new_node v)<line_sep>root.add_hparam(k new_node)<block_end><else_stmt><block_start>root.add_hparam(k v)<block_end><block_end><block_end><def_stmt>fill_gpu_jobs all_jobs logger job_parser wait_until_next=300 retry_delay=300 do_shuffle=<false><block_start><if_stmt>do_shuffle<block_start>shuffle(all_jobs)<block_end>all_procs=[]<while_stmt>all_jobs<block_start>logger.info('number of jobs in the queue: %d'%len(all_jobs))<line_sep>j=all_jobs.pop()<line_sep>logger.info('will start the job: %s ...'%job_parser(j))<try_stmt><block_start>GPUtil.getFirstAvailable()<line_sep># check if there is a free GPU! process=subprocess.Popen(job_parser(j) shell=<true>)<line_sep>all_procs.append((process j))<line_sep>time.sleep(wait_until_next)<block_end><except_stmt>FileNotFoundError<block_start>logger.warning('there is no gpu, running on cpu!')<line_sep>process=subprocess.Popen(job_parser(j) shell=<true>)<line_sep>all_procs.append((process j))<block_end><except_stmt>RuntimeError<as>e<block_start>logger.error(str(e))<line_sep>logger.warning('all gpus are busy! 
waiting for a free slot...')<line_sep># add job back all_jobs.append(j)<line_sep>time.sleep(retry_delay)<block_end><block_end>exit_codes=[(p.wait() j)<for>p,j all_procs]<line_sep><return>[v<for>p,v exit_codes<if>p<ne>0]<block_end><def_stmt>get_args_cli args<block_start>d=defaultdict(list)<if_stmt>args<block_start><for_stmt>k,v ((k.lstrip('-') v)<for>k,v (a.split('=')<for>a args))<block_start>d[k].append(v)<block_end><for_stmt>k,v d.items()<block_start>parsed_v=[s<for>s (parse_arg(vv)<for>vv v)<if>s<is><not><none>]<if_stmt>len(parsed_v)<g>1<block_start>d[k]=parsed_v<block_end><if_stmt>len(parsed_v)<eq>1<block_start>d[k]=parsed_v[0]<block_end><block_end><block_end><return>d<block_end><def_stmt>parse_arg v:str<block_start><if_stmt>v.startswith('[')<and>v.endswith(']')# function args must be immutable tuples not list <block_start>tmp=v.replace('[' '').replace(']' '').strip().split(',')<if_stmt>len(tmp)<g>0<block_start><return>[parse_arg(vv.strip())<for>vv tmp]<block_end><else_stmt><block_start><return>[]<block_end><block_end><try_stmt><block_start>v=int(v)# parse int parameter <block_end><except_stmt>ValueError<block_start><try_stmt><block_start>v=float(v)# parse float parameter <block_end><except_stmt>ValueError<block_start><if_stmt>len(v)<eq>0# ignore it when the parameter is empty <block_start>v=<none><block_end><elif_stmt>v.lower()<eq>'true'# parse boolean parameter <block_start>v=<true><block_end><elif_stmt>v.lower()<eq>'false'<block_start>v=<false><block_end><block_end><block_end><return>v<block_end><def_stmt>get_scope_name <block_start><return>tf.get_variable_scope().name.split('/')[0]<block_end><def_stmt>sparse_nll_loss probs labels epsilon=1e-9 scope=<none><block_start>""" negative log likelihood loss """<with_stmt>tf.name_scope(scope "log_loss")<block_start>labels=tf.one_hot(labels tf.shape(probs)[1] axis=1 dtype=tf.float32)<line_sep>losses=-tf.reduce_sum(labels<times>tf.log(probs+epsilon) 1)<block_end><return>losses<block_end><def_stmt>normalize_distribution p eps=1e-9<block_start>p<augadd>eps<line_sep>norm=tf.reduce_sum(p axis=1)<line_sep><return>tf.cast(p tf.float32)/tf.reshape(norm (-1 1))<block_end><def_stmt>kl_divergence p q eps=1e-9<block_start>p=normalize_distribution(p eps)<line_sep>q=normalize_distribution(q eps)<line_sep><return>tf.reduce_sum(p<times>tf.log(p/q) axis=1)<block_end><def_stmt>get_kl_loss start_label start_probs bandwidth=1.0<block_start>a=tf.reshape(tf.range(tf.shape(start_probs)[1]) (1 -1))<line_sep>b=tf.reshape(start_label (-1 1))<line_sep>start_true_probs=tf.exp(-tf.cast(tf.squared_difference(a b) tf.float32)/bandwidth)<line_sep><return>sym_kl_divergence(start_true_probs start_probs)<block_end><def_stmt>sym_kl_divergence p q eps=1e-9<block_start><return>(kl_divergence(p q eps)+kl_divergence(q p eps))/2.0<block_end><def_stmt>get_conv1d x out_dim window_len name act_fn<block_start><return>tf.layers.conv1d(x out_dim window_len strides=1 padding='SAME' name=name activation=act_fn)<block_end><def_stmt>upsampling_a2b a b D_a<block_start><return>tf.squeeze(tf.image.resize_images(tf.expand_dims(a axis=-1) [tf.shape(b)[1] D_a] method=ResizeMethod.NEAREST_NEIGHBOR) axis=-1)<block_end><def_stmt>dropout args keep_prob is_train mode="recurrent"<block_start><if_stmt>keep_prob<l>1.0<block_start>noise_shape=<none><line_sep>scale=1.0<line_sep>shape=tf.shape(args)<if_stmt>mode<eq>"embedding"<block_start>noise_shape=[shape[0] 1]<line_sep>scale=keep_prob<block_end><if_stmt>mode<eq>"recurrent"<and>len(args.get_shape().as_list())<eq>3<block_start>noise_shape=[shape[0] 1 
shape[-1]]<block_end>args=tf.cond(is_train <lambda>:tf.nn.dropout(args keep_prob noise_shape=noise_shape)<times>scale <lambda>:args)<block_end><return>args<block_end><def_stmt>get_tmp_yaml par prefix=<none><block_start><import_stmt>tempfile<with_stmt>tempfile.NamedTemporaryFile('w' delete=<false> prefix=prefix)<as>tmp<block_start>YAML().dump(par tmp)<line_sep><return>tmp.name<block_end><block_end><def_stmt>build_model args reset_graph=<true><block_start>rccore=importlib.import_module(args.package_rccore)<if_stmt>reset_graph<block_start>tf.reset_default_graph()<block_end><return>rccore.RCCore(args)<block_end><def_stmt>get_last_output output sequence_length name<block_start>"""Get the last value of the returned output of an RNN. http://disq.us/p/1gjkgdr output: [batch x number of steps x ... ] Output of the dynamic lstm. sequence_length: [batch] Length of each of the sequence. """<line_sep>rng=tf.range(0 tf.shape(sequence_length)[0])<line_sep>indexes=tf.stack([rng sequence_length-1] 1)<line_sep><return>tf.gather_nd(output indexes name)<block_end><def_stmt>import_class import_str<block_start>mod_str,_sep,class_str=import_str.rpartition('.')<line_sep>cur_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>sys.path.insert(0 cur_dir)<line_sep>__import__(mod_str)<line_sep>sys.path.remove(cur_dir)<try_stmt><block_start><return>getattr(sys.modules[mod_str] class_str)<block_end><except_stmt>AttributeError<block_start><raise>ImportError('Class %s cannot be found (%s)'%(class_str traceback.format_exception(*sys.exc_info())))<block_end><block_end><def_stmt>delete_module modname<block_start><import_from_stmt>sys modules<line_sep>del_keys=[]<for_stmt>mod_key,mod_value modules.items()<block_start><if_stmt>modname<in>mod_key<block_start>del_keys.append(mod_key)<block_end><elif_stmt>modname<in>str(mod_value)<block_start>del_keys.append(mod_key)<block_end><block_end><for_stmt>key del_keys<block_start><del_stmt>modules[key]<block_end><block_end>
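# --- Usage sketch (added for illustration; not part of the original module) ---
# get_args_cli() turns "key=value" overrides into typed Python values through
# parse_arg(): ints, floats, booleans and [..] lists are recognised. The
# example values are arbitrary.
if __name__ == '__main__':
    overrides = get_args_cli(['--lr=0.001', '--use_dropout=true', '--layers=[128,64]'])
    print(dict(overrides))  # -> {'lr': 0.001, 'use_dropout': True, 'layers': [128, 64]}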
<import_from_stmt>layer *<class_stmt>SparseCodeLayer(Layer)<block_start><def_stmt>AllocateBatchsizeDependentMemory self batchsize<block_start>super(SparseCodeLayer self).AllocateBatchsizeDependentMemory(batchsize)<line_sep>self.approximator=cm.empty(self.state.shape)<line_sep>self.temp3=cm.empty(self.state.shape)<line_sep>self.grad=cm.empty(self.state.shape)<line_sep>self.grad_scale=cm.CUDAMatrix(np.zeros((self.state.shape[0] 1)))<line_sep>self.m_by_m=cm.empty((self.state.shape[0] self.state.shape[0]))<block_end><def_stmt>ApplyActivation self state<block_start><if_stmt>self.activation<eq>deepnet_pb2.Hyperparams.LOGISTIC<block_start>cm.sigmoid(state)<block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.TANH<block_start>cm.tanh(state)<block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.RECTIFIED_LINEAR<block_start>state.greater_than(0 target=self.temp)<line_sep>state.mult(self.temp)<block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.RECTIFIED_LINEAR_SMOOTH<block_start>cm.log_1_plus_exp(state)<block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.LINEAR<block_start><pass><block_end><block_end><def_stmt>ComputeDeriv self state<block_start>"""Compute derivative w.r.t input given derivative w.r.t output."""<if_stmt>self.activation<eq>deepnet_pb2.Hyperparams.LOGISTIC<block_start>self.deriv.apply_logistic_deriv(state)<block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.TANH<block_start>self.deriv.apply_tanh_deriv(state)<if_stmt>self.hyperparams.dropout<block_start>self.deriv.mult(self.mask)<block_end><block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.RECTIFIED_LINEAR<block_start>self.deriv.apply_rectified_linear_deriv(state)<block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.RECTIFIED_LINEAR_SMOOTH<block_start>self.deriv.apply_rectified_linear_smooth_deriv(state)<block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.LINEAR<block_start><if_stmt>self.hyperparams.dropout<block_start>self.deriv.mult(self.mask)<block_end><block_end><elif_stmt>self.activation<eq>deepnet_pb2.Hyperparams.SOFTMAX<block_start><raise>Exception('Not implemented.')<block_end><else_stmt><block_start><raise>Exception('Unknown activation.')<block_end><block_end><block_end>
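# --- CPU illustration (added; not part of the original module) ---
# ApplyActivation/ComputeDeriv above operate on cudamat matrices. This NumPy
# sketch mirrors the RECTIFIED_LINEAR branch to show what those calls compute:
# the forward pass keeps positive entries, the backward pass masks the
# incoming derivative with the same indicator.
import numpy as np

state = np.array([-1.0, 0.5, 2.0])
mask = (state > 0).astype(state.dtype)  # state.greater_than(0, target=temp)
activated = state * mask                # state.mult(temp)
deriv = np.ones_like(state) * mask      # deriv.apply_rectified_linear_deriv(state)
print(activated, deriv)                 # -> [0. 0.5 2.] and [0. 1. 1.]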
<import_stmt>random<import_stmt>string<import_stmt>json<import_stmt>boto3<import_stmt>pytest<import_from_stmt>stateful_set AWS_REGIONS InstanceParams find_ubuntu_ami AwsEC2Launcher AwsEC2Terminator find_instances valid_instances get_tag manage_instances <class_stmt>EC2TestCtx(object)<block_start><def_stmt>__init__ self region resource client prices=<none><block_start>self.region=region<line_sep>self.resource=resource<line_sep>self.client=client<line_sep>self.prices=prices<block_end><block_end>############ # FIXTURES # ############ @pytest.fixture<def_stmt>ec2 regions ec2_all<block_start><return>[ec2_all[r]['rc']<for>r regions]<block_end>@pytest.fixture<def_stmt>ec2cl regions ec2_all<block_start><return>[ec2_all[r]['cl']<for>r regions]<block_end>@pytest.fixture<def_stmt>ec2_resources request regions ec2<block_start><def_stmt>gen_params group_suffix=<none> key_name_suffix=<none> security_group_suffix=<none><block_start><def_stmt>_random N=7<block_start><return>''.join(random.choice(string.ascii_uppercase+string.digits)<for>_ range(N))<block_end><return>InstanceParams(project='Indy-PA-dev' add_tags={'Purpose':'Test Pool Automation'} namespace='test_stateful_set' group="group_{}".format(group_suffix<if>group_suffix<else>_random()) key_name="test_stateful_set_key_{}".format(key_name_suffix<if>key_name_suffix<else>_random()) security_group="test_stateful_set_security_group_{}".format(security_group_suffix<if>security_group_suffix<else>_random()) type_name='t2.micro' # TODO docs market_spot=(request.config.getoption("--market-type")<eq>'spot') spot_max_price=<none> # TODO docs ebs_volume_size=9 ebs_volume_type='gp2' )<block_end><def_stmt>manage_key_pair ec2 present params<block_start>count=0<for_stmt>key ec2.key_pairs.all()<block_start><if_stmt>key.key_name<ne>params.key_name<block_start><continue><block_end><if_stmt>present<and>count<eq>0<block_start>count=1<block_end><else_stmt><block_start>key.delete()<block_end><block_end><if_stmt>present<and>count<eq>0<block_start>ec2.create_key_pair(KeyName=params.key_name)<block_end><block_end><def_stmt>manage_security_group ec2 present params<block_start>count=0<for_stmt>sgroup ec2.security_groups.all()<block_start><if_stmt>sgroup.group_name<ne>params.security_group<block_start><continue><block_end><if_stmt>present<and>count<eq>0<block_start>count=1<block_end><else_stmt><block_start>sgroup.delete()<block_end><block_end><if_stmt>present<and>count<eq>0<block_start>sg=ec2.create_security_group(GroupName=params.security_group Description='Test security group')<line_sep>sg.create_tags(Tags=[{'Key':'Name' 'Value':"{}-{}-{}".format(params.project params.namespace params.group)} {'Key':'Project' 'Value':params.project} {'Key':'Namespace' 'Value':params.namespace} {'Key':'Group' 'Value':params.group}])<block_end><block_end>params=gen_params(group_suffix=request.node.name key_name_suffix=request.node.name security_group_suffix=request.node.name)<for_stmt>rc ec2<block_start>manage_key_pair(rc <true> params)<line_sep>manage_security_group(rc <true> params)<block_end><yield>params<line_sep>terminator=AwsEC2Terminator()<for_stmt>region,rc zip(regions ec2)<block_start><for_stmt>inst find_instances(rc params.project params.namespace params.group)<block_start>terminator.terminate(inst region)<block_end><block_end>terminator.wait(<false>)<for_stmt>rc ec2<block_start>manage_key_pair(rc <false> params)<line_sep>manage_security_group(rc <false> params)<block_end><block_end>@pytest.fixture(scope="session")<def_stmt>pricing_client # pricing API is available only through 
us-east-1 and ap-south-1 # https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/using-pelong.html <block_start><return>boto3.client('pricing' region_name='us-east-1')<block_end>@pytest.fixture<def_stmt>on_demand_prices request pricing_client ec2_prices regions ec2_resources<block_start>marker=request.node.get_closest_marker('prices')<if_stmt><not>(marker<and>('on-demand'<in>marker.kwargs.get('term' [])))<block_start><return><block_end><for_stmt>region_code regions<block_start>res=ec2_prices[region_code]['on-demand'].get(ec2_resources.type_name)<if_stmt>res<is><none># Search product filters # https://docs.aws.amazon.com/aws-cost-management/latest/APIReference/API_pricing_Filter.html # https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/using-ppslong.html <block_start>filters=[{'Field':k 'Type':'TERM_MATCH' 'Value':v}<for>k,v (('tenancy' 'shared') ('capacitystatus' 'UnusedCapacityReservation') ('location' AWS_REGIONS[region_code].location) ('operatingSystem' 'Linux') # TODO might be parametrized ('instanceType' ec2_resources.type_name) ('preInstalledSw' 'NA'))]<line_sep>products=pricing_client.get_products(ServiceCode='AmazonEC2' Filters=filters)<line_sep>price_info=json.loads(products['PriceList'][0])<line_sep># https://docs.aws.amazon.com/awsaccountbilling/latest/aboutv2/reading-an-offer.html # # "terms": { # "OnDemand": { # "<sku.offerTermCode>": { # "offerTermCode":"The term code of the product", # "sku":"The SKU of the product", # ... # "priceDimensions": { # "<sku.offerTermCode.rateCode>": { # "rateCode":"The rate code of the price", # ... # "pricePerUnit": { # "currencyCode":"currencyRate", # } # } # } # } # } # } offer=price_info['terms']['OnDemand'].popitem()[1]<line_sep>price_tier=offer['priceDimensions'].popitem()[1]<line_sep>res=float(price_tier['pricePerUnit']['USD'])<line_sep>ec2_prices[region_code]['on-demand'][ec2_resources.type_name]=res<block_end><block_end><block_end>@pytest.fixture<def_stmt>ec2ctxs regions ec2 ec2cl on_demand_prices ec2_prices<block_start><assert_stmt>len(set([len(l)<for>l (regions ec2 ec2cl)]))<eq>1<line_sep><return>[EC2TestCtx(r rc cl ec2_prices[r])<for>r,rc,cl zip(regions ec2 ec2cl)]<block_end>@pytest.fixture<def_stmt>ec2ctx ec2ctxs<block_start><assert_stmt>len(ec2ctxs)<eq>1<line_sep><return>ec2ctxs[0]<block_end>######### # TESTS # ######### <def_stmt>test_find_ubuntu_image ec2ctx<block_start>image_id=find_ubuntu_ami(ec2ctx.resource)<assert_stmt>image_id<is><not><none><line_sep>image=ec2ctx.resource.Image(image_id)<assert_stmt>image.owner_id<eq>'099720109477'# Canonical <assert_stmt>image.state<eq>'available'<assert_stmt>image.architecture<eq>'x86_64'<assert_stmt>'Canonical'<in>image.description<assert_stmt>'Ubuntu'<in>image.description<assert_stmt>'16.04'<in>image.description<assert_stmt>'UNSUPPORTED'<not><in>image.description<block_end># TODO split test_AwsEC2Launcher tests into multiple more focused ones <def_stmt>check_instance_params inst params ec2cl=<none> price=<none># https://stackoverflow.com/questions/5595425/what-is-the-best-way-to-compare-floats-for-almost-equality-in-python # https://www.python.org/dev/peps/pep-0485/#proposed-implementation <block_start><def_stmt>isclose a b rel_tol=1e-09 abs_tol=0.0<block_start><return>abs(a-b)<le>max(rel_tol<times>max(abs(a) abs(b)) abs_tol)<block_end><def_stmt>check_tags obj<block_start><assert_stmt>{'Key':'Project' 'Value':params.project}<in>obj.tags<assert_stmt>{'Key':'Namespace' 'Value':params.namespace}<in>obj.tags<assert_stmt>{'Key':'Group' 
'Value':params.group}<in>obj.tags<for_stmt>tag_key,tag_value params.add_tags.iteritems()<block_start><assert_stmt>tag_value<eq>get_tag(obj tag_key)<block_end><block_end># general <assert_stmt>inst.instance_type<eq>params.type_name<assert_stmt>inst.state['Name']<eq>'running'<line_sep># tags check_tags(inst)<line_sep># linked resources <assert_stmt>inst.key_name<eq>params.key_name<assert_stmt>len(inst.security_groups)<eq>1<assert_stmt>inst.security_groups[0]['GroupName']<eq>params.security_group<line_sep># ebs options volumes=list(inst.volumes.all())<assert_stmt>len(volumes)<eq>1<assert_stmt>volumes[0].size<eq>params.ebs_volume_size<assert_stmt>volumes[0].volume_type<eq>params.ebs_volume_type<line_sep>check_tags(volumes[0])<line_sep># market options <if_stmt>params.market_spot<block_start><assert_stmt>inst.instance_lifecycle<eq>'spot'<assert_stmt>inst.spot_instance_request_id<is><not><none><line_sep>spot_params=ec2cl.describe_spot_instance_requests(SpotInstanceRequestIds=[inst.spot_instance_request_id])<assert_stmt>isclose(float(spot_params['SpotInstanceRequests'][0]['SpotPrice']) price)<block_end><block_end>@pytest.mark.regions([['us-east-2' 'eu-west-1']])<def_stmt>test_AwsEC2Launcher_wait ec2ctxs ec2_resources<block_start>launcher=AwsEC2Launcher()<line_sep>instances=[]<line_sep>params=ec2_resources._replace(market_spot=<false>)<for_stmt>ctx ec2ctxs<block_start>_instances=launcher.launch(params 1 region=ctx.region ec2=ctx.resource)<assert_stmt>len(_instances)<eq>1<line_sep>instances<augadd>_instances<block_end><assert_stmt>len(launcher.awaited)<g>0<line_sep>launcher.wait()<assert_stmt>len(launcher.awaited)<eq>0<for_stmt>inst instances<block_start>check_instance_params(inst params)<block_end><block_end><def_stmt>idfn_test_AwsEC2Launcher max_price<block_start><if_stmt>max_price<is><none><block_start><return>'max_price_default'<block_end><else_stmt><block_start><return>"max_price_{}".format(max_price)<block_end><block_end>@pytest.mark.prices(term="on-demand")@pytest.mark.regions([['us-east-2'] ['eu-west-1']])@pytest.mark.parametrize('max_price_factor' [<none> 0.7] ids=idfn_test_AwsEC2Launcher)<def_stmt>test_AwsEC2Launcher_spot ec2ctx ec2_resources max_price_factor<block_start>launcher=AwsEC2Launcher()<line_sep>default_price=ec2ctx.prices['on-demand'][ec2_resources.type_name]<line_sep>price=default_price<times>(1<if>max_price_factor<is><none><else>max_price_factor)<line_sep>params=ec2_resources._replace(market_spot=<true> spot_max_price=(<none><if>max_price_factor<is><none><else>"{}".format(price)))<line_sep>instances=launcher.launch(params 1 region=ec2ctx.region ec2=ec2ctx.resource)<line_sep>launcher.wait()<for_stmt>inst instances<block_start>check_instance_params(inst params ec2ctx.client price)<block_end><block_end>@pytest.mark.regions([['us-east-2' 'eu-west-1']])<def_stmt>test_AwsEC2Terminator_wait ec2ctxs ec2_resources<block_start>launcher=AwsEC2Launcher()<line_sep>terminator=AwsEC2Terminator()<line_sep>instances=[]<line_sep>params=ec2_resources._replace(market_spot=<false>)<for_stmt>ctx ec2ctxs<block_start>_instances=launcher.launch(params 1 region=ctx.region ec2=ctx.resource)<assert_stmt>len(_instances)<eq>1<line_sep>instances<augadd>_instances<block_end>launcher.wait()<for_stmt>instance instances<block_start>terminator.terminate(instance)<block_end><assert_stmt>len(terminator.awaited)<g>0<line_sep>terminator.wait()<assert_stmt>len(terminator.awaited)<eq>0<for_stmt>instance 
instances<block_start><assert_stmt>instance.state['Name']<eq>'terminated'<block_end><block_end>@pytest.mark.regions([['us-east-2'] ['eu-west-1']])<def_stmt>test_AwsEC2Terminator_spot ec2ctx ec2_resources<block_start>launcher=AwsEC2Launcher()<line_sep>terminator=AwsEC2Terminator()<line_sep>params=ec2_resources._replace(market_spot=<true> spot_max_price=<none>)<line_sep>instances=launcher.launch(params 1 region=ec2ctx.region ec2=ec2ctx.resource)<line_sep>launcher.wait()<for_stmt>instance instances<block_start>terminator.terminate(instance)<block_end><for_stmt>instance instances<block_start><assert_stmt>instance.spot_instance_request_id<is><not><none><line_sep>spot_params=ec2ctx.client.describe_spot_instance_requests(SpotInstanceRequestIds=[instance.spot_instance_request_id])<line_sep># https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/spot-bid-status.html#get-spot-instance-bid-status <assert_stmt>(spot_params['SpotInstanceRequests'][0]['State']<in>('closed' 'cancelled'))<assert_stmt>(spot_params['SpotInstanceRequests'][0]['Status']['Code']<in>('instance-terminated-by-user' 'request-canceled-and-instance-running'))<block_end>terminator.wait()<block_end>@pytest.mark.regions([['us-east-1']])<def_stmt>test_find_instances ec2ctx ec2_resources<block_start>launcher=AwsEC2Launcher()<line_sep>terminator=AwsEC2Terminator()<line_sep>params1=ec2_resources._replace(group="{}_{}".format(ec2_resources.group 'aaa'))<line_sep>params2=ec2_resources._replace(group="{}_{}".format(ec2_resources.group 'bbb'))<for_stmt>group (params1.group params2.group)<block_start><for_stmt>inst find_instances(ec2ctx.resource ec2_resources.project ec2_resources.namespace group)<block_start>terminator.terminate(inst ec2ctx.region)<block_end><block_end>terminator.wait(<false>)<line_sep>launcher.launch(params1 2 ec2=ec2ctx.resource)<line_sep>launcher.launch(params2 3 ec2=ec2ctx.resource)<line_sep>aaa=find_instances(ec2ctx.resource params1.project params1.namespace params1.group)<line_sep>bbb=find_instances(ec2ctx.resource params2.project params2.namespace params2.group)<line_sep>aaa_and_bbb=[i<for>i find_instances(ec2ctx.resource ec2_resources.project ec2_resources.namespace)<if>get_tag(i 'Group')<in>(params1.group params2.group)]<assert_stmt>len(aaa)<eq>2<assert_stmt>len(bbb)<eq>3<assert_stmt>len(aaa_and_bbb)<eq>5<assert_stmt>set(aaa).union(bbb)<eq>set(aaa_and_bbb)<for_stmt>inst aaa_and_bbb<block_start>terminator.terminate(inst ec2ctx.region)<block_end>terminator.wait(<false>)<block_end><def_stmt>test_valid_instances <block_start>regions=['us' 'eu']<line_sep>instances=valid_instances(regions 0)<assert_stmt>instances['us']<eq>[]<assert_stmt>instances['eu']<eq>[]<line_sep>instances=valid_instances(regions 1)<assert_stmt>instances['us']<eq>['1']<assert_stmt>instances['eu']<eq>[]<line_sep>instances=valid_instances(regions 2)<assert_stmt>instances['us']<eq>['1']<assert_stmt>instances['eu']<eq>['2']<line_sep>instances=valid_instances(regions 3)<assert_stmt>instances['us']<eq>['1' '3']<assert_stmt>instances['eu']<eq>['2']<line_sep>instances=valid_instances(regions 4)<assert_stmt>instances['us']<eq>['1' '3']<assert_stmt>instances['eu']<eq>['2' '4']<block_end>@pytest.mark.regions([['us-east-2' 'ca-central-1' 'eu-west-1']] ids=['3regions'])<def_stmt>test_manage_instances ec2ctxs ec2_resources<block_start>regions=[ctx.region<for>ctx ec2ctxs]<def_stmt>check_hosts hosts<block_start><assert_stmt>len(set(host.tag_id<for>host hosts))<eq>len(hosts)<assert_stmt>len(set(host.public_ip<for>host hosts))<eq>len(hosts)<block_end><def_stmt>check_tags 
instances<block_start><for_stmt>inst_group instances<block_start><for_stmt>inst inst_group<block_start>inst_tag_id=get_tag(inst 'ID')<assert_stmt>inst_tag_id<is><not><none><line_sep>inst_tag_name=get_tag(inst 'Name')<assert_stmt>inst_tag_name<eq>"{}-{}-{}-{}".format(ec2_resources.project ec2_resources.namespace ec2_resources.group inst_tag_id.zfill(3)).lower()<block_end><block_end><block_end>res=manage_instances(regions ec2_resources 4)<line_sep>instances=[find_instances(ctx.resource ec2_resources.project ec2_resources.namespace ec2_resources.group)<for>ctx ec2ctxs]<assert_stmt>res.changed<assert_stmt>len(res.active)<eq>4<assert_stmt>len(res.terminated)<eq>0<line_sep>check_hosts(res.active+res.terminated)<line_sep>check_tags(instances)<assert_stmt>len(instances[0])<eq>2<assert_stmt>len(instances[1])<eq>1<assert_stmt>len(instances[2])<eq>1<assert_stmt>set([get_tag(instances[0][0] 'ID') get_tag(instances[0][1] 'ID')])<eq>set(['1' '4'])<assert_stmt>get_tag(instances[1][0] 'ID')<eq>'2'<assert_stmt>get_tag(instances[2][0] 'ID')<eq>'3'<line_sep>res=manage_instances(regions ec2_resources 4)<line_sep>instances=[find_instances(ctx.resource ec2_resources.project ec2_resources.namespace ec2_resources.group)<for>ctx ec2ctxs]<assert_stmt><not>res.changed<assert_stmt>len(res.active)<eq>4<assert_stmt>len(res.terminated)<eq>0<line_sep>check_hosts(res.active+res.terminated)<line_sep>check_tags(instances)<assert_stmt>len(instances[0])<eq>2<assert_stmt>len(instances[1])<eq>1<assert_stmt>len(instances[2])<eq>1<assert_stmt>set([get_tag(instances[0][0] 'ID') get_tag(instances[0][1] 'ID')])<eq>set(['1' '4'])<assert_stmt>get_tag(instances[1][0] 'ID')<eq>'2'<assert_stmt>get_tag(instances[2][0] 'ID')<eq>'3'<line_sep>res=manage_instances(regions ec2_resources 2)<line_sep>instances=[find_instances(ctx.resource ec2_resources.project ec2_resources.namespace ec2_resources.group)<for>ctx ec2ctxs]<assert_stmt>res.changed<assert_stmt>len(res.active)<eq>2<assert_stmt>len(res.terminated)<eq>2<line_sep>check_hosts(res.active+res.terminated)<line_sep>check_tags(instances)<assert_stmt>len(instances[0])<eq>1<assert_stmt>len(instances[1])<eq>1<assert_stmt>len(instances[2])<eq>0<assert_stmt>get_tag(instances[0][0] 'ID')<eq>'1'<assert_stmt>get_tag(instances[1][0] 'ID')<eq>'2'<line_sep>res=manage_instances(regions ec2_resources 0)<line_sep>instances=[find_instances(ctx.resource ec2_resources.project ec2_resources.namespace ec2_resources.group)<for>ctx ec2ctxs]<assert_stmt>res.changed<assert_stmt>len(res.active)<eq>0<assert_stmt>len(res.terminated)<eq>2<line_sep>check_hosts(res.active+res.terminated)<line_sep>check_tags(instances)<assert_stmt>len(instances[0])<eq>0<assert_stmt>len(instances[1])<eq>0<assert_stmt>len(instances[2])<eq>0<line_sep>res=manage_instances(regions ec2_resources 0)<line_sep>instances=[find_instances(ctx.resource ec2_resources.project ec2_resources.namespace ec2_resources.group)<for>ctx ec2ctxs]<assert_stmt><not>res.changed<assert_stmt>len(res.active)<eq>0<assert_stmt>len(res.terminated)<eq>0<line_sep>check_hosts(res.active+res.terminated)<line_sep>check_tags(instances)<assert_stmt>len(instances[0])<eq>0<assert_stmt>len(instances[1])<eq>0<assert_stmt>len(instances[2])<eq>0<block_end>
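# --- Added illustration (extrapolated from test_valid_instances above) ---
# valid_instances() deals instance IDs out round-robin across the regions,
# which is the distribution test_manage_instances relies on. The expected
# values below are an extrapolation of that pattern to five instances.
def test_valid_instances_round_robin():
    instances = valid_instances(['us', 'eu'], 5)
    assert instances['us'] == ['1', '3', '5']
    assert instances['eu'] == ['2', '4']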
# -*- coding: utf-8 -*- <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>bmw path<block_start>"""Daily Log Returns on BMW Share Price These data are the daily log returns on BMW share price from Tuesday 2nd January 1973 until Tuesday 23rd July 1996. The data are contained in a numeric vector. The dates of each observation are contained in a `times` attribute, which is an object of class `"POSIXct"` (see `DateTimeClasses`). Note that these data form an irregular time series because no trading takes place at the weekend. A numeric vector containing 6146 observations, with a `times` attribute. Args: path: str. Path to directory which either stores file or otherwise file will be downloaded and extracted there. Filename is `bmw.csv`. Returns: Tuple of np.ndarray `x_train` with 6146 rows and 1 column and dictionary `metadata` of column headers (feature names). """<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='bmw.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/evir/bmw.csv'<line_sep>maybe_download_and_extract(path url save_file_name='bmw.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
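# --- Usage sketch (added for illustration; not part of the original module) ---
# `~/data` is a placeholder directory; bmw() downloads bmw.csv there on first use.
if __name__ == '__main__':
    x_train, metadata = bmw('~/data')
    print(x_train.shape)        # -> (6146, 1)
    print(metadata['columns'])  # column header(s) of bmw.csv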
<import_from_stmt>PyPDF4 PdfFileReader PdfFileWriter<import_from_stmt>PyPDF4.pdf ContentStream<import_from_stmt>PyPDF4.generic TextStringObject NameObject<import_from_stmt>PyPDF4.utils b_<import_stmt>os<import_stmt>argparse<import_from_stmt>io BytesIO<import_from_stmt>typing Tuple<line_sep># Import the reportlab library <import_from_stmt>reportlab.pdfgen canvas<line_sep># The size of the page supposedly A4 <import_from_stmt>reportlab.lib.pagesizes A4<line_sep># The color of the watermark <import_from_stmt>reportlab.lib colors<line_sep>PAGESIZE=A4<line_sep>FONTNAME='Helvetica-Bold'<line_sep>FONTSIZE=40<line_sep># using colors module # COLOR = colors.lightgrey # or simply RGB # COLOR = (190, 190, 190) COLOR=colors.red<line_sep># The position attributes of the watermark X=250<line_sep>Y=10<line_sep># The rotation angle in order to display the watermark diagonally if needed ROTATION_ANGLE=45<def_stmt>get_info input_file:str<block_start>""" Extracting the file info """<line_sep># If PDF is encrypted the file metadata cannot be extracted <with_stmt>open(input_file 'rb')<as>pdf_file<block_start>pdf_reader=PdfFileReader(pdf_file strict=<false>)<line_sep>output={"File":input_file "Encrypted":("True"<if>pdf_reader.isEncrypted<else>"False")}<if_stmt><not>pdf_reader.isEncrypted<block_start>info=pdf_reader.getDocumentInfo()<line_sep>num_pages=pdf_reader.getNumPages()<line_sep>output["Author"]=info.author<line_sep>output["Creator"]=info.creator<line_sep>output["Producer"]=info.producer<line_sep>output["Subject"]=info.subject<line_sep>output["Title"]=info.title<line_sep>output["Number of pages"]=num_pages<block_end><block_end># To Display collected metadata print("## File Information ##################################################")<line_sep>print("\n".join("{}:{}".format(i j)<for>i,j output.items()))<line_sep>print("######################################################################")<line_sep><return><true> output<block_end><def_stmt>get_output_file input_file:str output_file:str<block_start>""" Check whether a temporary output file is needed or not """<line_sep>input_path=os.path.dirname(input_file)<line_sep>input_filename=os.path.basename(input_file)<line_sep># If output file is empty -> generate a temporary output file # If output file is equal to input_file -> generate a temporary output file <if_stmt><not>output_file<or>input_file<eq>output_file<block_start>tmp_file=os.path.join(input_path 'tmp_'+input_filename)<line_sep><return><true> tmp_file<block_end><return><false> output_file<block_end><def_stmt>create_watermark wm_text:str<block_start>""" Creates a watermark template. 
"""<if_stmt>wm_text# Generate the output to a memory buffer <block_start>output_buffer=BytesIO()<line_sep># Default Page Size = A4 c=canvas.Canvas(output_buffer pagesize=PAGESIZE)<line_sep># you can also add image instead of text # c.drawImage("logo.png", X, Y, 160, 160) # Set the size and type of the font c.setFont(FONTNAME FONTSIZE)<line_sep># Set the color <if_stmt>isinstance(COLOR tuple)<block_start>color=(c/255<for>c COLOR)<line_sep>c.setFillColorRGB(*color)<block_end><else_stmt><block_start>c.setFillColor(COLOR)<block_end># Rotate according to the configured parameter c.rotate(ROTATION_ANGLE)<line_sep># Position according to the configured parameter c.drawString(X Y wm_text)<line_sep>c.save()<line_sep><return><true> output_buffer<block_end><return><false> <none><block_end><def_stmt>save_watermark wm_buffer output_file<block_start>""" Saves the generated watermark template to disk """<with_stmt>open(output_file mode='wb')<as>f<block_start>f.write(wm_buffer.getbuffer())<block_end>f.close()<line_sep><return><true><block_end><def_stmt>watermark_pdf input_file:str wm_text:str pages:Tuple=<none><block_start>""" Adds watermark to a pdf file. """<line_sep>result,wm_buffer=create_watermark(wm_text)<if_stmt>result<block_start>wm_reader=PdfFileReader(wm_buffer)<line_sep>pdf_reader=PdfFileReader(open(input_file 'rb') strict=<false>)<line_sep>pdf_writer=PdfFileWriter()<try_stmt><block_start><for_stmt>page range(pdf_reader.getNumPages())# If required to watermark specific pages not all the document pages <block_start><if_stmt>pages<block_start><if_stmt>str(page)<not><in>pages<block_start><continue><block_end><block_end>page=pdf_reader.getPage(page)<line_sep>page.mergePage(wm_reader.getPage(0))<line_sep>pdf_writer.addPage(page)<block_end><block_end><except_stmt>Exception<as>e<block_start>print("Exception = " e)<line_sep><return><false> <none> <none><block_end><return><true> pdf_reader pdf_writer<block_end><block_end><def_stmt>unwatermark_pdf input_file:str wm_text:str pages:Tuple=<none><block_start>""" Removes watermark from the pdf file. """<line_sep>pdf_reader=PdfFileReader(open(input_file 'rb') strict=<false>)<line_sep>pdf_writer=PdfFileWriter()<for_stmt>page range(pdf_reader.getNumPages())# If required for specific pages <block_start><if_stmt>pages<block_start><if_stmt>str(page)<not><in>pages<block_start><continue><block_end><block_end>page=pdf_reader.getPage(page)<line_sep># Get the page content content_object=page["/Contents"].getObject()<line_sep>content=ContentStream(content_object pdf_reader)<line_sep># Loop through all the elements page elements <for_stmt>operands,operator content.operations# Checks the TJ operator and replaces the corresponding string operand (Watermark text) with '' <block_start><if_stmt>operator<eq>b_("Tj")<block_start>text=operands[0]<if_stmt>isinstance(text str)<and>text.startswith(wm_text)<block_start>operands[0]=TextStringObject('')<block_end><block_end><block_end>page.__setitem__(NameObject('/Contents') content)<line_sep>pdf_writer.addPage(page)<block_end><return><true> pdf_reader pdf_writer<block_end><def_stmt>watermark_unwatermark_file **kwargs<block_start>input_file=kwargs.get('input_file')<line_sep>wm_text=kwargs.get('wm_text')<line_sep># watermark -> Watermark # unwatermark -> Unwatermark action=kwargs.get('action')<line_sep># HDD -> Temporary files are saved on the Hard Disk Drive and then deleted # RAM -> Temporary files are saved in memory and then deleted. 
mode=kwargs.get('mode')<line_sep>pages=kwargs.get('pages')<line_sep>temporary,output_file=get_output_file(input_file kwargs.get('output_file'))<if_stmt>action<eq>"watermark"<block_start>result,pdf_reader,pdf_writer=watermark_pdf(input_file=input_file wm_text=wm_text pages=pages)<block_end><elif_stmt>action<eq>"unwatermark"<block_start>result,pdf_reader,pdf_writer=unwatermark_pdf(input_file=input_file wm_text=wm_text pages=pages)<block_end># Completed successfully <if_stmt>result# Generate to memory <block_start><if_stmt>mode<eq>"RAM"<block_start>output_buffer=BytesIO()<line_sep>pdf_writer.write(output_buffer)<line_sep>pdf_reader.stream.close()<line_sep># No need to create a temporary file in RAM Mode <if_stmt>temporary<block_start>output_file=input_file<block_end><with_stmt>open(output_file mode='wb')<as>f<block_start>f.write(output_buffer.getbuffer())<block_end>f.close()<block_end><elif_stmt>mode<eq>"HDD"# Generate to a new file on the hard disk <block_start><with_stmt>open(output_file 'wb')<as>pdf_output_file<block_start>pdf_writer.write(pdf_output_file)<block_end>pdf_output_file.close()<line_sep>pdf_reader.stream.close()<if_stmt>temporary<block_start><if_stmt>os.path.isfile(input_file)<block_start>os.replace(output_file input_file)<block_end>output_file=input_file<block_end><block_end><block_end><block_end><def_stmt>watermark_unwatermark_folder **kwargs<block_start>""" Watermarks all PDF Files within a specified path Unwatermarks all PDF Files within a specified path """<line_sep>input_folder=kwargs.get('input_folder')<line_sep>wm_text=kwargs.get('wm_text')<line_sep># Run in recursive mode recursive=kwargs.get('recursive')<line_sep># watermark -> Watermark # unwatermark -> Unwatermark action=kwargs.get('action')<line_sep># HDD -> Temporary files are saved on the Hard Disk Drive and then deleted # RAM -> Temporary files are saved in memory and then deleted. mode=kwargs.get('mode')<line_sep>pages=kwargs.get('pages')<line_sep># Loop though the files within the input folder. 
<for_stmt>foldername,dirs,filenames os.walk(input_folder)<block_start><for_stmt>filename filenames# Check if pdf file <block_start><if_stmt><not>filename.endswith('.pdf')<block_start><continue><block_end># PDF File found inp_pdf_file=os.path.join(foldername filename)<line_sep>print("Processing file:" inp_pdf_file)<line_sep>watermark_unwatermark_file(input_file=inp_pdf_file output_file=<none> wm_text=wm_text action=action mode=mode pages=pages)<block_end><if_stmt><not>recursive<block_start><break><block_end><block_end><block_end><def_stmt>is_valid_path path<block_start>""" Validates the path inputted and checks whether it is a file path or a folder path """<if_stmt><not>path<block_start><raise>ValueError("Invalid Path")<block_end><if_stmt>os.path.isfile(path)<block_start><return>path<block_end><elif_stmt>os.path.isdir(path)<block_start><return>path<block_end><else_stmt><block_start><raise>ValueError(f"Invalid Path {path}")<block_end><block_end><def_stmt>parse_args <block_start>""" Get user command line parameters """<line_sep>parser=argparse.ArgumentParser(description="Available Options")<line_sep>parser.add_argument('-i' '--input_path' dest='input_path' type=is_valid_path required=<true> help="Enter the path of the file or the folder to process")<line_sep>parser.add_argument('-a' '--action' dest='action' choices=['watermark' 'unwatermark'] type=str default='watermark' help="Choose whether to watermark or to unwatermark")<line_sep>parser.add_argument('-m' '--mode' dest='mode' choices=['RAM' 'HDD'] type=str default='RAM' help="Choose whether to process on the hard disk drive or in memory")<line_sep>parser.add_argument('-w' '--watermark_text' dest='watermark_text' type=str required=<true> help="Enter a valid watermark text")<line_sep>parser.add_argument('-p' '--pages' dest='pages' type=tuple help="Enter the pages to consider e.g.: [2,4]")<line_sep>path=parser.parse_known_args()[0].input_path<if_stmt>os.path.isfile(path)<block_start>parser.add_argument('-o' '--output_file' dest='output_file' type=str help="Enter a valid output file")<block_end><if_stmt>os.path.isdir(path)<block_start>parser.add_argument('-r' '--recursive' dest='recursive' default=<false> type=<lambda>x:(str(x).lower()<in>['true' '1' 'yes']) help="Process Recursively or Non-Recursively")<block_end># To Parse The Command Line Arguments args=vars(parser.parse_args())<line_sep># To Display The Command Line Arguments print("## Command Arguments #################################################")<line_sep>print("\n".join("{}:{}".format(i j)<for>i,j args.items()))<line_sep>print("######################################################################")<line_sep><return>args<block_end><if_stmt>__name__<eq>'__main__'# Parsing command line arguments entered by user <block_start>args=parse_args()<line_sep># If File Path <if_stmt>os.path.isfile(args['input_path'])# Extracting File Info <block_start>get_info(input_file=args['input_path'])<line_sep># Watermarking or Unwatermarking a File watermark_unwatermark_file(input_file=args['input_path'] wm_text=args['watermark_text'] action=args['action'] mode=args['mode'] output_file=args['output_file'] pages=args['pages'])<block_end># If Folder Path <elif_stmt>os.path.isdir(args['input_path'])# Watermarking or Unwatermarking a Folder <block_start>watermark_unwatermark_folder(input_folder=args['input_path'] wm_text=args['watermark_text'] action=args['action'] mode=args['mode'] recursive=args['recursive'] pages=args['pages'])<block_end><block_end>
<import_stmt>torch.utils.data<as>data<class_stmt>BaseDataset(data.Dataset)<block_start><def_stmt>__init__ self<block_start>super(BaseDataset self).__init__()<block_end><def_stmt>name self<block_start><return>'BaseDataset'<block_end>@staticmethod<def_stmt>modify_commandline_options parser is_train<block_start><return>parser<block_end><def_stmt>initialize self opt<block_start><pass><block_end><def_stmt>__len__ self<block_start><return>0<block_end><block_end>
<import_stmt>gym<import_stmt>numpy<as>np<import_from_stmt>.base BaseEnv<class_stmt>_Gym(BaseEnv)<block_start>"""Gym environment. Args: name (str): name of environment in Gym. render (bool): parameter that determine whether to render. custom_action (bool): parameter that determine whether to use custom action. """<def_stmt>__init__ self name render=<false> custom_action=<false> **kwargs <block_start>self.env=gym.make(name)<line_sep>self.state_size=self.env.observation_space.shape[0]<if_stmt><not>custom_action<block_start>self.action_size=(self.env.action_space.shape[0]<if>self.action_type<eq>"continuous"<else>self.env.action_space.n)<block_end>self.render=render<block_end><def_stmt>reset self<block_start>self.score=0<line_sep>state=self.env.reset()<line_sep>state=np.expand_dims(state 0)# for (1, state_size) <return>state<block_end><def_stmt>step self action<block_start><if_stmt>self.render<block_start>self.env.render()<block_end><if_stmt>self.action_type<eq>"continuous"<block_start>action=((action+1.0)/2.0)<times>(self.env.action_space.high-self.env.action_space.low)+self.env.action_space.low<line_sep>action=np.reshape(action self.env.action_space.shape)<block_end><else_stmt><block_start>action=action.item()<block_end>next_state,reward,done,info=self.env.step(action)<line_sep>self.score<augadd>reward<line_sep>next_state,reward,done=map(<lambda>x:np.expand_dims(x 0) [next_state [reward] [done]])<line_sep># for (1, ?) <return>(next_state reward done)<block_end><def_stmt>close self<block_start>self.env.close()<block_end><block_end><class_stmt>Cartpole(_Gym)<block_start><def_stmt>__init__ self action_type="discrete" **kwargs<block_start>self.action_type=action_type<if_stmt>action_type<eq>"continuous"<block_start>super(Cartpole self).__init__("CartPole-v1" custom_action=<true> **kwargs)<line_sep>self.action_size=1<block_end><else_stmt><block_start>super(Cartpole self).__init__("CartPole-v1" **kwargs)<block_end><block_end><def_stmt>step self action<block_start><if_stmt>self.render<block_start>self.env.render()<block_end>action=action.item()<if_stmt>self.action_type<eq>"continuous"<block_start>action=0<if>action<l>0<else>1<block_end>next_state,reward,done,info=self.env.step(action)<line_sep>self.score<augadd>reward<line_sep>reward=-1<if>done<else>0.1<line_sep>next_state,reward,done=map(<lambda>x:np.expand_dims(x 0) [next_state [reward] [done]])<line_sep># for (1, ?) <return>(next_state reward done)<block_end><block_end><class_stmt>Pendulum(_Gym)<block_start><def_stmt>__init__ self **kwargs<block_start>self.action_type="continuous"<line_sep>super(Pendulum self).__init__("Pendulum-v1" **kwargs)<block_end><block_end><class_stmt>MountainCar(_Gym)<block_start><def_stmt>__init__ self **kwargs<block_start>self.action_type="discrete"<line_sep>super(MountainCar self).__init__("MountainCar-v0" **kwargs)<block_end><block_end>
<import_from_stmt>models *<import_from_stmt>django.contrib admin<line_sep>admin.site.register(Profile)<line_sep>admin.site.register(EmailVerify)<line_sep>
<import_stmt>json<import_stmt>github<import_from_stmt>celery task<import_from_stmt>django.db transaction<import_from_stmt>django.conf settings<import_from_stmt>ide.models.user User<import_from_stmt>ide.models.project Project<import_from_stmt>ide.utils.sdk load_manifest_dict<import_from_stmt>ide.models.files SourceFile ResourceFile ResourceIdentifier ResourceVariant<import_from_stmt>ide.utils.project APPINFO_MANIFEST PACKAGE_MANIFEST<import_from_stmt>ide.utils generate_half_uuid<import_from_stmt>utils.td_helper send_td_event<import_from_stmt>collections defaultdict<import_stmt>urllib2<line_sep>@task(acks_late=<true>)<def_stmt>import_gist user_id gist_id<block_start>user=User.objects.get(pk=user_id)<line_sep>g=github.Github()<try_stmt><block_start>gist=g.get_gist(gist_id)<block_end><except_stmt>github.UnknownObjectException<block_start>send_td_event('cloudpebble_gist_not_found' data={'data':{'gist_id':gist_id}} user=user)<line_sep><raise>Exception("Couldn't find gist to import.")<block_end>files=gist.files<line_sep>default_name=gist.description<or>'Sample project'<line_sep>default_settings={'name':default_name 'app_short_name':default_name 'app_long_name':default_name 'app_company_name':user.username 'app_version_label':'1.0' 'app_is_watchface':<false> 'app_is_hidden':<false> 'app_is_shown_on_communication':<false> 'app_capabilities':'[]' 'app_keys':'{}' 'project_type':'native' 'app_modern_multi_js':<false> 'sdk_version':'2'}<if_stmt>len(files)<eq>1<or>((APPINFO_MANIFEST<in>files<or>PACKAGE_MANIFEST<in>files)<and>len(files)<eq>2)<block_start><if_stmt>'simply.js'<in>files<block_start>default_settings['project_type']='simplyjs'<block_end><elif_stmt>'app.js'<in>files<block_start>default_settings['project_type']='pebblejs'<block_end><elif_stmt>'index.js'<in>files<block_start>default_settings['project_type']='rocky'<block_end><block_end># If all files are .js or .json and there is an index.js, assume it's a rocky project. <if_stmt>all(x.endswith(('.js' '.json'))<for>x gist.files)<and>'index.js'<in>files<block_start>default_settings['project_type']='rocky'<line_sep>default_settings['sdk_version']='3'<line_sep>default_settings['app_modern_multi_js']=<true><block_end>media=[]<line_sep># Using defaultdict we can load project settings from a manifest dict which # has values that default to None. 
This way, we can delegate any missing manifest values to the defaults defined above. <if_stmt>PACKAGE_MANIFEST<in>files<block_start>content=json.loads(files[PACKAGE_MANIFEST].content)<line_sep>package=defaultdict(<lambda>:<none>)<line_sep>package.update(content)<line_sep>package['pebble']=defaultdict(<lambda>:<none>)<line_sep>package['pebble'].update(content.get('pebble' {}))<line_sep>manifest_settings,media,dependencies=load_manifest_dict(package PACKAGE_MANIFEST default_project_type=<none>)<line_sep>default_settings['app_keys']='[]'<line_sep>default_settings['sdk_version']='3'<line_sep>default_settings['app_modern_multi_js']=<true><block_end><elif_stmt>APPINFO_MANIFEST<in>files<block_start>content=json.loads(files[APPINFO_MANIFEST].content)<line_sep>package=defaultdict(<lambda>:<none>)<line_sep>package.update(content)<line_sep>manifest_settings,media,dependencies=load_manifest_dict(package APPINFO_MANIFEST default_project_type=<none>)<block_end><else_stmt><block_start>manifest_settings={}<line_sep>dependencies={}<block_end>fixed_settings={'owner':user 'app_uuid':generate_half_uuid()}<line_sep>project_settings={}<line_sep>project_settings.update(default_settings)<line_sep>project_settings.update({k:v<for>k,v manifest_settings.iteritems()<if>v<is><not><none>})<line_sep>project_settings.update(fixed_settings)<with_stmt>transaction.atomic()<block_start>project=Project.objects.create(**project_settings)<line_sep>project.set_dependencies(dependencies)<line_sep>project_type=project.project_type<if_stmt>project_type<eq>'package'<block_start><raise>Exception("Gist imports are not yet supported for packages.")<block_end><if_stmt>project_type<ne>'simplyjs'<block_start><for_stmt>filename gist.files<block_start>target='app'<if_stmt><not>filename.endswith(('.c' '.h' '.js' '.json'))<block_start><continue><block_end><if_stmt>filename<in>('appinfo.json' 'package.json')<block_start><continue><block_end><if_stmt>project_type<eq>'native'<block_start><if_stmt>filename.endswith(('.js' '.json'))<block_start>target='pkjs'<block_end><block_end><elif_stmt>project_type<eq>'rocky'<block_start><if_stmt>filename<eq>'app.js'<block_start>target='pkjs'<block_end><block_end>source_file=SourceFile.objects.create(project=project file_name=filename target=target)<line_sep>source_file.save_text(gist.files[filename].content)<block_end>resources={}<for_stmt>resource media<block_start>kind=resource['type']<line_sep>def_name=resource['name']<line_sep>filename=resource['file']<line_sep>regex=resource.get('characterRegex' <none>)<line_sep>tracking=resource.get('trackingAdjust' <none>)<line_sep>memory_format=resource.get('memoryFormat' <none>)<line_sep>storage_format=resource.get('storageFormat' <none>)<line_sep>space_optimisation=resource.get('spaceOptimization' <none>)<line_sep>is_menu_icon=resource.get('menuIcon' <false>)<line_sep>compatibility=resource.get('compatibility' <none>)<if_stmt>filename<not><in>gist.files<block_start><continue><block_end><if_stmt>filename<not><in>resources<block_start>resources[filename]=ResourceFile.objects.create(project=project file_name=filename kind=kind is_menu_icon=is_menu_icon)<line_sep># We already have this as a unicode string in .content, but it shouldn't have become unicode # in the first place. 
default_variant=ResourceVariant.objects.create(resource_file=resources[filename] tags=ResourceVariant.TAGS_DEFAULT)<line_sep>default_variant.save_file(urllib2.urlopen(gist.files[filename].raw_url))<block_end>ResourceIdentifier.objects.create(resource_file=resources[filename] resource_id=def_name character_regex=regex tracking=tracking compatibility=compatibility memory_format=memory_format storage_format=storage_format space_optimisation=space_optimisation)<block_end><block_end><else_stmt><block_start>source_file=SourceFile.objects.create(project=project file_name='app.js')<line_sep>source_file.save_text(gist.files['simply.js'].content)<block_end><block_end>send_td_event('cloudpebble_gist_import' data={'data':{'gist_id':gist_id}} project=project)<line_sep><return>project.id<block_end>
<import_from_stmt>src.platform.weblogic.interfaces WINTERFACES<import_stmt>src.platform.weblogic.deployers.web_deploy<as>web_deploy<line_sep>versions=["10" "11" "12"]<line_sep>title=WINTERFACES.WLS<def_stmt>deploy fingerengine fingerprint<block_start><return>web_deploy.deploy(fingerengine fingerprint)<block_end>
""" CIFAR-10 classification dataset. """<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>chainer.dataset DatasetMixin<import_from_stmt>chainer.datasets.cifar get_cifar10<import_from_stmt>chainercv.transforms random_crop<import_from_stmt>chainercv.transforms random_flip<import_from_stmt>.dataset_metainfo DatasetMetaInfo<class_stmt>CIFAR10(DatasetMixin)<block_start>""" CIFAR-10 image classification dataset. Parameters: ---------- root : str, default '~/.chainer/datasets/cifar10' Path to temp folder for storing data. mode : str, default 'train' 'train', 'val', or 'test'. transform : function, default None A function that takes data and label and transforms them. """<def_stmt>__init__ self root=os.path.join("~" ".chainer" "datasets" "cifar10") mode="train" transform=<none><block_start><assert_stmt>(root<is><not><none>)<line_sep>self.transform=transform<line_sep>train_ds,test_ds=get_cifar10()<line_sep>self.base=train_ds<if>mode<eq>"train"<else>test_ds<block_end><def_stmt>__len__ self<block_start><return>len(self.base)<block_end><def_stmt>get_example self i<block_start>image,label=self.base[i]<line_sep>image=self.transform(image)<line_sep><return>image label<block_end><block_end><class_stmt>CIFAR10MetaInfo(DatasetMetaInfo)<block_start><def_stmt>__init__ self<block_start>super(CIFAR10MetaInfo self).__init__()<line_sep>self.label="CIFAR10"<line_sep>self.short_label="cifar"<line_sep>self.root_dir_name="cifar10"<line_sep>self.dataset_class=CIFAR10<line_sep>self.num_training_samples=50000<line_sep>self.in_channels=3<line_sep>self.num_classes=10<line_sep>self.input_image_size=(32 32)<line_sep>self.train_metric_capts=["Train.Err"]<line_sep>self.train_metric_names=["Top1Error"]<line_sep>self.train_metric_extra_kwargs=[{"name":"err"}]<line_sep>self.val_metric_capts=["Val.Err"]<line_sep>self.val_metric_names=["Top1Error"]<line_sep>self.val_metric_extra_kwargs=[{"name":"err"}]<line_sep>self.saver_acc_ind=0<line_sep>self.train_transform=CIFARTrainTransform<line_sep>self.val_transform=CIFARValTransform<line_sep>self.test_transform=CIFARValTransform<line_sep>self.ml_type="imgcls"<block_end><block_end><class_stmt>CIFARTrainTransform(object)<block_start>""" CIFAR-10 training transform. """<def_stmt>__init__ self ds_metainfo mean_rgb=(0.4914 0.4822 0.4465) std_rgb=(0.2023 0.1994 0.2010)<block_start><assert_stmt>(ds_metainfo<is><not><none>)<line_sep>self.mean=np.array(mean_rgb np.float32)[: np.newaxis np.newaxis]<line_sep>self.std=np.array(std_rgb np.float32)[: np.newaxis np.newaxis]<block_end><def_stmt>__call__ self img<block_start>img=random_crop(img=img size=self.resize_value)<line_sep>img=random_flip(img=img x_random=<true>)<line_sep>img<augsub>self.mean<line_sep>img<augdiv>self.std<line_sep><return>img<block_end><block_end><class_stmt>CIFARValTransform(object)<block_start>""" CIFAR-10 validation transform. """<def_stmt>__init__ self ds_metainfo mean_rgb=(0.4914 0.4822 0.4465) std_rgb=(0.2023 0.1994 0.2010)<block_start><assert_stmt>(ds_metainfo<is><not><none>)<line_sep>self.mean=np.array(mean_rgb np.float32)[: np.newaxis np.newaxis]<line_sep>self.std=np.array(std_rgb np.float32)[: np.newaxis np.newaxis]<block_end><def_stmt>__call__ self img<block_start>img<augsub>self.mean<line_sep>img<augdiv>self.std<line_sep><return>img<block_end><block_end>
# Program 19d: Generalized synchronization. # See Figure 19.8(a). <import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>scipy.integrate odeint<line_sep># Constants mu=5.7<line_sep>sigma=16<line_sep>b=4<line_sep>r=45.92<line_sep>g=8# When g=4, there is no synchronization. tmax=100<line_sep>t=np.arange(0.0 tmax 0.1)<def_stmt>rossler_lorenz_odes X t<block_start>x1,x2,x3,y1,y2,y3,z1,z2,z3=X<line_sep>dx1=-(x2+x3)<line_sep>dx2=x1+0.2<times>x2<line_sep>dx3=0.2+x3<times>(x1-mu)<line_sep>dy1=sigma<times>(y2-y1)-g<times>(y1-x1)<line_sep>dy2=-y1<times>y3+r<times>y1-y2<line_sep>dy3=y1<times>y2-b<times>y3<line_sep>dz1=sigma<times>(z2-z1)-g<times>(z1-x1)<line_sep>dz2=-z1<times>z3+r<times>z1-z2<line_sep>dz3=z1<times>z2-b<times>z3<line_sep><return>(dx1 dx2 dx3 dy1 dy2 dy3 dz1 dz2 dz3)<block_end>y0=[2 -10 44 30 10 20 31 11 22]<line_sep>X=odeint(rossler_lorenz_odes y0 t rtol=1e-6)<line_sep>x1,x2,x3,y1,y2,y3,z1,z2,z3=X.T# unpack columns plt.figure(1)<line_sep># Delete first 500 iterates. plt.plot(y2[500:len(y2)] z2[500:len(z2)])<line_sep>plt.xlabel(r'$y_2$' fontsize=15)<line_sep>plt.ylabel(r'$z_2$' fontsize=15)<line_sep>plt.show()<line_sep>
# Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 <import_from_stmt>extensions.ops.embedding_bag EmbeddingBagOffsetsSum EmbeddingBagPackedSum<import_from_stmt>extensions.ops.rank Rank<import_from_stmt>mo.front.common.partial_infer.utils int64_array<import_from_stmt>mo.front.common.replacement FrontReplacementPattern<import_from_stmt>mo.front.tf.graph_utils create_op_with_const_inputs<import_from_stmt>mo.graph.graph Graph rename_node<import_from_stmt>mo.ops.broadcast Broadcast<import_from_stmt>mo.ops.concat Concat<import_from_stmt>mo.ops.shape Shape<import_from_stmt>mo.ops.unsqueeze Unsqueeze<import_from_stmt>mo.utils.shape node_to_get_shape_value_of_indices get_canonical_axis_index_node get_shape_values_by_indices_node<class_stmt>AtenToEmbeddingBag(FrontReplacementPattern)<block_start>""" Converts the ATen layer to EmbeddingBag layer. """<line_sep>enabled=<true><def_stmt>find_and_replace_pattern self graph:Graph<block_start><for_stmt>node graph.get_op_nodes(op='ATen' operator='embedding_bag')<block_start><assert_stmt>node.soft_get('mode')<eq>0 'ATen::embedding_bag has unsupported mode, only "sum" '<concat>'mode is supported for node {}.'.format(node.id)<line_sep>node_name=node.soft_get('name' node.id)<line_sep>rename_node(node node_name+'/TBR')<line_sep>is_packed=<false><if_stmt>len(node.in_ports())<l>3<or>node.in_port(2).disconnected()<block_start>is_packed=<true><line_sep>embedding_bag=EmbeddingBagPackedSum(graph {'name':node_name}).create_node()<block_end><else_stmt><block_start>embedding_bag=EmbeddingBagOffsetsSum(graph {'name':node_name}).create_node()<line_sep>node.in_port(2).get_connection().set_destination(embedding_bag.in_port(2))<block_end>rename_node(embedding_bag node_name)<line_sep>node.in_port(0).get_connection().set_destination(embedding_bag.in_port(0))<line_sep>node.in_port(1).get_connection().set_destination(embedding_bag.in_port(1))<line_sep>node.out_port(0).get_connection().set_source(embedding_bag.out_port(0))<if_stmt>len(node.in_ports())<eq>4<and><not>node.in_port(3).disconnected()<block_start><if_stmt>is_packed<block_start>node.in_port(3).get_connection().set_destination(embedding_bag.in_port(2))<block_end><else_stmt># connect per_sample_weights <block_start>node.in_port(3).get_connection().set_destination(embedding_bag.in_port(4))<line_sep>weights_shape_node=Shape(graph {'name':node_name+'/WeightsShape'}).create_node()<line_sep>weights_rank_node=Rank(graph {'name':node_name+'/WeightsRank'}).create_node()<line_sep>last_dim_node=get_canonical_axis_index_node(weights_rank_node -1)<line_sep>weights_last_dim=get_shape_values_by_indices_node(weights_shape_node last_dim_node)<line_sep>weights_first_dim=node_to_get_shape_value_of_indices(weights_shape_node [0])<line_sep>zero_col_node=create_op_with_const_inputs(graph Broadcast {0:int64_array([0])} {'name':node_name+'/Broadcast'})<line_sep>zero_col_node.in_port(1).connect(weights_last_dim.out_port(0))<line_sep>default_embeddings_node=create_op_with_const_inputs(graph Unsqueeze {1:int64_array(0)} {'name':node_name+'/Unsqueeze'})<line_sep>default_embeddings_node.in_port(0).connect(zero_col_node.out_port(0))<line_sep># expand embedding table with zeros weights_concat=Concat(graph {'axis':0 'in_ports_count':2 
'name':node_name+'/Concat'}).create_node()<line_sep>embedding_bag.in_port(0).get_connection().set_destination(weights_concat.in_port(0))<line_sep>weights_concat.in_port(0).get_connection().add_destination(weights_shape_node.in_port(0))<line_sep>weights_concat.in_port(0).get_connection().add_destination(weights_rank_node.in_port(0))<line_sep>weights_concat.in_port(1).connect(default_embeddings_node.out_port(0))<line_sep>weights_concat.out_port(0).connect(embedding_bag.in_port(0))<line_sep># point default index to expanded part of embedding table weights_first_dim.out_port(0).connect(embedding_bag.in_port(3))<block_end><block_end><block_end><block_end><block_end>
<import_from_stmt>sys version_info<if_stmt>version_info[0]<eq>2<block_start><import_from_stmt>sys maxint<block_end><else_stmt><block_start><import_from_stmt>sys maxsize<as>maxint<block_end><import_from_stmt>itertools chain<import_from_stmt>.iters map range<class_stmt>Stream(object)<block_start>__slots__=("_last" "_collection" "_origin")<class_stmt>_StreamIterator(object)<block_start>__slots__=("_stream" "_position")<def_stmt>__init__ self stream<block_start>self._stream=stream<line_sep>self._position=-1<block_end># not started yet <def_stmt>__next__ self# check if elements are available for next position # return next element or raise StopIteration <block_start>self._position<augadd>1<if_stmt>(len(self._stream._collection)<g>self._position<or>self._stream._fill_to(self._position))<block_start><return>self._stream._collection[self._position]<block_end><raise>StopIteration()<block_end><if_stmt>version_info[0]<eq>2<block_start>next=__next__<block_end><block_end><def_stmt>__init__ self *origin<block_start>self._collection=[]<line_sep>self._last=-1# not started yet self._origin=iter(origin)<if>origin<else>[]<block_end><def_stmt>__lshift__ self rvalue<block_start>iterator=rvalue()<if>callable(rvalue)<else>rvalue<line_sep>self._origin=chain(self._origin iterator)<line_sep><return>self<block_end><def_stmt>cursor self<block_start>"""Return position of next evaluated element"""<line_sep><return>self._last+1<block_end><def_stmt>_fill_to self index<block_start><if_stmt>self._last<ge>index<block_start><return><true><block_end><while_stmt>self._last<l>index<block_start><try_stmt><block_start>n=next(self._origin)<block_end><except_stmt>StopIteration<block_start><return><false><block_end>self._last<augadd>1<line_sep>self._collection.append(n)<block_end><return><true><block_end><def_stmt>__iter__ self<block_start><return>self._StreamIterator(self)<block_end><def_stmt>__getitem__ self index<block_start><if_stmt>isinstance(index int)# todo: i'm not sure what to do with negative indices <block_start><if_stmt>index<l>0<block_start><raise>TypeError("Invalid argument type")<block_end>self._fill_to(index)<block_end><elif_stmt>isinstance(index slice)<block_start>low,high,step=index.indices(maxint)<if_stmt>step<eq>0<block_start><raise>ValueError("Step must not be 0")<block_end><return>self.__class__()<lshift>map(self.__getitem__ range(low high step<or>1))<block_end><else_stmt><block_start><raise>TypeError("Invalid argument type")<block_end><return>self._collection.__getitem__(index)<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>allennlp.common.util ensure_list<import_from_stmt>allennlp.data.tokenizers PretrainedTransformerTokenizer<import_from_stmt>allennlp.data.token_indexers PretrainedTransformerIndexer<import_from_stmt>allennlp_models.classification BoolQDatasetReader<import_from_stmt>tests FIXTURES_ROOT<class_stmt>TestBoolqReader<block_start>boolq_path=FIXTURES_ROOT/"classification"/"boolq.jsonl"<def_stmt>test_boolq_dataset_reader_default_setting self<block_start>reader=BoolQDatasetReader()<line_sep>instances=reader.read(self.boolq_path)<line_sep>instances=ensure_list(instances)<assert_stmt>len(instances)<eq>5<line_sep>fields=instances[0].fields<assert_stmt>[t.text<for>t fields["tokens"].tokens][:5]<eq>["Persian" "language" "--" "Persian" "(/ˈpɜːrʒən," ]<assert_stmt>fields["label"].label<eq>1<line_sep>fields=instances[1].fields<assert_stmt>[t.text<for>t fields["tokens"].tokens][:5]<eq>["Epsom" "railway" "station" "--" "Epsom" ]<assert_stmt>fields["label"].label<eq>0<block_end><def_stmt>test_boolq_dataset_reader_roberta_setting self<block_start>reader=BoolQDatasetReader(tokenizer=PretrainedTransformerTokenizer("roberta-base" add_special_tokens=<false>) token_indexers={"tokens":PretrainedTransformerIndexer("roberta-base")} )<line_sep>instances=reader.read(self.boolq_path)<line_sep>instances=ensure_list(instances)<assert_stmt>len(instances)<eq>5<line_sep>fields=instances[0].fields<assert_stmt>[t.text<for>t fields["tokens"].tokens][:5]<eq>["<s>" "Pers" "ian" "Ġlanguage" "Ġ--" ]<assert_stmt>[t.text<for>t fields["tokens"].tokens][-5:]<eq>["Ġspeak" "Ġthe" "Ġsame" "Ġlanguage" "</s>" ]<assert_stmt>fields["label"].label<eq>1<line_sep>fields=instances[1].fields<assert_stmt>[t.text<for>t fields["tokens"].tokens][:5]<eq>["<s>" "E" "ps" "om" "Ġrailway" ]<assert_stmt>[t.text<for>t fields["tokens"].tokens][-5:]<eq>["Ġe" "ps" "om" "Ġstation" "</s>" ]<assert_stmt>fields["label"].label<eq>0<block_end><block_end>
<import_stmt>os<import_stmt>django<import_from_stmt>django.db connection<import_from_stmt>django.template Context<import_from_stmt>django.template Template<import_from_stmt>django.urls path<line_sep>BASE_DIR=os.path.dirname(os.path.abspath(__file__))<line_sep>DEBUG=<false><line_sep>ROOT_URLCONF=__name__<line_sep>DATABASES={"default":{"ENGINE":"django.db.backends.sqlite3" "NAME":":memory:" }}<line_sep>TEMPLATES=[{"BACKEND":"django.template.backends.django.DjangoTemplates" "DIRS":[BASE_DIR ] }]<line_sep>SECRET_KEY=("SECRET" )<line_sep>MIDDLEWARE=["app.empty_middleware" "app.empty_middleware"]<line_sep>ALLOWED_HOSTS=["*"]<line_sep>SETTINGS=dict((key val)<for>key,val locals().items()<if>key.isupper())<def_stmt>empty_middleware get_response<block_start><def_stmt>middleware request<block_start>response=get_response(request)<line_sep><return>response<block_end><return>middleware<block_end><def_stmt>index request# render a large table template <block_start>template=Template(("<table>\n"<concat>"{% for row in table %}\n"<concat>"<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>\n"<concat>"{% endfor %}\n"<concat>"</table>"))<line_sep>table=[range(10)<for>_ range(100)]<line_sep>context=Context({"table":table})<line_sep>template.render(context)<line_sep># query db for random data <for_stmt>_ range(10)<block_start><with_stmt>connection.cursor()<as>cursor<block_start>cursor.execute("""with recursive cnt( id, x) as ( values(1 , random()) union all select id+1,random() from cnt where id<100) select * from cnt""")<line_sep>cursor.fetchall()<block_end><block_end>index=Template(""" <html lang="en"> <head> <meta charset="utf-8"> <title>Django Simple</title> </head> <body> <p>Hello {{name|default:"friend"}}!</p> </body> </html> """)<line_sep><return>django.http.HttpResponse(index.render(Context({})))<block_end>urlpatterns=[path("" index)]<if_stmt>__name__<eq>"__main__"<block_start><import_from_stmt>django.core management<line_sep>management.execute_from_command_line()<block_end>
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # <def_stmt>f_gold a b c<block_start><if_stmt>(a+b<le>c)<or>(a+c<le>b)<or>(b+c<le>a)<block_start><return><false><block_end><else_stmt><block_start><return><true><block_end><block_end>#TOFILL <if_stmt>__name__<eq>'__main__'<block_start>param=[(29 19 52 ) (83 34 49 ) (48 14 65 ) (59 12 94 ) (56 39 22 ) (68 85 9 ) (63 36 41 ) (95 34 37 ) (2 90 27 ) (11 16 1 )]<line_sep>n_success=0<for_stmt>i,parameters_set enumerate(param)<block_start><if_stmt>f_filled(*parameters_set)<eq>f_gold(*parameters_set)<block_start>n_success<augadd>1<block_end><block_end>print("#Results: %i, %i"%(n_success len(param)))<block_end>
<import_stmt>mock<import_stmt>pytest<import_stmt>pwny<def_stmt>test_default_arch_x86 <block_start><with_stmt>mock.patch('platform.machine')<as>platform_mock<block_start>platform_mock.return_value='i386'<assert_stmt>pwny.Target().arch<is>pwny.Target.Arch.x86<block_end><block_end><def_stmt>test_default_arch_x86_64 <block_start><with_stmt>mock.patch('platform.machine')<as>platform_mock<block_start>platform_mock.return_value='x86_64'<assert_stmt>pwny.Target().arch<is>pwny.Target.Arch.x86<block_end><block_end><def_stmt>test_default_arch_unknown <block_start><with_stmt>mock.patch('platform.machine')<as>platform_mock<block_start>platform_mock.return_value='unknown'<assert_stmt>pwny.Target().arch<is>pwny.Target.Arch.unknown<block_end><block_end><def_stmt>test_default_arch_32bit <block_start><with_stmt>mock.patch('platform.architecture')<as>platform_mock<block_start>platform_mock.return_value=('32bit' )<assert_stmt>pwny.Target().bits<is>pwny.Target.Bits.bits_32<block_end><block_end><def_stmt>test_default_arch_64bit <block_start><with_stmt>mock.patch('platform.architecture')<as>platform_mock<block_start>platform_mock.return_value=('64bit' )<assert_stmt>pwny.Target().bits<is>pwny.Target.Bits.bits_64<block_end><block_end><def_stmt>test_set_arch <block_start><with_stmt>mock.patch('platform.architecture')<as>platform_mock<block_start>platform_mock.return_value=('64bit' )<line_sep>target=pwny.Target(arch=pwny.Target.Arch.x86)<assert_stmt>target.arch<is>pwny.Target.Arch.x86<block_end><block_end><def_stmt>test_default_endian <block_start><assert_stmt>pwny.Target().endian<is>pwny.Target.Endian.little<block_end><def_stmt>test_set_endian <block_start>target=pwny.Target(arch=pwny.Target.Arch.unknown endian=pwny.Target.Endian.big)<assert_stmt>target.endian<is>pwny.Target.Endian.big<block_end><def_stmt>test_default_bits_x86 <block_start>target=pwny.Target(arch=pwny.Target.Arch.x86)<assert_stmt>target.bits<eq>32<block_end>@pytest.mark.xfail(raises=NotImplementedError)<def_stmt>test_default_bits_unsupported <block_start>target=pwny.Target(arch=pwny.Target.Arch.unknown)<line_sep>_=target.bits<block_end><def_stmt>test_set__bits <block_start>target=pwny.Target(arch=pwny.Target.Arch.x86 bits=64)<assert_stmt>target.bits<eq>64<block_end>@pytest.mark.xfail(raises=ValueError)<def_stmt>test_set_invalid_bits <block_start>pwny.Target(bits=33)<block_end><def_stmt>test_target_assume <block_start>target=pwny.Target()<line_sep>target.assume(pwny.Target(arch=pwny.Target.Arch.arm endian=pwny.Target.Endian.little bits=64 mode=2))<assert_stmt>target.arch<is>pwny.Target.Arch.arm<and>target.endian<eq>pwny.Target.Endian.little<and>target.bits<eq>64<and>target.mode<eq>2<block_end>