from .modules import modules
|
# -*- coding: utf-8 -*-
"""A set of utility functions to support outlier detection.
"""
# Author: <NAME> <<EMAIL>>
# License: BSD 2 clause

from __future__ import division
from __future__ import print_function

import os
import numbers

import numpy as np
import pandas as pd
from numpy import percentile

import sklearn
from sklearn.metrics import precision_score
from sklearn.preprocessing import StandardScaler
from sklearn.utils import column_or_1d
from sklearn.utils import check_array
from sklearn.utils import check_consistent_length
from sklearn.utils import check_random_state
from sklearn.utils.random import sample_without_replacement

MAX_INT = np.iinfo(np.int32).max
MIN_INT = -1 * MAX_INT


def make_dirs_if_not_exists(save_dir):
    # make saving directory if needed
    if not os.path.isdir(save_dir):
        os.makedirs(save_dir)


def read_csv_to_df(file_loc, header_lower=True, usecols=None, dtype=None,
                   low_memory=True, encoding=None):
    """Read in csv files with necessary processing

    Parameters
    ----------
    file_loc : str
        Location of the csv file to read.
    header_lower : bool, optional (default=True)
        Whether to lowercase the column headers.
    low_memory : bool, optional (default=True)
        Passed through to ``pandas.read_csv``.

    Returns
    -------
    df : pandas DataFrame
    """
    if dtype is not None:
        df = pd.read_csv(file_loc, usecols=usecols, dtype=dtype,
                         low_memory=low_memory, encoding=encoding)
    else:
        df = pd.read_csv(file_loc, usecols=usecols,
                         low_memory=low_memory, encoding=encoding)

    if header_lower:
        df.columns = df.columns.str.lower()

    return df


def read_excel_to_df(file_loc, header_lower=True, usecols=None, dtype=None,
                     low_memory=True, encoding=None):
    """Read in excel files with necessary processing

    Parameters
    ----------
    file_loc : str
        Location of the excel file to read.
    header_lower : bool, optional (default=True)
        Whether to lowercase the column headers.
    low_memory : bool, optional (default=True)
        Passed through to the reader.

    Returns
    -------
    df : pandas DataFrame
    """
    if dtype is not None:
        df = pd.read_excel(file_loc, usecols=usecols, dtype=dtype,
                           low_memory=low_memory, encoding=encoding)
    else:
        df = pd.read_excel(file_loc, usecols=usecols,
                           low_memory=low_memory, encoding=encoding)

    if header_lower:
        df.columns = df.columns.str.lower()

    return df


def check_parameter(param, low=MIN_INT, high=MAX_INT, param_name='',
                    include_left=False, include_right=False):
    """Check if an input is within the defined range.

    Parameters
    ----------
    param : int, float
        The input parameter to check.

    low : int, float
        The lower bound of the range.

    high : int, float
        The higher bound of the range.

    param_name : str, optional (default='')
        The name of the parameter.

    include_left : bool, optional (default=False)
        Whether to include the lower bound (lower bound <=).

    include_right : bool, optional (default=False)
        Whether to include the higher bound (<= higher bound).

    Returns
    -------
    within_range : bool or raise errors
        Whether the parameter is within the range of (low, high).
    """

    # param, low and high should all be numerical
    # (the built-in float is used here instead of the removed np.float alias)
    if not isinstance(param, (numbers.Integral, np.integer, float)):
        raise TypeError('{param_name} is set to {param}. Not numerical.'.format(
            param=param, param_name=param_name))

    if not isinstance(low, (numbers.Integral, np.integer, float)):
        raise TypeError('low is set to {low}. Not numerical.'.format(low=low))

    if not isinstance(high, (numbers.Integral, np.integer, float)):
        raise TypeError('high is set to {high}. Not numerical.'.format(high=high))

    # at least one of the bounds should be specified
    if low is MIN_INT and high is MAX_INT:
        raise ValueError('Neither low nor high bounds is defined.')

    # if wrong bound values are used
    if low > high:
        raise ValueError('Lower bound > Higher bound.')

    # value check under different bound conditions
    if (include_left and include_right) and (param < low or param > high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of [{low}, {high}].'.format(
                param=param, low=low, high=high, param_name=param_name))

    elif (include_left and not include_right) and (param < low or param >= high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of [{low}, {high}).'.format(
                param=param, low=low, high=high, param_name=param_name))

    elif (not include_left and include_right) and (param <= low or param > high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of ({low}, {high}].'.format(
                param=param, low=low, high=high, param_name=param_name))

    elif (not include_left and not include_right) and (param <= low or param >= high):
        raise ValueError(
            '{param_name} is set to {param}. '
            'Not in the range of ({low}, {high}).'.format(
                param=param, low=low, high=high, param_name=param_name))

    else:
        return True
|
from contexttimer import Timer
import matplotlib.pyplot as plt
import numpy as np
import scipy.stats as ss

import cocos.numerics as cn
import cocos.device as cd
from cocos.scientific.kde import gaussian_kde

n = 10000  # number of data points
grid_size = n + 1  # number of points at which to evaluate the kde
R = 10  # number of repetitions for performance benchmark

if __name__ == '__main__':
    # generate random sample
    points = np.random.randn(n)

    # generate grid at which to evaluate the sample
    grid = np.linspace(-5.0, 5.0, grid_size)

    # construct and evaluate scipy gaussian kde object
    gaussian_kde_scipy = ss.kde.gaussian_kde(points)
    density_estimate_scipy = gaussian_kde_scipy.evaluate(grid)

    # construct and evaluate cocos gaussian kde object using gpu evaluation
    gaussian_kde_cocos = gaussian_kde(cn.array(points), gpu=True)
    density_estimate_cocos = np.array(gaussian_kde_cocos.evaluate(grid))

    # verify that results are numerically close
    print(f'maximum absolute difference between results gpu using Cocos and cpu using SciPy: '
          f'{np.max(abs(density_estimate_cocos - density_estimate_scipy))}')

    if np.allclose(density_estimate_cocos, density_estimate_scipy):
        print('estimates from cocos and scipy are numerically close')
    else:
        print('estimates from cocos and scipy deviate by more than the default tolerance')

    # plot kernel density estimates
    plt.plot(grid, density_estimate_cocos, label='gaussian kernel density estimated using Cocos')
    plt.plot(grid, density_estimate_scipy, label='gaussian kernel density estimated using SciPy')
    plt.legend(loc=1)
    plt.show()

    # run benchmark comparing cpu performance using SciPy with gpu performance using Cocos
    with Timer() as scipy_timer:
        for _ in range(R):
            gaussian_kde_scipy.evaluate(grid)

    print(f'Time to evaluate gaussian kde on cpu using scipy was {scipy_timer.elapsed / R} seconds')

    with Timer() as cocos_timer:
        for _ in range(R):
            gaussian_kde_cocos.evaluate(grid)
            cd.sync()

    print(f'Time to evaluate gaussian kde on gpu using cocos was {cocos_timer.elapsed / R} seconds')
    print(f'speedup on gpu is {scipy_timer.elapsed / cocos_timer.elapsed}')
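

# --- Illustrative reference (not part of the original benchmark) ---
# What gaussian_kde computes, written out with plain NumPy for intuition. This
# assumes SciPy's default Scott bandwidth for 1-D data (h = std * n**(-1/5));
# it is a sketch for understanding, not a drop-in replacement for either library.
def naive_gaussian_kde(sample, eval_grid):
    m = len(sample)
    h = sample.std(ddof=1) * m ** (-1.0 / 5.0)  # Scott's rule in one dimension
    # density at each grid point = average of Gaussian bumps centred on the data
    diff = (eval_grid[:, None] - sample[None, :]) / h
    return np.exp(-0.5 * diff ** 2).sum(axis=1) / (m * h * np.sqrt(2.0 * np.pi))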
|
import numpy as np


def calc_area(vertex):
    vec_a = vertex[:, 1] - vertex[:, 0]
    vec_b = vertex[:, 2] - vertex[:, 0]
    normal = np.cross(vec_a, vec_b)
    area = np.absolute(np.linalg.norm(normal, ord=2, axis=1)) * 0.5
    return area


def uniform_sample_on_triangle(triangle):
    while True:
        rn = np.random.rand(2)
        if np.sum(rn) <= 1.0:
            break
    return rn[0] * (triangle[1] - triangle[0]) + rn[1] * (triangle[2] - triangle[0]) + triangle[0]


# mesh
def mesh2pcl(triangle_collection, numpoints):
    area_collection = calc_area(triangle_collection)
    total_area = np.sum(area_collection)
    print("Triangle count: {}".format(triangle_collection.shape[0]))
    # print("Total surface area: {}".format(total_area))
    area_collection /= total_area

    # sample k points
    # note that this will give an error if area_collection.shape[0] = 0 (implies empty shape)
    sampled_triangles = np.random.choice(area_collection.shape[0], size=numpoints, p=area_collection)

    # Sample one random uv on each triangle
    rand_uv = np.random.rand(numpoints, 2)
    oob_idx = np.sum(rand_uv, axis=-1) > 1.0
    rand_uv[oob_idx, :] = -rand_uv[oob_idx, :] + 1.0

    sampled_triangle_collection = triangle_collection[sampled_triangles, :, :]
    sampled_points = (rand_uv[:, [0]] * (sampled_triangle_collection[:, 1, :] - sampled_triangle_collection[:, 0, :])
                      + rand_uv[:, [1]] * (sampled_triangle_collection[:, 2, :] - sampled_triangle_collection[:, 0, :])
                      + sampled_triangle_collection[:, 0, :])
    return sampled_points.astype(np.float32)
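

# --- Illustrative usage sketch (not part of the original module) ---
# mesh2pcl expects an array of shape (num_triangles, 3, 3): each triangle is
# three xyz vertices. The two-triangle "mesh" below is made up for illustration.
if __name__ == "__main__":
    triangles = np.array([
        [[0., 0., 0.], [1., 0., 0.], [0., 1., 0.]],
        [[1., 0., 0.], [1., 1., 0.], [0., 1., 0.]],
    ])
    cloud = mesh2pcl(triangles, numpoints=1000)
    print(cloud.shape)  # expected: (1000, 3)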
|
import tornado.ioloop
import tornado.web
import argparse
import os
import sys

currentpath = os.path.dirname(os.path.realpath(__file__))
project_basedir = os.path.join(currentpath, '..')
sys.path.append(project_basedir)

from config import conf

datadir = conf.distributed_datadir

parser = argparse.ArgumentParser(description="mcts self play script")
parser.add_argument('--verbose', '-v', help='verbose mode', type=bool, default=False)
parser.add_argument('--datadir', '-d', type=str, help="data dir to store chess plays", default=datadir)
args = parser.parse_args()
datadir = args.datadir


class TestHandler(tornado.web.RequestHandler):
    def get(self):
        self.write("OK")


class ChessSubmitHandler(tornado.web.RequestHandler):
    def post(self):
        name = self.get_argument("name")
        content = self.get_argument("content")
        print("receive {}".format(name))
        if args.verbose == True:
            print(name, content)
        with open(os.path.join(datadir, name), 'w', encoding='utf-8') as whdl:
            whdl.write(content)
        self.write("OK")


class BestWeightNameHandler(tornado.web.RequestHandler):
    def get(self):
        filelist = os.listdir(conf.distributed_server_weight_dir)
        filelist = [i[:-6] for i in filelist if '.index' in i and conf.noup_flag not in i]
        self.write(sorted(filelist)[-1])


class ModelGetHandler(tornado.web.RequestHandler):
    def get(self):
        name = self.get_argument("name")
        model_f = self.get_argument("model_f")
        file_name = os.path.join(conf.distributed_server_weight_dir, "{}.{}".format(name, model_f))
        self.set_header("Content-Type", 'application/octet-stream')
        self.set_header('Content-Disposition', 'attachment; filename={}'.format("{}.{}".format(name, model_f)))
        with open(file_name, 'rb') as f:
            while True:
                data = f.read(1024)
                if not data:
                    break
                self.write(data)
        self.finish()


def make_app():
    return tornado.web.Application([
        (r"/test", TestHandler),
        (r"/submit_chess", ChessSubmitHandler),
        (r"/best_weight", BestWeightNameHandler),
        (r"/model_get", ModelGetHandler),
    ])


if __name__ == "__main__":
    app = make_app()
    app.listen(conf.port)
    tornado.ioloop.IOLoop.current().start()
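
# --- Illustrative client sketch (not part of the original server) ---
# A hedged example of how a self-play worker could talk to the handlers above
# using the `requests` library; host and port are placeholders (the real port
# comes from conf.port), and the payload fields mirror ChessSubmitHandler.
# Kept as a comment so the server script itself is unchanged.
#
# import requests
# base = "http://localhost:8888"
# assert requests.get(base + "/test").text == "OK"
# requests.post(base + "/submit_chess",
#               data={"name": "game_0001.txt", "content": "<game record>"})
# best_weight = requests.get(base + "/best_weight").text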
|
import unittest

import hcl2

from checkov.terraform.checks.resource.azure.SynapseWorkspaceEnablesManagedVirtualNetworks import check
from checkov.common.models.enums import CheckResult


class TestSynapseWorkspaceEnablesManagedVirtualNetworks(unittest.TestCase):

    def test_failure_1(self):
        hcl_res = hcl2.loads("""
resource "azurerm_synapse_workspace" "example" {
name = "example"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
sql_administrator_login = "sqladminuser"
sql_administrator_login_password = "<PASSWORD>!"
managed_virtual_network_enabled = false
aad_admin {
login = "<NAME>"
object_id = "00000000-0000-0000-0000-000000000000"
tenant_id = "00000000-0000-0000-0000-000000000000"
}
tags = {
Env = "production"
}
}
""")<line_sep>resource_conf=hcl_res['resource'][0]['azurerm_synapse_workspace']['example']<line_sep>scan_result=check.scan_resource_conf(conf=resource_conf)<line_sep>self.assertEqual(CheckResult.FAILED scan_result)<block_end><def_stmt>test_failure_2 self<block_start>hcl_res=hcl2.loads("""
resource "azurerm_synapse_workspace" "example" {
name = "example"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
sql_administrator_login = "sqladminuser"
sql_administrator_login_password = "<PASSWORD>!"
aad_admin {
login = "AzureAD Admin"
object_id = "00000000-0000-0000-0000-000000000000"
tenant_id = "00000000-0000-0000-0000-000000000000"
}
tags = {
Env = "production"
}
}
""")<line_sep>resource_conf=hcl_res['resource'][0]['azurerm_synapse_workspace']['example']<line_sep>scan_result=check.scan_resource_conf(conf=resource_conf)<line_sep>self.assertEqual(CheckResult.FAILED scan_result)<block_end><def_stmt>test_success self<block_start>hcl_res=hcl2.loads("""
resource "azurerm_synapse_workspace" "example" {
name = "example"
resource_group_name = azurerm_resource_group.example.name
location = azurerm_resource_group.example.location
storage_data_lake_gen2_filesystem_id = azurerm_storage_data_lake_gen2_filesystem.example.id
sql_administrator_login = "sqladminuser"
sql_administrator_login_password = "<PASSWORD>!"
managed_virtual_network_enabled = true
aad_admin {
login = "AzureAD Admin"
object_id = "00000000-0000-0000-0000-000000000000"
tenant_id = "00000000-0000-0000-0000-000000000000"
}
tags = {
Env = "production"
}
}
""")<line_sep>resource_conf=hcl_res['resource'][0]['azurerm_synapse_workspace']['example']<line_sep>scan_result=check.scan_resource_conf(conf=resource_conf)<line_sep>self.assertEqual(CheckResult.PASSED scan_result)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
|
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0

import pytest

from common.layer_test_class import check_ir_version
from common.tf_layer_test_class import CommonTFLayerTest
from unit_tests.utils.graph import build_graph


class TestELU(CommonTFLayerTest):
    def create_elu_net(self, shape, ir_version):
        """
            Tensorflow net               IR net

            Input->ELU        =>       Input->ELU
        """

        #
        # Create Tensorflow model
        #
        import tensorflow as tf

        tf.compat.v1.reset_default_graph()

        # Create the graph and model
        with tf.compat.v1.Session() as sess:
            shapes = shape.copy()
            # reshaping
            if len(shapes) >= 4:
                shapes.append(shapes.pop(1))
            input = tf.compat.v1.placeholder(tf.float32, shapes, 'Input')

            tf.nn.elu(input, name='Operation')

            tf.compat.v1.global_variables_initializer()
            tf_net = sess.graph_def

        #
        # Create reference IR net
        # Please, specify 'type': 'Input' for input node
        # Moreover, do not forget to validate ALL layer attributes!!!
        #
        ref_net = None

        if check_ir_version(10, None, ir_version):
            nodes_attributes = {
                'input': {'kind': 'op', 'type': 'Parameter'},
                'input_data': {'shape': shape, 'kind': 'data'},
                'ELU': {'kind': 'op', 'type': 'Elu'},
                'ELU_data': {'shape': shape, 'kind': 'data'},
                'result': {'kind': 'op', 'type': 'Result'}
            }

            ref_net = build_graph(nodes_attributes,
                                  [('input', 'input_data'),
                                   ('input_data', 'ELU'),
                                   ('ELU', 'ELU_data'),
                                   ('ELU_data', 'result')])

        return tf_net, ref_net

    test_data_precommit = [dict(shape=[4, 6, 8, 10, 12])]

    @pytest.mark.parametrize("params", test_data_precommit)
    @pytest.mark.precommit
    def test_elu_precommit(self, params, ie_device, precision, ir_version, temp_dir):
        if ie_device == 'GPU':
            pytest.skip("5D tensors is not supported on GPU")
        self._test(*self.create_elu_net(**params, ir_version=ir_version),
                   ie_device, precision, ir_version, temp_dir=temp_dir)

    test_data = [dict(shape=[10, 12]),
                 dict(shape=[8, 10, 12]),
                 dict(shape=[6, 8, 10, 12]),
                 dict(shape=[4, 6, 8, 10, 12])]

    @pytest.mark.parametrize("params", test_data)
    @pytest.mark.nightly
    def test_elu(self, params, ie_device, precision, ir_version, temp_dir):
        if ie_device == 'GPU':
            pytest.skip("5D tensors is not supported on GPU")
        self._test(*self.create_elu_net(**params, ir_version=ir_version),
                   ie_device, precision, ir_version, temp_dir=temp_dir)
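

# --- Illustrative reference (not part of the original test) ---
# For context, tf.nn.elu applies x -> x for x > 0 and x -> exp(x) - 1 for x <= 0
# (alpha = 1). A NumPy sketch of the same element-wise function:
def elu_reference(x, alpha=1.0):
    import numpy as np
    x = np.asarray(x, dtype=np.float32)
    return np.where(x > 0, x, alpha * (np.exp(x) - 1.0))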
|
# -*- coding: utf-8 -*-
<import_from_stmt>nose SkipTest<import_from_stmt>nose.tools eq_ ok_ assert_raises assert_in<import_from_stmt>parsimonious Grammar NodeVisitor VisitationError rule<import_from_stmt>parsimonious.expressions Literal<import_from_stmt>parsimonious.nodes Node<class_stmt>HtmlFormatter(NodeVisitor)<block_start>"""Visitor that turns a parse tree into HTML fragments"""<line_sep>grammar=Grammar("""bold_open = '(('""")# just partial
<def_stmt>visit_bold_open self node visited_children<block_start><return>'<b>'<block_end><def_stmt>visit_bold_close self node visited_children<block_start><return>'</b>'<block_end><def_stmt>visit_text self node visited_children<block_start>"""Return the text verbatim."""<line_sep><return>node.text<block_end><def_stmt>visit_bold_text self node visited_children<block_start><return>''.join(visited_children)<block_end><block_end><class_stmt>ExplosiveFormatter(NodeVisitor)<block_start>"""Visitor which raises exceptions"""<def_stmt>visit_boom self node visited_children<block_start><raise>ValueError<block_end><block_end><def_stmt>test_visitor <block_start>"""Assert a tree gets visited correctly."""<line_sep>grammar=Grammar(r'''
bold_text = bold_open text bold_close
text = ~'[a-zA-Z 0-9]*'
bold_open = '(('
bold_close = '))'
''')<line_sep>text='((o hai))'<line_sep>tree=Node(grammar['bold_text'] text 0 9 [Node(grammar['bold_open'] text 0 2) Node(grammar['text'] text 2 7) Node(grammar['bold_close'] text 7 9)])<line_sep>eq_(grammar.parse(text) tree)<line_sep>result=HtmlFormatter().visit(tree)<line_sep>eq_(result '<b>o hai</b>')<block_end><def_stmt>test_visitation_exception <block_start>assert_raises(VisitationError ExplosiveFormatter().visit Node(Literal('') '' 0 0))<block_end><def_stmt>test_str <block_start>"""Test str and unicode of ``Node``."""<line_sep>n=Node(Literal('something' name='text') 'o hai' 0 5)<line_sep>good='<Node called "text" matching "o hai">'<line_sep>eq_(str(n) good)<block_end><def_stmt>test_repr <block_start>"""Test repr of ``Node``."""<line_sep>s=u'hai ö'<line_sep>boogie=u'böogie'<line_sep>n=Node(Literal(boogie) s 0 3 children=[Node(Literal(' ') s 3 4) Node(Literal(u'ö') s 4 5)])<line_sep>eq_(repr(n) str("""s = {hai_o}\nNode({boogie}, s, 0, 3, children=[Node({space}, s, 3, 4), Node({o}, s, 4, 5)])""").format(hai_o=repr(s) boogie=repr(Literal(boogie)) space=repr(Literal(" ")) o=repr(Literal(u"ö")) ))<block_end><def_stmt>test_parse_shortcut <block_start>"""Exercise the simple case in which the visitor takes care of parsing."""<line_sep>eq_(HtmlFormatter().parse('((') '<b>')<block_end><def_stmt>test_match_shortcut <block_start>"""Exercise the simple case in which the visitor takes care of matching."""<line_sep>eq_(HtmlFormatter().match('((other things') '<b>')<block_end><class_stmt>CoupledFormatter(NodeVisitor)<block_start>@rule('bold_open text bold_close')<def_stmt>visit_bold_text self node visited_children<block_start><return>''.join(visited_children)<block_end>@rule('"(("')<def_stmt>visit_bold_open self node visited_children<block_start><return>'<b>'<block_end>@rule('"))"')<def_stmt>visit_bold_close self node visited_children<block_start><return>'</b>'<block_end>@rule('~"[a-zA-Z 0-9]*"')<def_stmt>visit_text self node visited_children<block_start>"""Return the text verbatim."""<line_sep><return>node.text<block_end><block_end><def_stmt>test_rule_decorator <block_start>"""Make sure the @rule decorator works."""<line_sep>eq_(CoupledFormatter().parse('((hi))') '<b>hi</b>')<block_end><def_stmt>test_rule_decorator_subclassing <block_start>"""Make sure we can subclass and override visitor methods without blowing
away the rules attached to them."""<class_stmt>OverridingFormatter(CoupledFormatter)<block_start><def_stmt>visit_text self node visited_children<block_start>"""Return the text capitalized."""<line_sep><return>node.text.upper()<block_end>@rule('"not used"')<def_stmt>visit_useless self node visited_children<block_start>"""Get in the way. Tempt the metaclass to pave over the
superclass's grammar with a new one."""<block_end><block_end><raise>SkipTest("I haven't got around to making this work yet.")<line_sep>eq_(OverridingFormatter().parse('((hi))') '<b>HI</b>')<block_end><class_stmt>PrimalScream(Exception)<block_start><pass><block_end><def_stmt>test_unwrapped_exceptions <block_start><class_stmt>Screamer(NodeVisitor)<block_start>grammar=Grammar("""greeting = 'howdy'""")<line_sep>unwrapped_exceptions=(PrimalScream )<def_stmt>visit_greeting self thing visited_children<block_start><raise>PrimalScream('This should percolate up!')<block_end><block_end>assert_raises(PrimalScream Screamer().parse 'howdy')<block_end><def_stmt>test_node_inequality <block_start>node=Node(Literal('12345') 'o hai' 0 5)<line_sep>ok_(node<ne>5)<line_sep>ok_(node<ne><none>)<line_sep>ok_(node<ne>Node(Literal('23456') 'o hai' 0 5))<line_sep>ok_(<not>(node<ne>Node(Literal('12345') 'o hai' 0 5)))<block_end><def_stmt>test_generic_visit_NotImplementedError_unnamed_node <block_start>"""
Test that generic_visit provides informative error messages
when visitors are not defined.
Regression test for https://github.com/erikrose/parsimonious/issues/110
"""<class_stmt>MyVisitor(NodeVisitor)<block_start>grammar=Grammar(r'''
bar = "b" "a" "r"
''')<line_sep>unwrapped_exceptions=(NotImplementedError )<block_end><with_stmt>assert_raises(NotImplementedError)<as>e<block_start>MyVisitor().parse('bar')<block_end>assert_in("No visitor method was defined for this expression: 'b'" str(e.exception))<block_end><def_stmt>test_generic_visit_NotImplementedError_named_node <block_start>"""
Test that generic_visit provides informative error messages
when visitors are not defined.
"""<class_stmt>MyVisitor(NodeVisitor)<block_start>grammar=Grammar(r'''
bar = myrule myrule myrule
myrule = ~"[bar]"
''')<line_sep>unwrapped_exceptions=(NotImplementedError )<block_end><with_stmt>assert_raises(NotImplementedError)<as>e<block_start>MyVisitor().parse('bar')<block_end>assert_in("No visitor method was defined for this expression: myrule = ~'[bar]'" str(e.exception))<block_end>
|
import FWCore.ParameterSet.Config as cms

dYToMuMuGenFilter = cms.EDFilter("DYToMuMuGenFilter",
                                 inputTag=cms.InputTag("prunedGenParticles"))
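
# --- Illustrative usage sketch (not part of the original fragment) ---
# In a CMSSW configuration this EDFilter would typically be scheduled inside a
# cms.Path; the process name below is a placeholder. Kept as a comment so the
# fragment itself stays a plain filter definition.
#
# process = cms.Process("ANA")
# process.dYToMuMuGenFilter = dYToMuMuGenFilter
# process.p = cms.Path(process.dYToMuMuGenFilter)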
|
# Copyright 2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
# -*- coding: utf-8 -*-
from .colorspace import (bgr2gray, bgr2hls, bgr2hsv, bgr2rgb, gray2bgr,
                         gray2rgb, hls2bgr, hsv2bgr, iminvert, posterize,
                         rgb2bgr, rgb2gray, solarize)

__all__ = [
    'solarize', 'posterize', 'bgr2gray', 'rgb2gray', 'gray2bgr', 'gray2rgb',
    'bgr2rgb', 'rgb2bgr', 'bgr2hsv', 'hsv2bgr', 'bgr2hls', 'hls2bgr',
]
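
# --- Illustrative usage sketch (not part of the original package file) ---
# Assuming the re-exported converters take and return HxWxC image ndarrays (as
# in mmcv-style image utilities), a typical round trip looks like this. Kept as
# a comment so importing the package stays side-effect free.
#
# import numpy as np
# bgr = np.random.randint(0, 256, size=(4, 4, 3), dtype=np.uint8)
# rgb = bgr2rgb(bgr)                       # swap channel order
# assert np.array_equal(rgb2bgr(rgb), bgr)
# gray = bgr2gray(bgr)                     # single-channel result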
|
# nuScenes dev-kit.
# Code written by <NAME>, 2020.

import os
import unittest

from nuimages import NuImages


class TestNuImages(unittest.TestCase):

    def test_load(self):
        """
        Loads up NuImages.
        This is intended to simply run the NuImages class to check for import errors, typos, etc.
        """
        assert 'NUIMAGES' in os.environ, 'Set NUIMAGES env. variable to enable tests.'
        nuim = NuImages(version='v1.0-mini', dataroot=os.environ['NUIMAGES'], verbose=False)

        # Trivial assert statement
        self.assertEqual(nuim.table_root, os.path.join(os.environ['NUIMAGES'], 'v1.0-mini'))


if __name__ == '__main__':
    unittest.main()
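
# --- Illustrative usage sketch (not part of the original test) ---
# Outside of unittest, the same smoke check can be run directly; the dataroot
# below is a placeholder for wherever the v1.0-mini split was extracted.
#
# import os
# os.environ.setdefault('NUIMAGES', '/data/sets/nuimages')
# nuim = NuImages(version='v1.0-mini', dataroot=os.environ['NUIMAGES'], verbose=True)
# print(nuim.table_root)  # .../v1.0-mini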
|
"""
This module contains classes used to define the standard behavior of the agent.
It relies on the controllers, the chosen training/test policy and the learning algorithm
to specify its behavior in the environment.
"""<import_stmt>os<import_stmt>numpy<as>np<import_stmt>copy<import_stmt>sys<import_stmt>joblib<import_from_stmt>warnings warn<import_from_stmt>.experiment base_controllers<as>controllers<import_from_stmt>.helper tree<import_from_stmt>deer.policies EpsilonGreedyPolicy<class_stmt>NeuralAgent(object)<block_start>"""The NeuralAgent class wraps a learning algorithm (such as a deep Q-network) for training and testing in a given environment.
Attach controllers to it in order to conduct an experiment (when to train the agent, when to test,...).
Parameters
-----------
environment : object from class Environment
The environment in which the agent interacts
learning_algo : object from class LearningAlgo
The learning algorithm associated to the agent
replay_memory_size : int
Size of the replay memory. Default : 1000000
replay_start_size : int
Number of observations (=number of time steps taken) in the replay memory before starting learning.
Default: minimum possible according to environment.inputDimensions().
batch_size : int
Number of tuples taken into account for each iteration of gradient descent. Default : 32
random_state : numpy random number generator
Default : random seed.
exp_priority : float
The exponent that determines how much prioritization is used, default is 0 (uniform priority).
One may check out Schaul et al. (2016) - Prioritized Experience Replay.
train_policy : object from class Policy
Policy followed when in training mode (mode -1)
test_policy : object from class Policy
Policy followed when in other modes than training (validation and test modes)
only_full_history : boolean
Whether we wish to train the neural network only on full histories or we wish to fill with zeroes the
observations before the beginning of the episode
"""<def_stmt>__init__ self environment learning_algo replay_memory_size=1000000 replay_start_size=<none> batch_size=32 random_state=np.random.RandomState() exp_priority=0 train_policy=<none> test_policy=<none> only_full_history=<true><block_start>inputDims=environment.inputDimensions()<if_stmt>replay_start_size<eq><none><block_start>replay_start_size=max(inputDims[i][0]<for>i range(len(inputDims)))<block_end><elif_stmt>replay_start_size<l>max(inputDims[i][0]<for>i range(len(inputDims)))<block_start><raise>AgentError("Replay_start_size should be greater than the biggest history of a state.")<block_end>self._controllers=[]<line_sep>self._environment=environment<line_sep>self._learning_algo=learning_algo<line_sep>self._replay_memory_size=replay_memory_size<line_sep>self._replay_start_size=replay_start_size<line_sep>self._batch_size=batch_size<line_sep>self._random_state=random_state<line_sep>self._exp_priority=exp_priority<line_sep>self._only_full_history=only_full_history<line_sep>self._dataset=DataSet(environment max_size=replay_memory_size random_state=random_state use_priority=self._exp_priority only_full_history=self._only_full_history)<line_sep>self._tmp_dataset=<none># Will be created by startTesting() when necessary
self._mode=-1<line_sep>self._totalModeNbrEpisode=0<line_sep>self._total_mode_reward=0<line_sep>self._training_loss_averages=[]<line_sep>self._Vs_on_last_episode=[]<line_sep>self._in_episode=<false><line_sep>self._selected_action=-1<line_sep>self._state=[]<for_stmt>i range(len(inputDims))<block_start>self._state.append(np.zeros(inputDims[i] dtype=float))<block_end><if_stmt>(train_policy<eq><none>)<block_start>self._train_policy=EpsilonGreedyPolicy(learning_algo environment.nActions() random_state 0.1)<block_end><else_stmt><block_start>self._train_policy=train_policy<block_end><if_stmt>(test_policy<eq><none>)<block_start>self._test_policy=EpsilonGreedyPolicy(learning_algo environment.nActions() random_state 0.)<block_end><else_stmt><block_start>self._test_policy=test_policy<block_end>self.gathering_data=<true># Whether the agent is gathering data or not
self.sticky_action=1# Number of times the agent is forced to take the same action as part of one actual time step
<block_end><def_stmt>setControllersActive self toDisable active<block_start>""" Activate controller
"""<for_stmt>i toDisable<block_start>self._controllers[i].setActive(active)<block_end><block_end><def_stmt>setLearningRate self lr<block_start>""" Set the learning rate for the gradient descent
"""<line_sep>self._learning_algo.setLearningRate(lr)<block_end><def_stmt>learningRate self<block_start>""" Get the learning rate
"""<line_sep><return>self._learning_algo.learningRate()<block_end><def_stmt>setDiscountFactor self df<block_start>""" Set the discount factor
"""<line_sep>self._learning_algo.setDiscountFactor(df)<block_end><def_stmt>discountFactor self<block_start>""" Get the discount factor
"""<line_sep><return>self._learning_algo.discountFactor()<block_end><def_stmt>overrideNextAction self action<block_start>""" Possibility to override the chosen action. This possibility should be used on the signal OnActionChosen.
"""<line_sep>self._selected_action=action<block_end><def_stmt>avgBellmanResidual self<block_start>""" Returns the average training loss on the epoch
"""<if_stmt>(len(self._training_loss_averages)<eq>0)<block_start><return>-1<block_end><return>np.average(self._training_loss_averages)<block_end><def_stmt>avgEpisodeVValue self<block_start>""" Returns the average V value on the episode (on time steps where a non-random action has been taken)
"""<if_stmt>(len(self._Vs_on_last_episode)<eq>0)<block_start><return>-1<block_end><if_stmt>(np.trim_zeros(self._Vs_on_last_episode)<ne>[])<block_start><return>np.average(np.trim_zeros(self._Vs_on_last_episode))<block_end><else_stmt><block_start><return>0<block_end><block_end><def_stmt>totalRewardOverLastTest self<block_start>""" Returns the average sum of rewards per episode and the number of episode
"""<line_sep><return>self._total_mode_reward/self._totalModeNbrEpisode self._totalModeNbrEpisode<block_end><def_stmt>attach self controller<block_start><if_stmt>(isinstance(controller controllers.Controller))<block_start>self._controllers.append(controller)<block_end><else_stmt><block_start><raise>TypeError("The object you try to attach is not a Controller.")<block_end><block_end><def_stmt>detach self controllerIdx<block_start><return>self._controllers.pop(controllerIdx)<block_end><def_stmt>mode self<block_start><return>self._mode<block_end><def_stmt>startMode self mode epochLength<block_start><if_stmt>self._in_episode<block_start><raise>AgentError("Trying to start mode while current episode is not yet finished. This method can be "<concat>"called only *between* episodes for testing and validation.")<block_end><elif_stmt>mode<eq>-1<block_start><raise>AgentError("Mode -1 is reserved and means 'training mode'; use resumeTrainingMode() instead.")<block_end><else_stmt><block_start>self._mode=mode<line_sep>self._total_mode_reward=0.<del_stmt>self._tmp_dataset<line_sep>self._tmp_dataset=DataSet(self._environment self._random_state max_size=self._replay_memory_size only_full_history=self._only_full_history)<block_end><block_end><def_stmt>resumeTrainingMode self<block_start>self._mode=-1<block_end><def_stmt>summarizeTestPerformance self<block_start><if_stmt>self._mode<eq>-1<block_start><raise>AgentError("Cannot summarize test performance outside test environment.")<block_end>self._environment.summarizePerformance(self._tmp_dataset self._learning_algo train_data_set=self._dataset)<block_end><def_stmt>train self<block_start>"""
This function selects a random batch of data (with self._dataset.randomBatch) and performs a
Q-learning iteration (with self._learning_algo.train).
"""<line_sep># We make sure that the number of elements in the replay memory
# is strictly superior to self._replay_start_size before taking
# a random batch and perform training
<if_stmt>self._dataset.n_elems<le>self._replay_start_size<block_start><return><block_end><try_stmt><block_start><if_stmt>hasattr(self._learning_algo 'nstep')<block_start>observations,actions,rewards,terminals,rndValidIndices=self._dataset.randomBatch_nstep(self._batch_size self._learning_algo.nstep self._exp_priority)<line_sep>loss,loss_ind=self._learning_algo.train(observations actions rewards terminals)<block_end><else_stmt><block_start>states,actions,rewards,next_states,terminals,rndValidIndices=self._dataset.randomBatch(self._batch_size self._exp_priority)<line_sep>loss,loss_ind=self._learning_algo.train(states actions rewards next_states terminals)<block_end>self._training_loss_averages.append(loss)<if_stmt>(self._exp_priority)<block_start>self._dataset.updatePriorities(pow(loss_ind self._exp_priority)+0.0001 rndValidIndices[1])<block_end><block_end><except_stmt>SliceError<as>e<block_start>warn("Training not done - "+str(e) AgentWarning)<block_end><block_end><def_stmt>dumpNetwork self fname nEpoch=-1<block_start>""" Dump the network
Parameters
-----------
fname : string
Name of the file where the network will be dumped
nEpoch : int
Epoch number (Optional)
"""<try_stmt><block_start>os.mkdir("nnets")<block_end><except_stmt>Exception<block_start><pass><block_end>basename="nnets/"+fname<for_stmt>f os.listdir("nnets/")<block_start><if_stmt>fname<in>f<block_start>os.remove("nnets/"+f)<block_end><block_end>all_params=self._learning_algo.getAllParams()<if_stmt>(nEpoch<ge>0)<block_start>joblib.dump(all_params basename+".epoch={}".format(nEpoch))<block_end><else_stmt><block_start>joblib.dump(all_params basename compress=<true>)<block_end><block_end><def_stmt>setNetwork self fname nEpoch=-1<block_start>""" Set values into the network
Parameters
-----------
fname : string
Name of the file where the values are
nEpoch : int
Epoch number (Optional)
"""<line_sep>basename="nnets/"+fname<if_stmt>(nEpoch<ge>0)<block_start>all_params=joblib.load(basename+".epoch={}".format(nEpoch))<block_end><else_stmt><block_start>all_params=joblib.load(basename)<block_end>self._learning_algo.setAllParams(all_params)<block_end><def_stmt>run self n_epochs epoch_length<block_start>"""
This function encapsulates the inference and the learning.
If the agent is in train mode (mode = -1):
It starts by calling the controllers method "onStart",
Then it runs a given number of epochs where an epoch is made up of one or many episodes (called with
agent._runEpisode) and where an epoch ends up after the number of steps reaches the argument "epoch_length".
It ends up by calling the controllers method "end".
If the agent is on non train mode (mode > -1):
This function runs a number of epochs in non train mode (mode > -1), thus without controllers.
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""<if_stmt>(self._mode<eq>-1)<block_start>self._run_train(n_epochs epoch_length)<block_end><else_stmt><block_start>self._run_non_train(n_epochs epoch_length)<block_end><block_end><def_stmt>_run_train self n_epochs epoch_length<block_start>"""
This function encapsulates the whole process of the learning.
It starts by calling the controllers method "onStart",
Then it runs a given number of epochs where an epoch is made up of one or many episodes (called with
agent._runEpisode) and where an epoch ends up after the number of steps reaches the argument "epoch_length".
It ends up by calling the controllers method "end".
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""<for_stmt>c self._controllers<block_start>c.onStart(self)<block_end>i=0<while_stmt>i<l>n_epochs<block_start>nbr_steps_left=epoch_length<line_sep>self._training_loss_averages=[]<while_stmt>nbr_steps_left<g>0# run new episodes until the number of steps left for the epoch has reached 0
<block_start>nbr_steps_left=self._runEpisode(nbr_steps_left)<block_end>i<augadd>1<for_stmt>c self._controllers<block_start>c.onEpochEnd(self)<block_end><block_end>self._environment.end()<for_stmt>c self._controllers<block_start>c.onEnd(self)<block_end><block_end><def_stmt>_run_non_train self n_epochs epoch_length<block_start>"""
This function runs a number of epochs in non train mode (id > -1).
Parameters
-----------
n_epochs : int
number of epochs
epoch_length : int
maximum number of steps for a given epoch
"""<for_stmt>c self._controllers<block_start>c.onStart(self)<block_end>i=0<while_stmt>i<l>n_epochs<block_start>nbr_steps_left=epoch_length<line_sep>self._totalModeNbrEpisode=0<while_stmt>nbr_steps_left<g>0<block_start>self._totalModeNbrEpisode<augadd>1<line_sep>nbr_steps_left=self._runEpisode(nbr_steps_left)<block_end>i<augadd>1<for_stmt>c self._controllers<block_start>c.onEpochEnd(self)<block_end><block_end>self._environment.end()<for_stmt>c self._controllers<block_start>c.onEnd(self)<block_end><block_end><def_stmt>_runEpisode self maxSteps<block_start>"""
This function runs an episode of learning. An episode ends up when the environment method "inTerminalState"
returns True (or when the number of steps reaches the argument "maxSteps")
Parameters
-----------
maxSteps : int
maximum number of steps before automatically ending the episode
"""<line_sep>self._in_episode=<true><line_sep>initState=self._environment.reset(self._mode)<line_sep>inputDims=self._environment.inputDimensions()<for_stmt>i range(len(inputDims))<block_start><if_stmt>inputDims[i][0]<g>1<block_start>self._state[i][1:]=initState[i][1:]<block_end><block_end>self._Vs_on_last_episode=[]<line_sep>is_terminal=<false><line_sep>reward=0<while_stmt>maxSteps<g>0<block_start>maxSteps<augsub>1<if_stmt>(self.gathering_data<eq><true><or>self._mode<ne>-1)<block_start>obs=self._environment.observe()<for_stmt>i range(len(obs))<block_start>self._state[i][0:-1]=self._state[i][1:]<line_sep>self._state[i][-1]=obs[i]<block_end>V,action,reward=self._step()<line_sep>self._Vs_on_last_episode.append(V)<if_stmt>self._mode<ne>-1<block_start>self._total_mode_reward<augadd>reward<block_end>is_terminal=self._environment.inTerminalState()# If the transition ends up in a terminal state, mark transition as terminal
# Note that the new obs will not be stored, as it is unnecessary.
<if_stmt>(maxSteps<g>0)<block_start>self._addSample(obs action reward is_terminal)<block_end><else_stmt><block_start>self._addSample(obs action reward <true>)<block_end><block_end># If the episode ends because max number of steps is reached, mark the transition as terminal
<for_stmt>c self._controllers<block_start>c.onActionTaken(self)<block_end><if_stmt>is_terminal<block_start><break><block_end><block_end>self._in_episode=<false><for_stmt>c self._controllers<block_start>c.onEpisodeEnd(self is_terminal reward)<block_end><return>maxSteps<block_end><def_stmt>_step self<block_start>"""
This method is called at each time step and performs one action in the environment.
Returns
-------
V : float
Estimated value function of current state.
action : int
The id of the action selected by the agent.
reward : float
Reward obtained for the transition
"""<line_sep>action,V=self._chooseAction()<line_sep>reward=0<for_stmt>i range(self.sticky_action)<block_start>reward<augadd>self._environment.act(action)<block_end><return>V action reward<block_end><def_stmt>_addSample self ponctualObs action reward is_terminal<block_start><if_stmt>self._mode<ne>-1<block_start>self._tmp_dataset.addSample(ponctualObs action reward is_terminal priority=1)<block_end><else_stmt><block_start>self._dataset.addSample(ponctualObs action reward is_terminal priority=1)<block_end><block_end><def_stmt>_chooseAction self<block_start><if_stmt>self._mode<ne>-1# Act according to the test policy if not in training mode
<block_start>action,V=self._test_policy.action(self._state mode=self._mode dataset=self._dataset)<block_end><else_stmt><block_start><if_stmt>self._dataset.n_elems<g>self._replay_start_size# follow the train policy
<block_start>action,V=self._train_policy.action(self._state mode=<none> dataset=self._dataset)#is self._state the only way to store/pass the state?
<block_end><else_stmt># Still gathering initial data: choose dummy action
<block_start>action,V=self._train_policy.randomAction()<block_end><block_end><for_stmt>c self._controllers<block_start>c.onActionChosen(self action)<block_end><return>action V<block_end><block_end><class_stmt>AgentError(RuntimeError)<block_start>"""Exception raised for errors when calling the various Agent methods at wrong times.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""<def_stmt>__init__ self value<block_start>self.value=value<block_end><def_stmt>__str__ self<block_start><return>repr(self.value)<block_end><block_end><class_stmt>AgentWarning(RuntimeWarning)<block_start>"""Warning issued of the various Agent methods.
Attributes:
expr -- input expression in which the error occurred
msg -- explanation of the error
"""<block_end><class_stmt>DataSet(object)<block_start>"""A replay memory consisting of circular buffers for observations, actions, rewards and terminals."""<def_stmt>__init__ self env random_state=<none> max_size=1000000 use_priority=<false> only_full_history=<true><block_start>"""Initializer.
Parameters
-----------
inputDims : list of tuples
Each tuple relates to one of the observations where the first value is the history size considered for this
observation and the rest describes the shape of each punctual observation (e.g., scalar, vector or matrix).
See base_classes.Environment.inputDimensions() documentation for more info.
random_state : Numpy random number generator
If None, a new one is created with default numpy seed.
max_size : float
The replay memory maximum size. Default : 1000000
"""<line_sep>self._batch_dimensions=env.inputDimensions()<line_sep>self._max_history_size=np.max([self._batch_dimensions[i][0]<for>i range(len(self._batch_dimensions))])<line_sep>self._size=max_size<line_sep>self._use_priority=use_priority<line_sep>self._only_full_history=only_full_history<if_stmt>(isinstance(env.nActions() int))<block_start>self._actions=CircularBuffer(max_size dtype="int8")<block_end><else_stmt><block_start>self._actions=CircularBuffer(max_size dtype='object')<block_end>self._rewards=CircularBuffer(max_size)<line_sep>self._terminals=CircularBuffer(max_size dtype="bool")<if_stmt>(self._use_priority)<block_start>self._prioritiy_tree=tree.SumTree(max_size)<line_sep>self._translation_array=np.zeros(max_size)<block_end>self._observations=np.zeros(len(self._batch_dimensions) dtype='object')<line_sep># Initialize the observations container if necessary
<for_stmt>i range(len(self._batch_dimensions))<block_start>self._observations[i]=CircularBuffer(max_size elemShape=self._batch_dimensions[i][1:] dtype=env.observationType(i))<block_end><if_stmt>(random_state<eq><none>)<block_start>self._random_state=np.random.RandomState()<block_end><else_stmt><block_start>self._random_state=random_state<block_end>self.n_elems=0<line_sep>self.sticky_action=1# Number of times the agent is forced to take the same action as part of one actual time step
<block_end><def_stmt>actions self<block_start>"""Get all actions currently in the replay memory, ordered by time where they were taken."""<line_sep><return>self._actions.getSlice(0)<block_end><def_stmt>rewards self<block_start>"""Get all rewards currently in the replay memory, ordered by time where they were received."""<line_sep><return>self._rewards.getSlice(0)<block_end><def_stmt>terminals self<block_start>"""Get all terminals currently in the replay memory, ordered by time where they were observed.
terminals[i] is True if actions()[i] lead to a terminal state (i.e. corresponded to a terminal
transition), and False otherwise.
"""<line_sep><return>self._terminals.getSlice(0)<block_end><def_stmt>observations self<block_start>"""Get all observations currently in the replay memory, ordered by time where they were observed.
"""<line_sep>ret=np.zeros_like(self._observations)<for_stmt>input range(len(self._observations))<block_start>ret[input]=self._observations[input].getSlice(0)<block_end><return>ret<block_end><def_stmt>updatePriorities self priorities rndValidIndices<block_start>"""
"""<for_stmt>i range(len(rndValidIndices))<block_start>self._prioritiy_tree.update(rndValidIndices[i] priorities[i])<block_end><block_end><def_stmt>randomBatch self batch_size use_priority<block_start>"""Returns a batch of states, actions, rewards, terminal status, and next_states for a number batch_size of randomly
chosen transitions. Note that if terminal[i] == True, then next_states[s][i] == np.zeros_like(states[s][i]) for
each s.
Parameters
-----------
batch_size : int
Number of transitions to return.
use_priority : Boolean
Whether to use prioritized replay or not
Returns
-------
states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
States are taken randomly in the data with the only constraint that they are complete regarding the history size
for each observation.
actions : numpy array of integers [batch_size]
actions[i] is the action taken after having observed states[:][i].
rewards : numpy array of floats [batch_size]
rewards[i] is the reward obtained for taking actions[i-1].
next_states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * history size * size of punctual observation (which is 2D,1D or scalar)]).
terminals : numpy array of booleans [batch_size]
terminals[i] is True if the transition leads to a terminal state and False otherwise
Throws
-------
SliceError
If a batch of this batch_size could not be built based on current data set (not enough data or all
trajectories are too short).
"""<if_stmt>(self._max_history_size+self.sticky_action-1<ge>self.n_elems)<block_start><raise>SliceError("Not enough elements in the dataset to create a "<concat>"complete state. {} elements in dataset; requires {}".format(self.n_elems self._max_history_size))<block_end><if_stmt>(self._use_priority)#FIXME : take into account the case where self._only_full_history is false
<block_start>rndValidIndices,rndValidIndices_tree=self._randomPrioritizedBatch(batch_size)<if_stmt>(rndValidIndices.size<eq>0)<block_start><raise>SliceError("Could not find a state with full histories")<block_end><block_end><else_stmt><block_start>rndValidIndices=np.zeros(batch_size dtype='int32')<if_stmt>(self._only_full_history)<block_start><for_stmt>i range(batch_size)# TODO: multithread this loop?
<block_start>rndValidIndices[i]=self._randomValidStateIndex(self._max_history_size+self.sticky_action-1)<block_end><block_end><else_stmt><block_start><for_stmt>i range(batch_size)# TODO: multithread this loop?
<block_start>rndValidIndices[i]=self._randomValidStateIndex(minimum_without_terminal=self.sticky_action)<block_end><block_end><block_end>actions=self._actions.getSliceBySeq(rndValidIndices)<line_sep>rewards=self._rewards.getSliceBySeq(rndValidIndices)<line_sep>terminals=self._terminals.getSliceBySeq(rndValidIndices)<line_sep>states=np.zeros(len(self._batch_dimensions) dtype='object')<line_sep>next_states=np.zeros_like(states)<line_sep># We calculate the first terminal index backward in time and set it
# at maximum to the value self._max_history_size+self.sticky_action-1
first_terminals=[]<for_stmt>rndValidIndex rndValidIndices<block_start>first_terminal=1<while_stmt>first_terminal<l>self._max_history_size+self.sticky_action-1<block_start><if_stmt>(self._terminals[rndValidIndex-first_terminal]<eq><true><or>first_terminal<g>rndValidIndex)<block_start><break><block_end>first_terminal<augadd>1<block_end>first_terminals.append(first_terminal)<block_end><for_stmt>input range(len(self._batch_dimensions))<block_start>states[input]=np.zeros((batch_size )+self._batch_dimensions[input] dtype=self._observations[input].dtype)<line_sep>next_states[input]=np.zeros_like(states[input])<for_stmt>i range(batch_size)<block_start>slice=self._observations[input].getSlice(rndValidIndices[i]-self.sticky_action+2-min(self._batch_dimensions[input][0] first_terminals[i]+self.sticky_action-1) rndValidIndices[i]+1)<if_stmt>(len(slice)<eq>len(states[input][i]))<block_start>states[input][i]=slice<block_end><else_stmt><block_start><for_stmt>j range(len(slice))<block_start>states[input][i][-j-1]=slice[-j-1]<block_end><block_end># If transition leads to terminal, we don't care about next state
<if_stmt>rndValidIndices[i]<ge>self.n_elems-1<or>terminals[i]<block_start>next_states[input][i]=np.zeros_like(states[input][i])<block_end><else_stmt><block_start>slice=self._observations[input].getSlice(rndValidIndices[i]+2-min(self._batch_dimensions[input][0] first_terminals[i]+1) rndValidIndices[i]+2)<if_stmt>(len(slice)<eq>len(states[input][i]))<block_start>next_states[input][i]=slice<block_end><else_stmt><block_start><for_stmt>j range(len(slice))<block_start>next_states[input][i][-j-1]=slice[-j-1]<block_end><block_end>#next_states[input][i] = self._observations[input].getSlice(rndValidIndices[i]+2-min(self._batch_dimensions[input][0],first_terminal), rndValidIndices[i]+2)
<block_end><block_end><block_end><if_stmt>(self._use_priority)<block_start><return>states actions rewards next_states terminals [rndValidIndices rndValidIndices_tree]<block_end><else_stmt><block_start><return>states actions rewards next_states terminals rndValidIndices<block_end><block_end><def_stmt>randomBatch_nstep self batch_size nstep use_priority<block_start>"""Return corresponding states, actions, rewards, terminal status, and next_states for a number batch_size of randomly
chosen transitions. Note that if terminal[i] == True, then next_states[s][i] == np.zeros_like(states[s][i]) for
each s.
Parameters
-----------
batch_size : int
Number of transitions to return.
nstep : int
Number of transitions to be considered for each element
use_priority : Boolean
Whether to use prioritized replay or not
Returns
-------
states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * (history size+nstep-1) * size of punctual observation (which is 2D,1D or scalar)]).
States are taken randomly in the data with the only constraint that they are complete regarding the history size
for each observation.
actions : numpy array of integers [batch_size, nstep]
actions[i] is the action taken after having observed states[:][i].
rewards : numpy array of floats [batch_size, nstep]
rewards[i] is the reward obtained for taking actions[i-1].
next_states : numpy array of objects
Each object is a numpy array that relates to one of the observations
with size [batch_size * (history size+nstep-1) * size of punctual observation (which is 2D,1D or scalar)]).
terminals : numpy array of booleans [batch_size, nstep]
terminals[i] is True if the transition leads to a terminal state and False otherwise
Throws
-------
SliceError
If a batch of this size could not be built based on current data set (not enough data or all
trajectories are too short).
"""<if_stmt>(self._max_history_size+self.sticky_action-1<ge>self.n_elems)<block_start><raise>SliceError("Not enough elements in the dataset to create a "<concat>"complete state. {} elements in dataset; requires {}".format(self.n_elems self._max_history_size))<block_end><if_stmt>(self._use_priority)#FIXME : take into account the case where self._only_full_history is false
<block_start>rndValidIndices,rndValidIndices_tree=self._randomPrioritizedBatch(batch_size)<if_stmt>(rndValidIndices.size<eq>0)<block_start><raise>SliceError("Could not find a state with full histories")<block_end><block_end><else_stmt><block_start>rndValidIndices=np.zeros(batch_size dtype='int32')<if_stmt>(self._only_full_history)<block_start><for_stmt>i range(batch_size)# TODO: multithread this loop?
<block_start>rndValidIndices[i]=self._randomValidStateIndex(self._max_history_size+self.sticky_action<times>nstep-1)<block_end><block_end><else_stmt><block_start><for_stmt>i range(batch_size)# TODO: multithread this loop?
<block_start>rndValidIndices[i]=self._randomValidStateIndex(minimum_without_terminal=self.sticky_action<times>nstep)<block_end><block_end><block_end>actions=np.zeros((batch_size (nstep)<times>self.sticky_action) dtype=int)<line_sep>rewards=np.zeros((batch_size (nstep)<times>self.sticky_action))<line_sep>terminals=np.zeros((batch_size (nstep)<times>self.sticky_action))<for_stmt>i range(batch_size)<block_start>actions[i]=self._actions.getSlice(rndValidIndices[i]-self.sticky_action<times>nstep+1 rndValidIndices[i]+self.sticky_action)<line_sep>rewards[i]=self._rewards.getSlice(rndValidIndices[i]-self.sticky_action<times>nstep+1 rndValidIndices[i]+self.sticky_action)<line_sep>terminals[i]=self._terminals.getSlice(rndValidIndices[i]-self.sticky_action<times>nstep+1 rndValidIndices[i]+self.sticky_action)<block_end>observations=np.zeros(len(self._batch_dimensions) dtype='object')<line_sep># We calculate the first terminal index backward in time and set it
# at maximum to the value self._max_history_size+self.sticky_action-1
first_terminals=[]<for_stmt>rndValidIndex rndValidIndices<block_start>first_terminal=1<while_stmt>first_terminal<l>self._max_history_size+self.sticky_action<times>nstep-1<block_start><if_stmt>(self._terminals[rndValidIndex-first_terminal]<eq><true><or>first_terminal<g>rndValidIndex)<block_start><break><block_end>first_terminal<augadd>1<block_end>first_terminals.append(first_terminal)<block_end>batch_dimensions=copy.deepcopy(self._batch_dimensions)<for_stmt>input range(len(self._batch_dimensions))<block_start>batch_dimensions[input]=tuple(x+y<for>x,y zip(self._batch_dimensions[input] (self.sticky_action<times>(nstep+1)-1 0 0)))<line_sep>observations[input]=np.zeros((batch_size )+batch_dimensions[input] dtype=self._observations[input].dtype)<for_stmt>i range(batch_size)<block_start>slice=self._observations[input].getSlice(rndValidIndices[i]-self.sticky_action<times>nstep+2-min(self._batch_dimensions[input][0] first_terminals[i]-self.sticky_action<times>nstep+1) rndValidIndices[i]+self.sticky_action+1)<if_stmt>(len(slice)<eq>len(observations[input][i]))<block_start>observations[input][i]=slice<block_end><else_stmt><block_start><for_stmt>j range(len(slice))<block_start>observations[input][i][-j-1]=slice[-j-1]<block_end><block_end># If transition leads to terminal, we don't care about next state
<if_stmt>terminals[i][-1]#rndValidIndices[i] >= self.n_elems - 1 or terminals[i]:
<block_start>observations[input][rndValidIndices[i]:rndValidIndices[i]+self.sticky_action+1]=0<block_end><block_end><block_end><if_stmt>(self._use_priority)<block_start><return>observations actions rewards terminals [rndValidIndices rndValidIndices_tree]<block_end><else_stmt><block_start><return>observations actions rewards terminals rndValidIndices<block_end><block_end><def_stmt>_randomValidStateIndex self minimum_without_terminal<block_start>""" Returns the index corresponding to a timestep that is valid
"""<line_sep>index_lowerBound=minimum_without_terminal-1<line_sep># We try out an index in the acceptable range of the replay memory
index=self._random_state.randint(index_lowerBound self.n_elems-1)<line_sep># Check if slice is valid wrt terminals
# The selected index may correspond to a terminal transition but not
# the previous minimum_without_terminal-1 transition
firstTry=index<line_sep>startWrapped=<false><while_stmt><true><block_start>i=index-1<line_sep>processed=0<for_stmt>_ range(minimum_without_terminal-1)<block_start><if_stmt>(i<l>0<or>self._terminals[i])<block_start><break><line_sep><block_end>i<augsub>1<line_sep>processed<augadd>1<block_end><if_stmt>(processed<l>minimum_without_terminal-1)# if we stopped prematurely, shift slice to the left and try again
<block_start>index=i<if_stmt>(index<l>index_lowerBound)<block_start>startWrapped=<true><line_sep>index=self.n_elems-1<block_end><if_stmt>(startWrapped<and>index<le>firstTry)<block_start><raise>SliceError("Could not find a state with full histories")<block_end><block_end><else_stmt># else index was ok according to terminals
<block_start><return>index<block_end><block_end><block_end><def_stmt>_randomPrioritizedBatch self batch_size<block_start>indices_tree=self._prioritiy_tree.getBatch(batch_size self._random_state self)<line_sep>indices_replay_mem=np.zeros(indices_tree.size dtype='int32')<for_stmt>i range(len(indices_tree))<block_start>indices_replay_mem[i]=int(self._translation_array[indices_tree[i]]-self._actions.getLowerBound())<block_end><return>indices_replay_mem indices_tree<block_end><def_stmt>addSample self obs action reward is_terminal priority<block_start>"""Store the punctual observations, action, reward, is_terminal and priority in the dataset.
Parameters
-----------
obs : ndarray
An ndarray(dtype='object') where obs[s] corresponds to the punctual observation s before the
agent took action [action].
action : int
The action taken after having observed [obs].
reward : float
The reward associated to taking this [action].
is_terminal : bool
Tells whether [action] led to a terminal state (i.e. corresponded to a terminal transition).
priority : float
The priority to be associated with the sample
"""<line_sep># Store observations
<for_stmt>i range(len(self._batch_dimensions))<block_start>self._observations[i].append(obs[i])<block_end># Update tree and translation table
<if_stmt>(self._use_priority)<block_start>index=self._actions.getIndex()<if_stmt>(index<ge>self._size)<block_start>ub=self._actions.getUpperBound()<line_sep>true_size=self._actions.getTrueSize()<line_sep>tree_ind=index%self._size<if_stmt>(ub<eq>true_size)<block_start>size_extension=true_size-self._size<line_sep># New index
index=self._size-1<line_sep>tree_ind=-1<line_sep># Shift translation array
self._translation_array<augsub>size_extension+1<block_end>tree_ind=np.where(self._translation_array<eq>tree_ind)[0][0]<block_end><else_stmt><block_start>tree_ind=index<block_end>self._prioritiy_tree.update(tree_ind)<line_sep>self._translation_array[tree_ind]=index<block_end># Store rest of sample
self._actions.append(action)<line_sep>self._rewards.append(reward)<line_sep>self._terminals.append(is_terminal)<if_stmt>(self.n_elems<l>self._size)<block_start>self.n_elems<augadd>1<block_end><block_end><block_end><class_stmt>CircularBuffer(object)<block_start><def_stmt>__init__ self size elemShape=() extension=0.1 dtype="float32"<block_start>self._size=size<line_sep>self._data=np.zeros((int(size+extension<times>size) )+elemShape dtype=dtype)<line_sep>self._trueSize=self._data.shape[0]<line_sep>self._lb=0<line_sep>self._ub=size<line_sep>self._cur=0<line_sep>self.dtype=dtype<block_end><def_stmt>append self obj<block_start><if_stmt>self._cur<g>self._size#> instead of >=
<block_start>self._lb<augadd>1<line_sep>self._ub<augadd>1<block_end><if_stmt>self._ub<ge>self._trueSize# Rolling array without copying whole array (for memory constraints)
# basic command: self._data[0:self._size-1] = self._data[self._lb:] OR NEW self._data[0:self._size] = self._data[self._lb-1:]
<block_start>n_splits=10<for_stmt>i range(n_splits)<block_start>self._data[i<times>(self._size)<floordiv>n_splits:(i+1)<times>(self._size)<floordiv>n_splits]=self._data[(self._lb-1)+i<times>(self._size)<floordiv>n_splits:(self._lb-1)+(i+1)<times>(self._size)<floordiv>n_splits]<block_end>self._lb=0<line_sep>self._ub=self._size<line_sep>self._cur=self._size<block_end>#OLD self._size - 1
self._data[self._cur]=obj<line_sep>self._cur<augadd>1<block_end><def_stmt>__getitem__ self i<block_start><return>self._data[self._lb+i]<block_end><def_stmt>getSliceBySeq self seq<block_start><return>self._data[seq+self._lb]<block_end><def_stmt>getSlice self start end=sys.maxsize<block_start><if_stmt>end<eq>sys.maxsize<block_start><return>self._data[self._lb+start:self._cur]<block_end><else_stmt><block_start><return>self._data[self._lb+start:self._lb+end]<block_end><block_end><def_stmt>getLowerBound self<block_start><return>self._lb<block_end><def_stmt>getUpperBound self<block_start><return>self._ub<block_end><def_stmt>getIndex self<block_start><return>self._cur<block_end><def_stmt>getTrueSize self<block_start><return>self._trueSize<block_end><block_end><class_stmt>SliceError(LookupError)<block_start>"""Exception raised for errors when getting slices from CircularBuffers.
Attributes:
value -- description of the error
"""<def_stmt>__init__ self value<block_start>self.value=value<block_end><def_stmt>__str__ self<block_start><return>repr(self.value)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><pass><block_end>
|
<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>subprocess<import_from_stmt>pathlib Path<import_from_stmt>flask g make_response request url_for<import_from_stmt>Pegasus.db connection<import_from_stmt>Pegasus.db.ensembles EMError Ensembles EnsembleWorkflowStates Triggers TriggerType <import_from_stmt>Pegasus.service.ensembles api emapp<import_from_stmt>Pegasus.service.lifecycle authenticate<line_sep>log=logging.getLogger(__name__)<def_stmt>connect <block_start>log.debug("Connecting to database")<line_sep>g.master_db_url=g.user.get_master_db_url()<line_sep>g.session=connection.connect(g.master_db_url connect_args={"check_same_thread":<false>})<block_end><def_stmt>disconnect <block_start><if_stmt>"conn"<in>g<block_start>log.debug("Disconnecting from database")<line_sep>g.session.close()<block_end><block_end>@emapp.errorhandler(Exception)<def_stmt>handle_error e<block_start><return>api.json_api_error(e)<block_end>emapp.before_request(authenticate)<line_sep>emapp.before_request(connect)<line_sep>@emapp.teardown_request<def_stmt>teardown_request exception<block_start>disconnect()<block_end>@emapp.route("/ensembles" methods=["GET"])<def_stmt>route_list_ensembles <block_start>dao=Ensembles(g.session)<line_sep>ensembles=dao.list_ensembles(g.user.username)<line_sep>result=[e.get_object()<for>e ensembles]<line_sep><return>api.json_response(result)<block_end>@emapp.route("/ensembles" methods=["POST"])<def_stmt>route_create_ensemble <block_start>name=request.form.get("name" <none>)<if_stmt>name<is><none><block_start><raise>EMError("Specify ensemble name")<block_end>max_running=request.form.get("max_running" 1)<line_sep>max_planning=request.form.get("max_planning" 1)<line_sep>dao=Ensembles(g.session)<line_sep>dao.create_ensemble(g.user.username name max_running max_planning)<line_sep>g.session.commit()<line_sep><return>api.json_created(url_for("route_get_ensemble" name=name _external=<true>))<block_end>@emapp.route("/ensembles/<string:name>" methods=["GET"])<def_stmt>route_get_ensemble name<block_start>dao=Ensembles(g.session)<line_sep>e=dao.get_ensemble(g.user.username name)<line_sep>result=e.get_object()<line_sep><return>api.json_response(result)<block_end>@emapp.route("/ensembles/<string:name>" methods=["PUT" "POST"])<def_stmt>route_update_ensemble name<block_start>dao=Ensembles(g.session)<line_sep>e=dao.get_ensemble(g.user.username name)<line_sep>max_running=request.form.get("max_running" <none>)<if_stmt>max_running<is><not><none><block_start>e.set_max_running(max_running)<block_end>max_planning=request.form.get("max_planning" <none>)<if_stmt>max_planning<is><not><none><block_start>e.set_max_planning(max_planning)<block_end>state=request.form.get("state" <none>)<if_stmt>state<is><not><none><block_start><if_stmt>state<ne>e.state# TODO Do the necessary state transition
<block_start>e.set_state(state)<block_end><block_end>e.set_updated()<line_sep>g.session.commit()<line_sep><return>api.json_response(e.get_object())<block_end>@emapp.route("/ensembles/<string:name>/workflows" methods=["GET"])<def_stmt>route_list_ensemble_workflows name<block_start>dao=Ensembles(g.session)<line_sep>e=dao.get_ensemble(g.user.username name)<line_sep>result=[w.get_object()<for>w dao.list_ensemble_workflows(e.id)]<line_sep><return>api.json_response(result)<block_end>@emapp.route("/ensembles/<string:ensemble>/workflows" methods=["POST"])<def_stmt>route_create_ensemble_workflow ensemble<block_start>dao=Ensembles(g.session)<line_sep>e=dao.get_ensemble(g.user.username ensemble)<line_sep>name=request.form.get("name" <none>)<if_stmt>name<is><none><block_start><raise>EMError("Specify ensemble workflow 'name'")<block_end>priority=request.form.get("priority" 0)<line_sep>basedir=request.form.get("basedir")<if_stmt>basedir<is><none><block_start><raise>EMError("Specify 'basedir' where plan command should be executed")<block_end>plan_command=request.form.get("plan_command")<if_stmt>plan_command<is><none><block_start><raise>EMError("Specify 'plan_command' that should be executed to plan workflow")<block_end>dao.create_ensemble_workflow(e.id name basedir priority plan_command)<line_sep>g.session.commit()<line_sep><return>api.json_created(url_for("route_get_ensemble_workflow" ensemble=ensemble workflow=name))<block_end>@emapp.route("/ensembles/<string:ensemble>/workflows/<string:workflow>" methods=["GET"])<def_stmt>route_get_ensemble_workflow ensemble workflow<block_start>dao=Ensembles(g.session)<line_sep>e=dao.get_ensemble(g.user.username ensemble)<line_sep>w=dao.get_ensemble_workflow(e.id workflow)<line_sep>result=w.get_detail_object()<line_sep><return>api.json_response(result)<block_end>@emapp.route("/ensembles/<string:ensemble>/workflows/<string:workflow>" methods=["PUT" "POST"])<def_stmt>route_update_ensemble_workflow ensemble workflow<block_start>dao=Ensembles(g.session)<line_sep>e=dao.get_ensemble(g.user.username ensemble)<line_sep>w=dao.get_ensemble_workflow(e.id workflow)<line_sep>priority=request.form.get("priority" <none>)<if_stmt>priority<is><not><none><block_start>w.set_priority(priority)<block_end>state=request.form.get("state" <none>)<if_stmt>state<is><not><none><block_start>w.change_state(state)<block_end>w.set_updated()<line_sep>g.session.commit()<line_sep><return>api.json_response(w.get_detail_object())<block_end>@emapp.route("/ensembles/<string:ensemble>/workflows/<string:workflow>/analyze" methods=["GET"])<def_stmt>route_analyze_ensemble_workflow ensemble workflow<block_start>dao=Ensembles(g.session)<line_sep>e=dao.get_ensemble(g.user.username ensemble)<line_sep>w=dao.get_ensemble_workflow(e.id workflow)<line_sep>report="".join(analyze(w))<line_sep>resp=make_response(report 200)<line_sep>resp.headers["Content-Type"]="text/plain"<line_sep><return>resp<block_end><def_stmt>analyze workflow<block_start>w=workflow<line_sep><yield>"Workflow state is %s\n"%w.state<line_sep><yield>"Plan command is: %s\n"%w.plan_command<line_sep>logfile=w.get_logfile()<if_stmt>os.path.isfile(logfile)<block_start><yield>"Workflow log:\n"<for_stmt>l open(w.get_logfile() "rb")<block_start><yield>"LOG: %s"%l.decode()<block_end><block_end><else_stmt><block_start><yield>"No workflow log available\n"<block_end><if_stmt>w.submitdir<is><none><or><not>os.path.isdir(w.submitdir)<block_start><yield>"No submit directory available\n"<block_end><else_stmt><block_start><yield>"pegasus-analyzer output 
is:\n"<line_sep>p=subprocess.Popen(["pegasus-analyzer" w.submitdir] stdout=subprocess.PIPE stderr=subprocess.STDOUT )<line_sep>out,err=p.communicate()<line_sep>out=out.decode()<for_stmt>l out.split("\n")<block_start><yield>"ANALYZER: %s\n"%l<block_end>rc=p.wait()<line_sep><yield>"ANALYZER: Exited with code %d\n"%rc<block_end><if_stmt>w.state<eq>EnsembleWorkflowStates.PLAN_FAILED<block_start><yield>"Planner failure detected\n"<block_end><elif_stmt>w.state<eq>EnsembleWorkflowStates.RUN_FAILED<block_start><yield>"pegasus-run failure detected\n"<block_end><elif_stmt>w.state<eq>EnsembleWorkflowStates.FAILED<block_start><yield>"Workflow failure detected\n"<block_end><block_end># --- trigger related routes ---------------------------------------------------
@emapp.route("/ensembles/<string:ensemble>/triggers" methods=["GET"])<def_stmt>route_list_triggers ensemble<block_start>dao=Triggers(g.session)<line_sep>triggers=dao.list_triggers_by_ensemble(g.user.username ensemble)<line_sep><return>api.json_response([Triggers.get_object(t)<for>t triggers])<block_end>@emapp.route("/ensembles/<string:ensemble>/triggers/<string:trigger>" methods=["GET"])<def_stmt>route_get_trigger ensemble trigger<block_start><raise>NotImplementedError("TODO")<block_end># TODO: checks for correct data should be done here on the backend
"""
# error response format
{
id: "id", <-- unique id to a request, it has been added as request.uid (use this when logging)
code: "UNPROCESSABLE_ENTITY", <-- capitalized versions of errors that json schema would return
"message": "Err description",
"errors": [
{
code: "MIN_LEN_ERR",
message": "Err description",
path: [ field_name ],
},
..
],
"warnings": [ .. ]
}
"""<line_sep>@emapp.route("/ensembles/<string:ensemble>/triggers/cron" methods=["POST"])<def_stmt>route_create_cron_trigger ensemble# verify that ensemble exists for user
<block_start>e_dao=Ensembles(g.session)<line_sep># raises EMError code 404 if does not exist
ensemble_id=e_dao.get_ensemble(g.user.username ensemble).id<line_sep># validate trigger
trigger=request.form.get("trigger" type=str)<if_stmt><not>trigger<or>len(trigger)<eq>0<block_start><raise>EMError("trigger name must be a non-empty string")<block_end># validate workflow_script
workflow_script=request.form.get("workflow_script" type=str)<if_stmt><not>workflow_script<or>len(workflow_script)<eq>0<block_start><raise>EMError("workflow_script name must be a non-empty string")<block_end><if_stmt><not>Path(workflow_script).is_absolute()<block_start><raise>EMError("workflow_script must be given as an absolute path")<block_end># validate workflow_args
can_decode=<true><try_stmt><block_start>workflow_args=json.loads(request.form.get("workflow_args"))<block_end><except_stmt>json.JSONDecodeError<block_start>can_decode=<false><block_end><if_stmt><not>can_decode<or><not>isinstance(workflow_args list)<block_start><raise>EMError("workflow_args must be given as a list serialized to json")<block_end># validate interval
<try_stmt><block_start>interval=to_seconds(request.form.get("interval" type=str))<block_end><except_stmt>ValueError<block_start><raise>EMError("interval must be given as `<int> <s|m|h|d>` and be greater than 0 seconds")<block_end># validate timeout
<try_stmt><block_start>timeout=request.form.get("timeout" type=str default=<none>)<if_stmt>timeout<is><not><none><block_start>timeout=to_seconds(timeout)<block_end><block_end><except_stmt>ValueError<block_start><raise>EMError("timeout must be given as `<int> <s|m|h|d>` and be greater than 0 seconds")<block_end>kwargs={"ensemble_id":ensemble_id "trigger":trigger "trigger_type":TriggerType.CRON.value "workflow_script":workflow_script "workflow_args":workflow_args "interval":interval "timeout":timeout }<line_sep># create trigger entry in db
t_dao=Triggers(g.session)<line_sep>t_dao.insert_trigger(**kwargs)<line_sep># return response success
<return>api.json_created(url_for("route_get_trigger" ensemble=ensemble trigger=trigger))<block_end>@emapp.route("/ensembles/<string:ensemble>/triggers/file_pattern" methods=["POST"])<def_stmt>route_create_file_pattern_trigger ensemble# verify that ensemble exists for user
<block_start>e_dao=Ensembles(g.session)<line_sep># raises EMError code 404 if does not exist
ensemble_id=e_dao.get_ensemble(g.user.username ensemble).id<line_sep># validate trigger
trigger=request.form.get("trigger" type=str)<if_stmt><not>trigger<or>len(trigger)<eq>0<block_start><raise>EMError("trigger name must be a non-empty string")<block_end># validate workflow_script
workflow_script=request.form.get("workflow_script" type=str)<if_stmt><not>workflow_script<or>len(workflow_script)<eq>0<block_start><raise>EMError("workflow_script name must be a non-empty string")<block_end><if_stmt><not>Path(workflow_script).is_absolute()<block_start><raise>EMError("workflow_script must be given as an absolute path")<block_end># validate workflow_args
can_decode=<true><try_stmt><block_start>workflow_args=json.loads(request.form.get("workflow_args"))<block_end><except_stmt>json.JSONDecodeError<block_start>can_decode=<false><block_end><if_stmt><not>can_decode<or><not>isinstance(workflow_args list)<block_start><raise>EMError("workflow_args must be given as a list serialized to json")<block_end># validate interval
<try_stmt><block_start>interval=to_seconds(request.form.get("interval" type=str))<block_end><except_stmt>ValueError<block_start><raise>EMError("interval must be given as `<int> <s|m|h|d>` and be greater than 0 seconds")<block_end># validate timeout
<try_stmt><block_start>timeout=request.form.get("timeout" type=str default=<none>)<if_stmt>timeout<is><not><none><block_start>timeout=to_seconds(timeout)<block_end><block_end><except_stmt>ValueError<block_start><raise>EMError("timeout must be given as `<int> <s|m|h|d>` and be greater than 0 seconds")<block_end># validate file_patterns
can_decode=<true><try_stmt><block_start>file_patterns=json.loads(request.form.get("file_patterns"))<block_end><except_stmt>json.JSONDecodeError<block_start>can_decode=<false><block_end><if_stmt><not>can_decode<or><not>isinstance(file_patterns list)<block_start><raise>EMError("file_patterns must be given as a list serialized to json")<block_end><if_stmt>len(file_patterns)<l>1<block_start><raise>EMError("file_patterns must contain at least one file pattern")<block_end><for_stmt>fp file_patterns<block_start><if_stmt><not>Path(fp).is_absolute()<block_start><raise>EMError("each file pattern must be given as an absolute path (e.g. '/inputs/*.txt')")<block_end><block_end>kwargs={"ensemble_id":ensemble_id "trigger":trigger "trigger_type":TriggerType.FILE_PATTERN.value "workflow_script":workflow_script "workflow_args":workflow_args "interval":interval "timeout":timeout "file_patterns":file_patterns }<line_sep># create trigger entry in db
t_dao=Triggers(g.session)<line_sep>t_dao.insert_trigger(**kwargs)<line_sep># return response success
<return>api.json_created(url_for("route_get_trigger" ensemble=ensemble trigger=trigger))<block_end>@emapp.route("/ensembles/<string:ensemble>/triggers/<string:trigger>" methods=["DELETE"])<def_stmt>route_delete_trigger ensemble trigger# verify that ensemble exists for user
<block_start>e_dao=Ensembles(g.session)<line_sep># raises EMError code 404 if does not exist
ensemble_id=e_dao.get_ensemble(g.user.username ensemble).id<line_sep># update trigger state to be STOPPED so that the TriggerManager can
# handle it appropriately
t_dao=Triggers(g.session)<line_sep># make sure get_trigger raises 404 if nothing found
trigger_id=t_dao.get_trigger(ensemble_id trigger)._id<line_sep>t_dao.update_state(ensemble_id trigger_id "STOPPED")<line_sep><return>api.json_response({"message":"ensemble: {}, trigger: {} marked for deletion".format(ensemble trigger)} status_code=202 )<block_end><def_stmt>to_seconds value:str<arrow>int<block_start>"""Convert time unit given as '<int> <s|m|h|d>` to seconds.
:param value: input str
:type value: str
:raises ValueError: value must be given as '<int> <s|m|h|d>'
:raises ValueError: value must be > 0s
:return: value given in seconds
:rtype: int
"""<line_sep>value=value.strip()<line_sep>pattern=re.compile(r"\d+ *[sSmMhHdD]")<if_stmt><not>pattern.fullmatch(value)<block_start><raise>ValueError("invalid interval: {}, interval must be given as '<int> <s|m|h|d>'".format(value))<block_end>num=int(value[0:len(value)-1])<line_sep>unit=value[-1].lower()<line_sep>as_seconds={"s":1 "m":60 "h":60<times>60 "d":60<times>60<times>24}<line_sep>result=as_seconds[unit]<times>num<if_stmt>result<le>0<block_start><raise>ValueError("invalid interval: {}, interval must be greater than 0 seconds".format(result))<block_end><return>result<block_end>
|
########################################################################
###
### Read out APEs from .db files and convert them to trees
### that can be read by the APE validation plot tools.
###
### Intended to provide a straightforward comparison of
### measured APE values to values stored in .db files
###
########################################################################
###
### HOW TO USE:
### 1. Run the default setup procedure for the APE
### tool (including creation of a TrackerTree)
### 2. Configure the apeTreeCreateDefault tool below
### and run it with cmsRun
### 3. Use output file in validation, for example in
### macros/commandsDrawComparison.C
###
########################################################################
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>Alignment.APEEstimation.SectorBuilder_cff *<import_stmt>os<line_sep>##
## User options
##
# Run number to use for data in case one uses a multi-IOV object
theFirstRun=1<line_sep># Which global tag to use
theGlobalTag='auto:phase1_2017_realistic'<line_sep># Source from which to get the APE object
theSource='frontier://FrontierProd/CMS_CONDITIONS'<line_sep># Tag to extract the APE object
theTag='TrackerAlignmentExtendedErrors_Upgrade2017_pseudoAsymptotic_v3'<line_sep># Name and path of output File
theOutputFile='defaultAPE.root'<line_sep># Sector definitions, RecentSectors is the typical granularity
theSectors=RecentSectors<line_sep>##
## Process definition
##
process=cms.Process("ApeTreeCreateDefault")<line_sep>##
## Message Logger
##
process.load("FWCore.MessageService.MessageLogger_cfi")<line_sep>process.MessageLogger.DefaultAPETree=dict()<line_sep>process.MessageLogger.SectorBuilder=dict()<line_sep>process.MessageLogger.cerr.INFO.limit=0<line_sep>process.MessageLogger.cerr.default.limit=0<line_sep>process.MessageLogger.cerr.DefaultAPETree=cms.untracked.PSet(limit=cms.untracked.int32(-1))<line_sep>process.MessageLogger.cerr.SectorBuilder=cms.untracked.PSet(limit=cms.untracked.int32(-1))<line_sep>##
## Process options
##
process.options=cms.untracked.PSet(wantSummary=cms.untracked.bool(<true>) )<line_sep>##
## Input Files
##
process.source=cms.Source("EmptySource" firstRun=cms.untracked.uint32(theFirstRun))<line_sep>##
## Number of Events
##
process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>### Load desired default APEs
process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<import_from_stmt>Configuration.AlCa.GlobalTag GlobalTag<line_sep>process.GlobalTag=GlobalTag(process.GlobalTag theGlobalTag '')<import_from_stmt>CondCore.CondDB.CondDB_cfi *<line_sep>CondDBAlignmentError=CondDB.clone(connect=cms.string(theSource))<line_sep>process.myTrackerAlignmentErr=cms.ESSource("PoolDBESSource" CondDBAlignmentError timetype=cms.string("runnumber") toGet=cms.VPSet(cms.PSet(record=cms.string('TrackerAlignmentErrorExtendedRcd') tag=cms.string(theTag))))<line_sep>process.es_prefer_trackerAlignmentErr=cms.ESPrefer("PoolDBESSource" "myTrackerAlignmentErr")<line_sep>##
## Define Sequence
##
process.ApeTreeCreateDefaultSequence=cms.Sequence()<line_sep>process.ApeTreeCreateDefault=cms.EDAnalyzer('ApeTreeCreateDefault' resultFile=cms.string(theOutputFile) trackerTreeFile=cms.string(os.environ['CMSSW_BASE']+'/src/Alignment/TrackerAlignment/hists/TrackerTree.root') sectors=theSectors )<line_sep>process.ApeTreeCreateDefaultSequence<augmul>process.ApeTreeCreateDefault<line_sep>##
## Path
##
process.p=cms.Path(process.ApeTreeCreateDefaultSequence)<line_sep>
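##
## Example invocation (the configuration file name below is an assumption; use
## whatever name this file is saved under):
##   cmsRun apeTreeCreateDefault_cfg.py
##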
|
# coding: utf-8
<import_from_future_stmt> absolute_import<import_from_stmt>datetime date datetime# noqa: F401
<import_from_stmt>typing List Dict# noqa: F401
<import_from_stmt>openapi_server.models.base_model_ Model<import_from_stmt>openapi_server util<class_stmt>UploadForm(Model)<block_start>"""NOTE: This class is auto generated by OpenAPI Generator (https://openapi-generator.tech).
Do not edit the class manually.
"""<def_stmt>__init__ self additional_metadata=<none> file=<none># noqa: E501
<block_start>"""UploadForm - a model defined in OpenAPI
:param additional_metadata: The additional_metadata of this UploadForm. # noqa: E501
:type additional_metadata: str
:param file: The file of this UploadForm. # noqa: E501
:type file: file
"""<line_sep>self.openapi_types={'additional_metadata':str 'file':file}<line_sep>self.attribute_map={'additional_metadata':'additionalMetadata' 'file':'file'}<line_sep>self._additional_metadata=additional_metadata<line_sep>self._file=file<block_end>@classmethod<def_stmt>from_dict cls dikt<block_start>"""Returns the dict as a model
:param dikt: A dict.
:type: dict
:return: The UploadForm of this UploadForm. # noqa: E501
:rtype: UploadForm
"""<line_sep><return>util.deserialize_model(dikt cls)<block_end>@property<def_stmt>additional_metadata self<block_start>"""Gets the additional_metadata of this UploadForm.
Additional data to pass to server # noqa: E501
:return: The additional_metadata of this UploadForm.
:rtype: str
"""<line_sep><return>self._additional_metadata<block_end>@additional_metadata.setter<def_stmt>additional_metadata self additional_metadata<block_start>"""Sets the additional_metadata of this UploadForm.
Additional data to pass to server # noqa: E501
:param additional_metadata: The additional_metadata of this UploadForm.
:type additional_metadata: str
"""<line_sep>self._additional_metadata=additional_metadata<block_end>@property<def_stmt>file self<block_start>"""Gets the file of this UploadForm.
file to upload # noqa: E501
:return: The file of this UploadForm.
:rtype: file
"""<line_sep><return>self._file<block_end>@file.setter<def_stmt>file self file<block_start>"""Sets the file of this UploadForm.
file to upload # noqa: E501
:param file: The file of this UploadForm.
:type file: file
"""<if_stmt>file<is><none><block_start><raise>ValueError("Invalid value for `file`, must not be `None`")<block_end># noqa: E501
self._file=file<block_end><block_end>
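# Illustrative use of the generated model (assumes util.deserialize_model maps the
# camelCase keys from attribute_map back onto the snake_case attributes):
# form = UploadForm.from_dict({'additionalMetadata': 'some notes', 'file': uploaded_file})
# form.additional_metadata  # -> 'some notes'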
|
r"""
Synthetic seismograms using the convolutional model
---------------------------------------------------
The simplest way to get a seismogram (in time x offset) is through the
convolutional model
.. math::
trace(t) = wavelet(t) \ast reflectivity(t)
Module :mod:`fatiando.seismic.conv` defines functions for doing this
convolution, calculating the required reflectivity, and converting a depth
model into time.
"""<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>fatiando.seismic conv<import_from_stmt>fatiando.vis mpl<line_sep># Define the parameters of our depth model
n_samples,n_traces=[600 100]<line_sep>velocity=1500<times>np.ones((n_samples n_traces))<line_sep># We'll put two interfaces in depth
velocity[150: :]=2000<line_sep>velocity[400: :]=3500<line_sep>dt=2e-3<line_sep># We need to convert the depth model we made above into time
vel_l=conv.depth_2_time(velocity velocity dt=dt dz=1)<line_sep># and we'll assume the density is homogeneous
rho_l=2200<times>np.ones(np.shape(vel_l))<line_sep># With that, we can calculate the reflectivity model in time
rc=conv.reflectivity(vel_l rho_l)<line_sep># and finally perform our convolution
synt=conv.convolutional_model(rc 30 conv.rickerwave dt=dt)<line_sep># We can use the utility function in fatiando.vis.mpl to plot the seismogram
fig,axes=plt.subplots(1 2 figsize=(8 5))<line_sep>ax=axes[0]<line_sep>ax.set_title("Velocity model (in depth)")<line_sep>tmp=ax.imshow(velocity extent=[0 n_traces n_samples 0] cmap="copper" aspect='auto' origin='upper')<line_sep>fig.colorbar(tmp ax=ax pad=0 aspect=50)<line_sep>ax.set_xlabel('Trace')<line_sep>ax.set_ylabel('Depth (m)')<line_sep>ax=axes[1]<line_sep>ax.set_title("Synthetic seismogram")<line_sep>mpl.seismic_wiggle(synt[: ::20] dt scale=1)<line_sep>mpl.seismic_image(synt dt cmap="RdBu_r" aspect='auto')<line_sep>ax.set_xlabel('Trace')<line_sep>ax.set_ylabel('Time (s)')<line_sep>plt.tight_layout()<line_sep>plt.show()<line_sep>
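# For reference, the convolutional model from the docstring can also be sketched
# directly with numpy (illustration only: a 30 Hz Ricker wavelet and two arbitrary
# reflection coefficients, reusing dt and n_samples from above):
# t_w = np.arange(-0.05, 0.05, dt)
# ricker = (1 - 2 * (np.pi * 30 * t_w) ** 2) * np.exp(-(np.pi * 30 * t_w) ** 2)
# rc_single = np.zeros(n_samples); rc_single[150] = 0.2; rc_single[400] = 0.3
# trace = np.convolve(rc_single, ricker, mode="same")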
|
<import_from_stmt>nose.tools eq_<import_from_stmt>tests get_url<import_from_stmt>tests.simple_page SimplePageTest<class_stmt>TestClick(SimplePageTest)<block_start><def_stmt>test_link self<block_start>eq_(self.page.icon_link.get_href() get_url('icon.gif'))<block_end><block_end>
|
<import_stmt>os<import_stmt>pyboolector<import_from_stmt>pyboolector Boolector BoolectorException<if_stmt>__name__<eq>"__main__"<block_start><try_stmt># Create Boolector instance
<block_start>btor=Boolector()<line_sep># Enable model generation
btor.Set_opt(pyboolector.BTOR_OPT_MODEL_GEN <true>)<line_sep># Create bit-vector sort of size 8
bvsort8=btor.BitVecSort(8)<line_sep># Create expressions
x=btor.Var(bvsort8 "x")<line_sep>y=btor.Var(bvsort8 "y")<line_sep>zero=btor.Const(0 8)<line_sep>hundred=btor.Const(100 8)<line_sep># 0 < x
ult_x=btor.Ult(zero x)<line_sep>btor.Assert(ult_x)<line_sep># x <= 100
ulte_x=btor.Ulte(x hundred)<line_sep>btor.Assert(ulte_x)<line_sep># 0 < y
ult_y=btor.Ult(zero y)<line_sep>btor.Assert(ult_y)<line_sep># y <= 100
ulte_y=btor.Ulte(y hundred)<line_sep>btor.Assert(ulte_y)<line_sep># x * y
mul=btor.Mul(x y)<line_sep># x * y < 100
ult=btor.Ult(mul hundred)<line_sep>btor.Assert(ult)<line_sep>umulo=btor.Umulo(x y)<line_sep>numulo=btor.Not(umulo)# prevent overflow
btor.Assert(numulo)<line_sep>res=btor.Sat()<line_sep>print("Expect: sat")<line_sep>print("Boolector: " end='')<if_stmt>res<eq>btor.SAT<block_start>print("sat")<block_end><elif_stmt>res<eq>btor.UNSAT<block_start>print("unsat")<block_end><else_stmt><block_start>print("unknown")<block_end>print("")<line_sep># prints "x: 00000100"
print("assignment of {}: {}".format(x.symbol x.assignment))<line_sep># prints: "y: 00010101"
print("assignment of {}: {}".format(y.symbol y.assignment))<line_sep>print("")<line_sep>print("Print model in BTOR format:")<line_sep>btor.Print_model("btor")<line_sep>print("")<line_sep>print("Print model in SMT-LIBv2 format:")<line_sep>btor.Print_model("smt2")<line_sep>print("")<block_end><except_stmt>BoolectorException<as>e<block_start>print("Caught exception: "+str(e))<block_end><block_end>
|
<import_stmt>torch<import_stmt>pytest<def_stmt>test_saveable_dataloader tmpdir device<block_start><import_from_stmt>speechbrain.dataio.dataloader SaveableDataLoader<line_sep>save_file=tmpdir+"/dataloader.ckpt"<line_sep>dataset=torch.randn(10 1 device=device)<line_sep>dataloader=SaveableDataLoader(dataset collate_fn=<none>)<line_sep>data_iterator=iter(dataloader)<line_sep>first_item=next(data_iterator)<assert_stmt>first_item<eq>dataset[0]<line_sep># Save here:
dataloader._speechbrain_save(save_file)<line_sep>second_item=next(data_iterator)<assert_stmt>second_item<eq>dataset[1]<line_sep># Now make a new dataloader and recover:
new_dataloader=SaveableDataLoader(dataset collate_fn=<none>)<line_sep>new_dataloader._speechbrain_load(save_file end_of_epoch=<false> device=<none>)<line_sep>new_data_iterator=iter(new_dataloader)<line_sep>second_second_item=next(new_data_iterator)<assert_stmt>second_second_item<eq>second_item<block_end><def_stmt>test_saveable_dataloader_multiprocess tmpdir# Same test as above, but with multiprocess dataloading
<block_start><import_from_stmt>speechbrain.dataio.dataloader SaveableDataLoader<line_sep>save_file=tmpdir+"/dataloader.ckpt"<line_sep>dataset=torch.randn(10 1)<for_stmt>num_parallel [1 2 3 4]<block_start>dataloader=SaveableDataLoader(dataset num_workers=num_parallel collate_fn=<none>)<line_sep># Note num_workers
data_iterator=iter(dataloader)<line_sep>first_item=next(data_iterator)<assert_stmt>first_item<eq>dataset[0]<line_sep># Save here, note that this overwrites.
dataloader._speechbrain_save(save_file)<line_sep>second_item=next(data_iterator)<assert_stmt>second_item<eq>dataset[1]<line_sep># Cleanup needed for MacOS (open file limit)
<del_stmt>data_iterator<del_stmt>dataloader<line_sep># Now make a new dataloader and recover:
new_dataloader=SaveableDataLoader(dataset num_workers=num_parallel collate_fn=<none>)<line_sep>new_dataloader._speechbrain_load(save_file end_of_epoch=<false> device=<none>)<line_sep>new_data_iterator=iter(new_dataloader)<line_sep>second_second_item=next(new_data_iterator)<assert_stmt>second_second_item<eq>second_item<del_stmt>new_data_iterator<del_stmt>new_dataloader<block_end><block_end><def_stmt>test_looped_loader tmpdir# Tests that LoopedLoader will raise StopIteration appropriately
# And that it can recover and keep the place.
<block_start><import_from_stmt>speechbrain.dataio.dataloader LoopedLoader<line_sep>save_file=tmpdir+"/loopedloader.ckpt"<line_sep>data=range(3)<line_sep>dataloader=LoopedLoader(data epoch_length=2)<line_sep>data_iterator=iter(dataloader)<assert_stmt>next(data_iterator)<eq>0<line_sep># Save here, 1 to go:
dataloader.save(save_file)<assert_stmt>next(data_iterator)<eq>1<with_stmt>pytest.raises(StopIteration)<block_start>next(data_iterator)<block_end># And it can be continued past the range:
<assert_stmt>next(data_iterator)<eq>2<assert_stmt>next(data_iterator)<eq>0<line_sep># And again it raises:
<with_stmt>pytest.raises(StopIteration)<block_start>next(data_iterator)<block_end># Now make a new dataloader and recover:
new_dataloader=LoopedLoader(data epoch_length=2)<line_sep>new_dataloader.load(save_file end_of_epoch=<false> device=<none>)<line_sep>new_data_iterator=iter(new_dataloader)<line_sep>next(new_data_iterator)<with_stmt>pytest.raises(StopIteration)<block_start>next(new_data_iterator)<block_end><block_end>
|
<import_from_stmt>flask request abort jsonify g<import_from_stmt>.. db<import_from_stmt>..auth token_auth token_optional_auth<import_from_stmt>..models Message<import_from_stmt>..utils timestamp url_for<import_from_stmt>..tasks async_task<import_from_stmt>. api<line_sep>@api.route('/messages' methods=['POST'])@token_auth.login_required@async_task<def_stmt>new_message <block_start>"""
Post a new message.
This endpoint requires a valid user token.
"""<line_sep>msg=Message.create(request.get_json()<or>{})<line_sep>db.session.add(msg)<line_sep>db.session.commit()<line_sep>r=jsonify(msg.to_dict())<line_sep>r.status_code=201<line_sep>r.headers['Location']=url_for('api.get_message' id=msg.id)<line_sep><return>r<block_end>@api.route('/messages' methods=['GET'])@token_optional_auth.login_required<def_stmt>get_messages <block_start>"""
Return list of messages.
This endpoint is publicly available, but if the client has a token it
should send it, as that indicates to the server that the user is online.
"""<line_sep>since=int(request.args.get('updated_since' '0'))<line_sep>day_ago=timestamp()-24<times>60<times>60<if_stmt>since<l>day_ago# do not return more than a day worth of messages
<block_start>since=day_ago<block_end>msgs=Message.query.filter(Message.updated_at<g>since).order_by(Message.updated_at)<line_sep><return>jsonify({'messages':[msg.to_dict()<for>msg msgs.all()]})<block_end>@api.route('/messages/<id>' methods=['GET'])@token_optional_auth.login_required<def_stmt>get_message id<block_start>"""
Return a message.
This endpoint is publicly available, but if the client has a token it
should send it, as that indicates to the server that the user is online.
"""<line_sep><return>jsonify(Message.query.get_or_404(id).to_dict())<block_end>@api.route('/messages/<id>' methods=['PUT'])@token_auth.login_required@async_task<def_stmt>edit_message id<block_start>"""
Modify an existing message.
This endpoint requires a valid user token.
Note: users are only allowed to modify their own messages.
"""<line_sep>msg=Message.query.get_or_404(id)<if_stmt>msg.user<ne>g.current_user<block_start>abort(403)<block_end>msg.from_dict(request.get_json()<or>{})<line_sep>db.session.add(msg)<line_sep>db.session.commit()<line_sep><return>'' 204<block_end>
|
# stdlib
<import_stmt>os<import_stmt>unittest<line_sep># 3p
<import_stmt>mock<line_sep># project
<import_from_stmt>checks.check_status AgentStatus<class_stmt>TestRunFiles(unittest.TestCase)<block_start>""" Tests that runfiles (.pid, .sock, .pickle etc.) are written to internal agent folders"""<line_sep># Mac run directory expected location
_my_dir=os.path.dirname(os.path.abspath(__file__))<line_sep>_mac_run_dir='/'.join(_my_dir.split('/')[:-4])<or>'/'<line_sep>_linux_run_dir='/opt/datadog-agent/run'<line_sep>@mock.patch('os.path.isdir' return_value=<true>)@mock.patch('checks.check_status._windows_commondata_path' return_value="C:\Windows\App Data")@mock.patch('utils.platform.Platform.is_win32' return_value=<true>)<def_stmt>test_agent_status_pickle_file_win32 self *mocks<block_start>''' Test pickle file location on win32 '''<line_sep>expected_path=os.path.join('C:\Windows\App Data' 'Datadog' 'AgentStatus.pickle')<line_sep># check AgentStatus pickle created
self.assertEqual(AgentStatus._get_pickle_path() expected_path)<block_end>@mock.patch('utils.pidfile.PidFile.get_dir' return_value=_mac_run_dir)@mock.patch('utils.platform.Platform.is_win32' return_value=<false>)@mock.patch('utils.platform.Platform.is_mac' return_value=<true>)<def_stmt>test_agent_status_pickle_file_mac_dmg self *mocks<block_start>''' Test pickle file location when running a Mac DMG install '''<line_sep>expected_path=os.path.join(self._mac_run_dir 'AgentStatus.pickle')<line_sep>self.assertEqual(AgentStatus._get_pickle_path() expected_path)<block_end>@mock.patch('utils.pidfile.tempfile.gettempdir' return_value='/a/test/tmp/dir')@mock.patch('utils.pidfile.PidFile.get_dir' return_value='')@mock.patch('utils.platform.Platform.is_win32' return_value=<false>)@mock.patch('utils.platform.Platform.is_mac' return_value=<true>)<def_stmt>test_agent_status_pickle_file_mac_source self *mocks<block_start>''' Test pickle file location when running a Mac source install '''<line_sep>expected_path=os.path.join('/a/test/tmp/dir' 'AgentStatus.pickle')<line_sep>self.assertEqual(AgentStatus._get_pickle_path() expected_path)<block_end>@mock.patch('os.path.isdir' return_value=<true>)@mock.patch('utils.pidfile.PidFile.get_dir' return_value=_linux_run_dir)@mock.patch('utils.platform.Platform.is_win32' return_value=<false>)@mock.patch('utils.platform.Platform.is_mac' return_value=<false>)<def_stmt>test_agent_status_pickle_file_linux self *mocks<block_start>''' Test pickle file location when running on Linux '''<line_sep>expected_path=os.path.join('/opt/datadog-agent/run' 'AgentStatus.pickle')<line_sep>self.assertEqual(AgentStatus._get_pickle_path() expected_path)<block_end><block_end>
|
<import_stmt>os<import_stmt>subprocess<import_from_stmt>multiprocessing.pool Pool<import_stmt>miditoolkit<import_stmt>pandas<as>pd<import_stmt>pretty_midi<import_from_stmt>tqdm tqdm<import_stmt>numpy<as>np<import_stmt>pickle<import_from_stmt>copy deepcopy<import_from_stmt>midi_preprocess.utils.hparams hparams<import_stmt>midi_preprocess.steps.track_separate<as>tc<def_stmt>filter_and_merge processed_data_dir instru2program<block_start>base_dir='midi_preprocess'<line_sep>melody_model=pickle.load(open(f'{base_dir}/model/melody_model_new' 'rb'))<line_sep>bass_model=pickle.load(open(f'{base_dir}/model/bass_model' 'rb'))<line_sep>chord_model=pickle.load(open(f'{base_dir}/model/chord_model' 'rb'))<line_sep>df=pd.read_csv(open(f'{processed_data_dir}/meta.csv'))<line_sep>print(f"| load #midi infos: {df.shape[0]}.")<line_sep>pool=Pool(int(os.getenv('N_PROC' os.cpu_count())))<line_sep>save_dir=f'{processed_data_dir}/midi_recog_tracks'<line_sep>subprocess.check_call(f'rm -rf "{save_dir}"' shell=<true>)<line_sep>futures=[pool.apply_async(filter_recog_merge_job args=[midi_info['path'] midi_info instru2program save_dir melody_model bass_model chord_model])<for>idx,midi_info df.iterrows()]<line_sep>pool.close()<line_sep>merged_infos=[]<for_stmt>f,(idx midi_info) zip(tqdm(futures) df.iterrows())<block_start>res=f.get()<line_sep>merged_info={}<line_sep>merged_info.update(midi_info)<if_stmt>isinstance(res str)<block_start>merged_info['msg']=res<block_end><else_stmt><block_start>merged_info['msg']=''<line_sep>merged_info.update(res)<block_end>merged_infos.append(merged_info)<block_end>df=pd.DataFrame(merged_infos)<line_sep>df=df.set_index(['id'])<line_sep>df.to_csv(f'{processed_data_dir}/meta.csv')<line_sep>pool.join()<line_sep>n_merged=len([x<for>x merged_infos<if>x['msg']<eq>''])<line_sep>print(f"| merged #midi: {n_merged}")<block_end><def_stmt>predict_track_with_model midi_path melody_model bass_model chord_model<block_start><try_stmt><block_start>ret=tc.cal_file_features(midi_path)# remove empty track and calculate the features
features,pm=ret<block_end><except_stmt>Exception<as>e<block_start>features=<none><line_sep>pm=pretty_midi.PrettyMIDI(midi_path)<block_end><if_stmt>features<is><none><and>pm<is><none><block_start>pm=pretty_midi.PrettyMIDI(midi_path)<block_end><if_stmt>features<is><none><or>features.shape[0]<eq>0<block_start><return>pm [] []<block_end>features=tc.add_labels(features)# add label
tc.remove_file_duplicate_tracks(features pm)# delete duplicate track
features=tc.predict_labels(features melody_model bass_model chord_model)# predict lead, bass, chord
predicted_melody_tracks_idx=np.where(features.melody_predict)[0]<line_sep>predicted_bass_tracks_idx=np.where(features.bass_predict)[0]<line_sep>melody_tracks_idx=np.concatenate((predicted_melody_tracks_idx np.where(features.is_melody)[0]))<line_sep>bass_tracks_idx=np.concatenate((predicted_bass_tracks_idx np.where(features.is_bass)[0]))<line_sep><return>pm melody_tracks_idx bass_tracks_idx<block_end><def_stmt>filter_recog_merge_job midi_path midi_info instru2program save_dir melody_model bass_model chord_model<block_start>filter_msg=filter_tracks(midi_info)<if_stmt>filter_msg<ne>''<block_start><return>filter_msg<block_end>pm,melody_tracks_idx,bass_tracks_idx=predict_track_with_model(midi_path melody_model bass_model chord_model)<if_stmt>pm<is><none><block_start><return>'pm is None'<block_end>pm_new=deepcopy(pm)<line_sep>pm_new.instruments=[]<for_stmt>i,instru_old enumerate(pm.instruments)<block_start>program_old=instru_old.program<line_sep>instru=deepcopy(instru_old)<if_stmt>i<in>melody_tracks_idx<and>'MUMIDI_'<not><in>instru.name<or>instru.name<eq>'MUMIDI_Lead'<block_start>instru.name='Lead'<block_end><elif_stmt>i<in>bass_tracks_idx<and>'MUMIDI_'<not><in>instru.name<or>instru.name<eq>'MUMIDI_Bass'<block_start>instru.name='Bass'<block_end><elif_stmt>instru_old.is_drum<and>'MUMIDI_'<not><in>instru.name<or>instru.name<eq>'MUMIDI_Drums'# drum
<block_start>instru.name='Drums'<block_end><elif_stmt>program_old<floordiv>8<eq>0<and>'MUMIDI_'<not><in>instru.name<or>instru.name<eq>'MUMIDI_Piano'# piano
<block_start>instru.name='Piano'<block_end><elif_stmt>program_old<floordiv>8<eq>3<and>'MUMIDI_'<not><in>instru.name<or>instru.name<eq>'MUMIDI_Guitar'# guitar
<block_start>instru.name='Guitar'<block_end><elif_stmt>40<le>program_old<le>54<and>'MUMIDI_'<not><in>instru.name<or>instru.name<eq>'MUMIDI_Strings'# string
<block_start>instru.name='Strings'<block_end><elif_stmt>73<le>program_old<le>88# Lead
<block_start>instru.name='Lead'<block_end><elif_stmt>program_old<floordiv>8<eq>4# Bass
<block_start>instru.name='Bass'<block_end><else_stmt><block_start>instru.name='UnRec'<block_end>instru.program=instru_old.program<line_sep>pm_new.instruments.append(instru)<block_end>os.makedirs(save_dir exist_ok=<true>)<line_sep>out_path=f"{save_dir}/{midi_info['id']}.mid"<line_sep>pm_new.write(out_path)<line_sep>merged_midi_info=get_merged_midi_info(out_path instru2program)<line_sep>filter_msg=filter_tracks(midi_info)<if_stmt>filter_msg<ne>''<block_start><return>'[merged]'+filter_msg<block_end><return>merged_midi_info<block_end><def_stmt>filter_tracks midi_info# filter out too long n_beats > 10000, and too short n_beats < 16
<block_start><if_stmt>midi_info['n_beats']<g>hparams['max_n_beats']<or>midi_info['n_beats']<l>hparams['min_n_beats']<block_start><return>'invalid beats'<block_end><if_stmt>midi_info['n_notes']<l>hparams['min_n_notes']<block_start><return>'invalid n_notes'<block_end><if_stmt>midi_info['n_pitches']<l>hparams['min_n_pitches']<block_start><return>'Invalid pitches'<block_end><if_stmt>midi_info['cross_bar_rate']<g>hparams['max_cross_bar_rate']<block_start><return>'Invalid cross_bar'<block_end><return>''<block_end><def_stmt>get_merged_midi_info midi_fn instru2program<block_start><try_stmt><block_start>mf=miditoolkit.MidiFile(midi_fn)<block_end><except_stmt>KeyboardInterrupt<block_start><raise><block_end><except_stmt>Exception<as>e<block_start><return>str(e)<block_end># merge tracks
track_lists_to_merge=get_tracks_to_merge(mf instru2program)<line_sep>n_merge_track=[len(x)<for>x track_lists_to_merge]<line_sep>available_instrs=list(set([x2<for>x track_lists_to_merge<for>x2 x]))# Important for 6 tracks
# notes
all_vels=[x1.velocity<for>i,x enumerate(mf.instruments)<if>i<in>available_instrs<for>x1 x.notes]<line_sep># flatten the notes of all kept instruments into single lists
all_pitches=[x1.pitch<for>i,x enumerate(mf.instruments)<if>i<in>available_instrs<for>x1 x.notes]<line_sep>n_notes=len(all_vels)# total number of notes
<if_stmt>n_notes<eq>0<block_start><return>'empty tracks'<block_end>n_beats=max([x1.end<for>i,x enumerate(mf.instruments)<if>i<in>available_instrs<for>x1 x.notes])<floordiv>mf.ticks_per_beat+1<line_sep>n_instru=len(mf.instruments)<line_sep>n_pitches=len(set(all_pitches))# pitch classes
vel_mean=np.mean(all_vels)<line_sep>vel_std=np.std(all_vels)<line_sep>n_cross_bar=0<for_stmt>i,instru enumerate(mf.instruments)<block_start><if_stmt>i<not><in>available_instrs<block_start><continue><block_end><for_stmt>n instru.notes<block_start>start_beat=n.start/mf.ticks_per_beat<line_sep>end_beat=n.end/mf.ticks_per_beat<if_stmt>(start_beat+0.25)<floordiv>4<l>(end_beat-0.25)<floordiv>4<and>start_beat%4<g>0.125<block_start>n_cross_bar<augadd>1<block_end><block_end><block_end><return>{'path_recog_tracks':midi_fn # velocity
'vel_mean':vel_mean 'vel_std':vel_std # stats
'n_notes':n_notes 'n_instru':n_instru 'n_beats':n_beats 'n_pitches':n_pitches 'n_cross_bar':n_cross_bar # tracks
'n_tracks':n_merge_track 'track_lists_to_merge':track_lists_to_merge }<block_end><def_stmt>get_tracks_to_merge mf instru2program<block_start>track_lists_to_merge=[[]<for>_ range(6)]<line_sep>instru_order={v:k<for>k,v enumerate(instru2program.keys())}<for_stmt>idx,instr enumerate(mf.instruments)<block_start>instr_name=instr.name<if_stmt>instr_name<in>instru_order<block_start>track_lists_to_merge[instru_order[instr_name]].append(idx)<block_end><block_end><return>track_lists_to_merge<block_end>
|
<class_stmt>Node<block_start><def_stmt>__init__ self value<block_start>self.value=value<line_sep>self.l=<none><line_sep>self.r=<none><block_end><block_end><class_stmt>BinaryTree<block_start><def_stmt>__init__ self<block_start>self.root=<none><block_end><def_stmt>draw self<block_start>'''Prints a preorder traversal of the tree'''<line_sep>self._draw(self.root)<line_sep>print()<block_end><def_stmt>_draw self node<block_start>print("(" end="")<if_stmt>node<ne><none><block_start>print(str(node.value)+", " end="")<line_sep>self._draw(node.l)<line_sep>print(", " end="")<line_sep>self._draw(node.r)<block_end>print(")" end="")<block_end><def_stmt>invert self<block_start>self.root=self._invert(self.root)<block_end><def_stmt>_invert self node# find lowest point where nodes can be swapped
# swap nodes
<block_start><if_stmt>node<block_start>node.l=self._invert(node.l)<line_sep>node.r=self._invert(node.r)<line_sep>temp=node.l<line_sep>node.l=node.r<line_sep>node.r=temp<block_end><return>node<block_end><def_stmt>add self vals<block_start><for_stmt>val vals<block_start><if_stmt>self.root<eq><none><block_start>self.root=Node(val)<block_end><else_stmt><block_start>self._add(self.root val)<block_end><block_end><block_end><def_stmt>_add self node val<block_start><if_stmt>val<l>node.value<block_start><if_stmt>node.l<eq><none><block_start>node.l=Node(val)<block_end><else_stmt><block_start>self._add(node.l val)<block_end><block_end><else_stmt><block_start><if_stmt>node.r<eq><none><block_start>node.r=Node(val)<block_end><else_stmt><block_start>self._add(node.r val)<block_end><block_end><block_end><block_end><def_stmt>main <block_start>t=BinaryTree()<line_sep>t.add([4 2 7 1 3 6 9 11])<line_sep>t.draw()<line_sep>t.invert()<line_sep>t.draw()<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
|
<import_stmt>unittest<import_stmt>mock<import_from_stmt>steam guard<as>g<class_stmt>TCguard(unittest.TestCase)<block_start><def_stmt>test_generate_twofactor_code_for_time self<block_start>code=g.generate_twofactor_code_for_time(b'superdupersecret' timestamp=3000030)<line_sep>self.assertEqual(code 'YRGQJ')<line_sep>code=g.generate_twofactor_code_for_time(b'superdupersecret' timestamp=3000029)<line_sep>self.assertEqual(code '94R9D')<block_end><def_stmt>test_generate_confirmation_key self<block_start>key=g.generate_confirmation_key(b'itsmemario' '' 100000)<line_sep>self.assertEqual(key b'\<KEY>')<line_sep>key=g.generate_confirmation_key(b'itsmemario' 'allow' 100000)<line_sep>self.assertEqual(key b"<KEY>")<block_end><block_end>
|
<import_stmt>unittest<import_from_stmt>nlu *<class_stmt>TestQuestions(unittest.TestCase)<block_start><def_stmt>test_questions_model self<block_start>pipe=nlu.load('questions' verbose=<true>)<line_sep>data=['I love pancaces. I hate Mondays' 'I love Fridays']<line_sep>df=pipe.predict(data output_level='sentence')<for_stmt>c df.columns<block_start>print(df[c])<block_end>df=pipe.predict(['I love pancaces. I hate Mondays' 'I love Fridays'] output_level='document')<for_stmt>c df.columns<block_start>print(df[c])<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
|
#! /usr/bin/python
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>errno<import_stmt>locale<import_stmt>subprocess<import_stmt>random<import_stmt>sys<import_from_stmt>st2common.runners.base_action Action<class_stmt>DigAction(Action)<block_start><def_stmt>run self rand count nameserver hostname queryopts<block_start>opt_list=[]<line_sep>output=[]<line_sep>cmd_args=["dig"]<if_stmt>nameserver<block_start>nameserver="@"+nameserver<line_sep>cmd_args.append(nameserver)<block_end><if_stmt>isinstance(queryopts str)<and>","<in>queryopts<block_start>opt_list=queryopts.split(",")<block_end><else_stmt><block_start>opt_list.append(queryopts)<block_end>cmd_args.extend(["+"+option<for>option opt_list])<line_sep>cmd_args.append(hostname)<try_stmt><block_start>raw_result=subprocess.Popen(cmd_args stderr=subprocess.PIPE stdout=subprocess.PIPE).communicate()[0]<if_stmt>sys.version_info<ge>(3 )# This function might call getpreferred encoding unless we pass
# do_setlocale=False.
<block_start>encoding=locale.getpreferredencoding(do_setlocale=<false>)<line_sep>result_list_str=raw_result.decode(encoding)<block_end><else_stmt><block_start>result_list_str=str(raw_result)<block_end>result_list=list(filter(<none> result_list_str.split("\n")))<block_end># NOTE: Python3 supports FileNotFoundError; the errno.ENOENT check is for py2 compat
# for Python3:
# except FileNotFoundError as e:
<except_stmt>OSError<as>e<block_start><if_stmt>e.errno<eq>errno.ENOENT<block_start><return>(<false> "Can't find dig installed in the path (usually /usr/bin/dig). If "<concat>"dig isn't installed, you can install it with 'sudo yum install "<concat>"bind-utils' or 'sudo apt install dnsutils'" )<block_end><else_stmt><block_start><raise>e<block_end><block_end><if_stmt>int(count)<g>len(result_list)<or>count<le>0<block_start>count=len(result_list)<block_end>output=result_list[0:count]<if_stmt>rand<is><true><block_start>random.shuffle(output)<block_end><return>output<block_end><block_end>
|
<import_stmt>mock<import_stmt>paasta_tools.paastaapi.models<as>paastamodels<import_from_stmt>paasta_tools.autoscaling.pause_service_autoscaler delete_service_autoscale_pause_time <import_from_stmt>paasta_tools.autoscaling.pause_service_autoscaler get_service_autoscale_pause_time <import_from_stmt>paasta_tools.autoscaling.pause_service_autoscaler update_service_autoscale_pause_time <line_sep>@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client" autospec=<true>)<def_stmt>test_get_service_autoscale_pause_time_error mock_client<block_start>mock_client.get_paasta_oapi_client.return_value=<none><line_sep>return_code=get_service_autoscale_pause_time("cluster1")<assert_stmt>return_code<eq>1<line_sep>mock_client.get_paasta_oapi_client.assert_called_with(cluster="cluster1" http_res=<true>)<line_sep>mock_api=mock.Mock()<line_sep>mock_client.get_paasta_oapi_client.return_value=mock.Mock(default=mock_api)<line_sep>mock_api.get_service_autoscaler_pause.return_value=(<none> 500 <none> )<line_sep>return_code=get_service_autoscale_pause_time("cluster1")<assert_stmt>return_code<eq>2<block_end>@mock.patch("builtins.print" autospec=<true>)@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.time" autospec=<true>)@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client" autospec=<true>)<def_stmt>test_get_service_autoscale_pause_time_not mock_client mock_time mock_print<block_start>mock_api=mock.Mock()<line_sep>mock_client.get_paasta_oapi_client.return_value=mock.Mock(default=mock_api)<line_sep>mock_api.get_service_autoscaler_pause.return_value=("3" 200 <none>)<line_sep>mock_time.time.return_value=4<line_sep>return_code=get_service_autoscale_pause_time("cluster1")<line_sep>mock_print.assert_called_with("Service autoscaler is not paused")<assert_stmt>return_code<eq>0<block_end>@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.print_paused_message" autospec=<true> )@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.time" autospec=<true>)@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client" autospec=<true>)<def_stmt>test_get_service_autoscale_pause_time_paused mock_client mock_time mock_print_paused_message<block_start>mock_api=mock.Mock()<line_sep>mock_client.get_paasta_oapi_client.return_value=mock.Mock(default=mock_api)<line_sep>mock_api.get_service_autoscaler_pause.return_value=("3" 200 <none>)<line_sep>mock_time.time.return_value=2<line_sep>return_code=get_service_autoscale_pause_time("cluster1")<line_sep>mock_print_paused_message.assert_called_with(3.0)<assert_stmt>return_code<eq>0<block_end>@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client" autospec=<true>)<def_stmt>test_update_service_autoscale_pause_time mock_client<block_start>mock_client.get_paasta_oapi_client.return_value=<none><line_sep>return_code=update_service_autoscale_pause_time("cluster1" "2")<assert_stmt>return_code<eq>1<line_sep>mock_client.get_paasta_oapi_client.assert_called_with(cluster="cluster1" http_res=<true>)<line_sep>mock_api=mock.Mock()<line_sep>mock_client.get_paasta_oapi_client.return_value=mock.Mock(default=mock_api)<line_sep>mock_api.update_service_autoscaler_pause=mock_update=mock.Mock()<line_sep>mock_update.return_value=(<none> 500 <none>)<line_sep>return_code=update_service_autoscale_pause_time("cluster1" "3")<line_sep>mock_update.assert_called_once_with(paastamodels.InlineObject(minutes=3) _return_http_data_only=<false>)<assert_stmt>return_code<eq>2<line_sep>mock_update.return_value=(<none> 200 
<none>)<line_sep>return_code=update_service_autoscale_pause_time("cluster1" "2")<assert_stmt>return_code<eq>0<block_end>@mock.patch("paasta_tools.autoscaling.pause_service_autoscaler.client" autospec=<true>)@mock.patch("paasta_tools.paastaapi.apis.DefaultApi" autospec=<true>)<def_stmt>test_delete_service_autoscale_pause_time mock_default_api mock_client<block_start>mock_client.get_paasta_oapi_client.return_value=<none><line_sep>return_code=delete_service_autoscale_pause_time("cluster1")<assert_stmt>return_code<eq>1<line_sep>mock_client.get_paasta_oapi_client.assert_called_with(cluster="cluster1" http_res=<true>)<line_sep>mock_api=mock.Mock()<line_sep>mock_client.get_paasta_oapi_client.return_value=mock.Mock(default=mock_api)<line_sep>mock_api.delete_service_autoscaler_pause=mock_delete=mock.Mock()<line_sep>mock_delete.return_value=(<none> 500 <none>)<line_sep>return_code=delete_service_autoscale_pause_time("cluster1")<line_sep>mock_delete.assert_called_once_with(_return_http_data_only=<false>)<assert_stmt>return_code<eq>2<line_sep>mock_delete.return_value=(<none> 200 <none>)<line_sep>return_code=delete_service_autoscale_pause_time("cluster1")<assert_stmt>return_code<eq>0<block_end>
|
<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<line_sep>np.random.seed(1234)<import_stmt>os<import_stmt>time<import_stmt>datetime<import_from_stmt>argparse ArgumentParser ArgumentDefaultsHelpFormatter<import_from_stmt>builddata *<import_from_stmt>model ConvKB<line_sep># Parameters
# ==================================================
parser=ArgumentParser("ConvKB" formatter_class=ArgumentDefaultsHelpFormatter conflict_handler='resolve')<line_sep>parser.add_argument("--data" default="../data/" help="Data sources.")<line_sep>parser.add_argument("--run_folder" default="../" help="Data sources.")<line_sep>parser.add_argument("--name" default="WN18RR" help="Name of the dataset.")<line_sep>parser.add_argument("--embedding_dim" default=50 type=int help="Dimensionality of character embedding")<line_sep>parser.add_argument("--filter_sizes" default="1" help="Comma-separated filter sizes")<line_sep>parser.add_argument("--num_filters" default=500 type=int help="Number of filters per filter size")<line_sep>parser.add_argument("--dropout_keep_prob" default=1.0 type=float help="Dropout keep probability")<line_sep>parser.add_argument("--l2_reg_lambda" default=0.001 type=float help="L2 regularization lambda")<line_sep>parser.add_argument("--learning_rate" default=0.0001 type=float help="Learning rate")<line_sep>parser.add_argument("--is_trainable" default=<true> type=bool help="")<line_sep>parser.add_argument("--batch_size" default=128 type=int help="Batch Size")<line_sep>parser.add_argument("--neg_ratio" default=1.0 type=float help="Number of negative triples generated by positive")<line_sep>parser.add_argument("--num_epochs" default=201 type=int help="Number of training epochs")<line_sep>parser.add_argument("--saveStep" default=200 type=int help="")<line_sep>parser.add_argument("--allow_soft_placement" default=<true> type=bool help="Allow device soft device placement")<line_sep>parser.add_argument("--log_device_placement" default=<false> type=bool help="Log placement of ops on devices")<line_sep>parser.add_argument("--model_name" default='wn18rr' help="")<line_sep>parser.add_argument("--useConstantInit" action='store_true')<line_sep>parser.add_argument("--model_index" default='200' help="")<line_sep>parser.add_argument("--seed" default=1234 type=int help="")<line_sep>parser.add_argument("--num_splits" default=8 type=int help="Split the validation set into 8 parts for a faster evaluation")<line_sep>parser.add_argument("--testIdx" default=1 type=int help="From 0 to 7. Index of one of 8 parts")<line_sep>parser.add_argument("--decode" action='store_false')<line_sep>args=parser.parse_args()<line_sep>print(args)<line_sep># Load data
print("Loading data...")<line_sep>train,valid,test,words_indexes,indexes_words,headTailSelector,entity2id,id2entity,relation2id,id2relation=build_data(path=args.data name=args.name)<line_sep>data_size=len(train)<line_sep>train_batch=Batch_Loader(train words_indexes indexes_words headTailSelector entity2id id2entity relation2id id2relation batch_size=args.batch_size neg_ratio=args.neg_ratio)<line_sep>entity_array=np.array(list(train_batch.indexes_ents.keys()))<line_sep>lstEmbed=[]<line_sep>#Using the pre-trained embeddings.
print("Using pre-trained model.")<line_sep>lstEmbed=np.empty([len(words_indexes) args.embedding_dim]).astype(np.float32)<line_sep>initEnt,initRel=init_norm_Vector(args.data+args.name+'/relation2vec'+str(args.embedding_dim)+'.init' args.data+args.name+'/entity2vec'+str(args.embedding_dim)+'.init' args.embedding_dim)<for_stmt>_word words_indexes<block_start><if_stmt>_word<in>relation2id<block_start>index=relation2id[_word]<line_sep>_ind=words_indexes[_word]<line_sep>lstEmbed[_ind]=initRel[index]<block_end><elif_stmt>_word<in>entity2id<block_start>index=entity2id[_word]<line_sep>_ind=words_indexes[_word]<line_sep>lstEmbed[_ind]=initEnt[index]<block_end><else_stmt><block_start>print('*****************Error********************!')<line_sep><break><block_end><block_end>lstEmbed=np.array(lstEmbed dtype=np.float32)<assert_stmt>len(words_indexes)%(len(entity2id)+len(relation2id))<eq>0<line_sep>print("Loading data... finished!")<line_sep>x_valid=np.array(list(valid.keys())).astype(np.int32)<line_sep>y_valid=np.array(list(valid.values())).astype(np.float32)<line_sep>x_test=np.array(list(test.keys())).astype(np.int32)<line_sep>y_test=np.array(list(test.values())).astype(np.float32)<line_sep># Training
# ==================================================
<with_stmt>tf.Graph().as_default()<block_start>tf.set_random_seed(args.seed)<line_sep>session_conf=tf.ConfigProto(allow_soft_placement=args.allow_soft_placement log_device_placement=args.log_device_placement)<line_sep>session_conf.gpu_options.allow_growth=<true><line_sep>sess=tf.Session(config=session_conf)<with_stmt>sess.as_default()<block_start>global_step=tf.Variable(0 name="global_step" trainable=<false>)<line_sep>cnn=ConvKB(sequence_length=x_valid.shape[1] # 3
num_classes=y_valid.shape[1] # 1
pre_trained=lstEmbed embedding_size=args.embedding_dim filter_sizes=list(map(int args.filter_sizes.split(","))) num_filters=args.num_filters vocab_size=len(words_indexes) l2_reg_lambda=args.l2_reg_lambda is_trainable=args.is_trainable useConstantInit=args.useConstantInit)<line_sep>optimizer=tf.train.AdamOptimizer(learning_rate=args.learning_rate)<line_sep># optimizer = tf.train.RMSPropOptimizer(learning_rate=args.learning_rate)
# optimizer = tf.train.GradientDescentOptimizer(learning_rate=args.learning_rate)
grads_and_vars=optimizer.compute_gradients(cnn.loss)<line_sep>train_op=optimizer.apply_gradients(grads_and_vars global_step=global_step)<line_sep>out_dir=os.path.abspath(os.path.join(args.run_folder "runs" args.model_name))<line_sep>print("Writing to {}\n".format(out_dir))<line_sep># Checkpoint directory. Tensorflow assumes this directory already exists so we need to create it
checkpoint_dir=os.path.abspath(os.path.join(out_dir "checkpoints"))<line_sep>checkpoint_prefix=os.path.join(checkpoint_dir "model")<if_stmt><not>os.path.exists(checkpoint_dir)<block_start>os.makedirs(checkpoint_dir)<block_end># Initialize all variables
sess.run(tf.global_variables_initializer())<def_stmt>train_step x_batch y_batch<block_start>"""
A single training step
"""<line_sep>feed_dict={cnn.input_x:x_batch cnn.input_y:y_batch cnn.dropout_keep_prob:args.dropout_keep_prob }<line_sep>_,step,loss=sess.run([train_op global_step cnn.loss] feed_dict)<if_stmt>step%10000<eq>0<block_start>print(step)<block_end><block_end>num_batches_per_epoch=int((data_size-1)/args.batch_size)+1<for_stmt>epoch range(args.num_epochs)<block_start><for_stmt>batch_num range(num_batches_per_epoch)<block_start>x_batch,y_batch=train_batch()<line_sep>train_step(x_batch y_batch)<line_sep>current_step=tf.train.global_step(sess global_step)<block_end><if_stmt>epoch<ge>0<block_start><if_stmt>epoch%args.saveStep<eq>0<block_start>path=cnn.saver.save(sess checkpoint_prefix global_step=epoch)<line_sep>print("Saved model checkpoint to {}\n".format(path))<block_end><block_end><block_end><block_end><block_end>
|
<import_stmt>os<import_from_stmt>aztk.models.plugins.plugin_configuration PluginConfiguration PluginPort PluginTargetRole<import_from_stmt>aztk.models.plugins.plugin_file PluginFile<line_sep>dir_path=os.path.dirname(os.path.realpath(__file__))<class_stmt>HDFSPlugin(PluginConfiguration)<block_start><def_stmt>__init__ self<block_start>super().__init__(name="hdfs" ports=[PluginPort(name="File system metadata operations" internal=8020) PluginPort(name="File system metadata operations(Backup)" internal=9000) PluginPort(name="Datanode data transfer" internal=50010) PluginPort(name="Datanode IPC metadata operations" internal=50020) PluginPort(name="Namenode" internal=50070 public=<true>) PluginPort(name="Datanodes" internal=50075 public=<true>) ] target_role=PluginTargetRole.All execute="hdfs.sh" files=[PluginFile("hdfs.sh" os.path.join(dir_path "hdfs.sh"))] )<block_end><block_end>
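The same PluginConfiguration / PluginPort / PluginFile pattern can be reused for other services. The plugin below is purely hypothetical (name, port number, and startup script are made up) and only illustrates the shape of such a definition using the classes imported above:

import os
from aztk.models.plugins.plugin_configuration import PluginConfiguration, PluginPort, PluginTargetRole
from aztk.models.plugins.plugin_file import PluginFile

dir_path = os.path.dirname(os.path.realpath(__file__))

class WebUIPlugin(PluginConfiguration):  # hypothetical example plugin
    def __init__(self):
        super().__init__(
            name="web_ui",                                                   # made-up name
            ports=[PluginPort(name="Web UI", internal=8080, public=True)],   # made-up port
            target_role=PluginTargetRole.All,
            execute="web_ui.sh",                                             # hypothetical startup script
            files=[PluginFile("web_ui.sh", os.path.join(dir_path, "web_ui.sh"))],
        )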
|
<import_stmt>os<import_from_stmt>yacs.config CfgNode<as>CN<import_from_stmt>.constants IMAGENET_DEFAULT_MEAN IMAGENET_DEFAULT_STD DEFAULT_CROP_PCT<line_sep>_C=CN()<line_sep>_C.root_dir=os.getcwd()# root dir
_C.seed=-1.0# random seed (default: 42)
_C.logger_name='log'# log name
_C.amp=<false># use NVIDIA amp for mixed precision training
_C.num_gpus=1<line_sep>_C.distributed=<false><line_sep># data
_C.data_loader=CN()<line_sep>_C.data_loader.data_path=''# path to dataset, data_dir
_C.data_loader.batch_size=32# input batch size for training (default: 32)
_C.data_loader.vbatch_size=32# validation batch size
_C.data_loader.workers=0# how many training processes to use (default: 1)
_C.data_loader.pin_mem=<false># Pin CPU memory in DataLoader for more efficient (sometimes) transfer to GPU.
_C.data_loader.prefetcher=<true># enable fast prefetcher
_C.data_loader.use_multi_epochs_loader=<false># use the multi-epochs-loader to save time at the beginning of every epoch
_C.data_loader.dataset='imagenet'# imagenet, cifar10, cifar100
# model
_C.model=CN()<line_sep>_C.model.name='resnet50'# Name of model to train
_C.model.pretrained=<false># Start with pretrained version of specified network (if avail)
_C.model.initial_checkpoint=''# Initialize model from this checkpoint (default: none)
_C.model.resume=''# Resume full model and optimizer state from checkpoint (default: none)
_C.model.no_resume_opt=<false># prevent resume of optimizer state when resuming model
_C.model.num_classes=1000# number of label classes (default: 1000)
_C.model.gp='avg'# Type of global pool, "avg", "max", "avgmax", "avgmaxc" (default: "avg")
_C.model.drop=0.0# Dropout rate (default: 0.)
_C.model.drop_path=0.0# Drop path rate (default None)
_C.model.drop_block=0.0# Drop block rate (default None)
_C.model.model_ema=<false># Enable tracking moving average of model weights
_C.model.model_ema_force_cpu=<false># Force ema to be tracked on CPU, rank=0 node only. Disables EMA validation.
_C.model.model_ema_decay=0.9998# decay factor for model weights moving average (default: 0.9998)
_C.model.block_name='type1'<line_sep># BN
_C.BN=CN()<line_sep>_C.BN.bn_tf=<false># Use Tensorflow BatchNorm defaults for models that support it (default: False)
_C.BN.bn_momentum=-1.0# BatchNorm momentum override (if not None) default None
_C.BN.bn_eps=-1.0# BatchNorm epsilon override (if not None) default None
_C.BN.sync_bn=<false># Enable NVIDIA Apex or Torch synchronized BatchNorm.
_C.BN.dist_bn=''# Distribute BatchNorm stats between nodes after each epoch ("broadcast", "reduce", or "")
_C.BN.split_bn=<false># Enable separate BN layers per augmentation split.
# augmentation
_C.augmentation=CN()<line_sep>_C.augmentation.no_aug=<false><line_sep>_C.augmentation.scale=[0.08 1.0]<line_sep>_C.augmentation.ratio=[0.75 1.333333333333]<line_sep>_C.augmentation.hflip=0.5<line_sep>_C.augmentation.vflip=0.0<line_sep>_C.augmentation.interpolation=''# Image resize interpolation type (overrides model)
_C.augmentation.color_jitter=0.4# Color jitter factor (default: 0.4)
_C.augmentation.aa=''# Use AutoAugment policy. "v0" or "original". (default None)
_C.augmentation.aug_splits=0# Number of augmentation splits (default: 0, valid: 0 or >=2)
_C.augmentation.reprob=0.0# Random erase prob (default: 0.)
_C.augmentation.remode='const'# Random erase mode (default: "const")
_C.augmentation.recount=1# Random erase count (default: 1)
_C.augmentation.resplit=<false># Do not random erase first (clean) augmentation split
_C.augmentation.mixup=0.0# mixup alpha, mixup enabled if > 0. (default: 0.)
_C.augmentation.mixup_off_epoch=0# turn off mixup after this epoch, disabled if 0 (default: 0)
_C.augmentation.cutmix=0.0<line_sep>_C.augmentation.cutmix_minmax=[]<line_sep>_C.augmentation.mixup_prob=1.0<line_sep>_C.augmentation.mixup_switch_prob=0.5<line_sep>_C.augmentation.mixup_mode='batch'<line_sep>_C.augmentation.train_interpolation='random'# Training interpolation (random, bilinear, bicubic default: "random")
_C.augmentation.tta=0# Test/inference time augmentation (oversampling) factor. 0=None (default: 0)
_C.augmentation.img_size=-1# Image patch size (default: None => model default)
_C.augmentation.crop_pct=-1.0# Input image center crop percent (for validation only)
_C.augmentation.mean=[]# Override mean pixel value of dataset
_C.augmentation.std=[]# Override std deviation of dataset
# loss
_C.loss=CN()<line_sep>_C.loss.jsd=<false># Enable Jensen-Shannon Divergence + CE loss. Use with `--aug-splits`.
_C.loss.smoothing=0.1# label smoothing (default: 0.1)
# solver
_C.solver=CN()<line_sep>_C.solver.opt='sgd'# Optimizer (default: "sgd")
_C.solver.opt_eps=1e-8# Optimizer Epsilon (default: 1e-8)
_C.solver.momentum=0.9# SGD momentum (default: 0.9)
_C.solver.weight_decay=0.0001# weight decay (default: 0.0001)
_C.solver.sched='step'# LR scheduler (default: "step")
_C.solver.lr=0.01# learning rate (default: 0.01)
_C.solver.lr_noise=[]# learning rate noise on/off epoch percentages default None
_C.solver.lr_noise_pct=0.67# learning rate noise limit percent (default: 0.67)
_C.solver.lr_noise_std=1.0# learning rate noise std-dev (default: 1.0)
_C.solver.lr_cycle_mul=1.0# learning rate cycle len multiplier (default: 1.0)
_C.solver.lr_cycle_limit=1# learning rate cycle limit
_C.solver.warmup_lr=0.0001# warmup learning rate (default: 0.0001)
_C.solver.min_lr=1e-5# lower lr bound for cyclic schedulers that hit 0 (1e-5)
_C.solver.epochs=200# number of epochs to train (default: 200)
_C.solver.start_epoch=-1# manual epoch number (useful on restarts) default None
_C.solver.decay_epochs=30# epoch interval to decay LR
_C.solver.warmup_epochs=3# epochs to warmup LR, if scheduler supports
_C.solver.cooldown_epochs=10# epochs to cooldown LR at min_lr, after cyclic schedule ends
_C.solver.patience_epochs=10# patience epochs for Plateau LR scheduler (default: 10)
_C.solver.decay_rate=0.1# LR decay rate (default: 0.1)
_C.solver.log_interval=50# how many batches to wait before logging training status
_C.solver.recovery_interval=0# how many batches to wait before writing recovery checkpoint
_C.solver.clip_grad=-1.0<line_sep>_C.solver.clip_mode='norm'<line_sep>_C.solver.use_swa=<false><line_sep>_C.solver.swa_start=75<line_sep>_C.solver.swa_freq=1<line_sep># eval
_C.eval=CN()<line_sep>_C.eval.eval_metric='top1'# Best metric (default: "top1")
<def_stmt>pop_unused_value cfg<block_start><if_stmt>cfg.BN.bn_momentum<l>0<block_start>cfg.BN.pop('bn_momentum')<block_end><if_stmt>cfg.BN.bn_eps<l>0<block_start>cfg.BN.pop('bn_eps')<block_end><if_stmt>len(cfg.solver.lr_noise)<eq>0<block_start>cfg.solver.pop('lr_noise')<block_end><if_stmt>cfg.solver.start_epoch<l>0<block_start>cfg.solver.pop('start_epoch')<block_end><if_stmt>cfg.model.drop_path<eq>0<block_start>cfg.model.pop('drop_path')<block_end><if_stmt>cfg.model.drop_block<eq>0<block_start>cfg.model.pop('drop_block')<block_end><if_stmt>len(cfg.augmentation.aa)<eq>0<block_start>cfg.augmentation.pop('aa')<block_end><if_stmt>cfg.augmentation.img_size<le>0<block_start>cfg.augmentation.pop('img_size')<block_end><if_stmt>cfg.augmentation.crop_pct<le>0<block_start>cfg.augmentation.pop('crop_pct')<block_end><if_stmt>len(cfg.augmentation.mean)<eq>0<block_start>cfg.augmentation.pop('mean')<block_end><if_stmt>len(cfg.augmentation.std)<eq>0<block_start>cfg.augmentation.pop('std')<block_end><block_end><def_stmt>resolve_data_config cfg default_cfg={} model=<none><block_start>new_config={}<line_sep>default_cfg=default_cfg<if_stmt><not>default_cfg<and>model<is><not><none><and>hasattr(model 'default_cfg')<block_start>default_cfg=model.default_cfg<block_end># Resolve input/image size
in_chans=3<line_sep>input_size=(in_chans 224 224)<if_stmt>'img_size'<in>cfg.augmentation<and>cfg.augmentation.img_size<g>0<block_start><assert_stmt>isinstance(cfg.augmentation.img_size int)<line_sep>input_size=(in_chans cfg.augmentation.img_size cfg.augmentation.img_size)<block_end><elif_stmt>'input_size'<in>default_cfg<block_start>input_size=default_cfg['input_size']<block_end>new_config['input_size']=input_size<line_sep># resolve interpolation method
new_config['interpolation']='bicubic'<if_stmt>'interpolation'<in>cfg.augmentation<and>len(cfg.augmentation.interpolation)<g>0<block_start>new_config['interpolation']=cfg.augmentation.interpolation<block_end><elif_stmt>'interpolation'<in>default_cfg<block_start>new_config['interpolation']=default_cfg['interpolation']<block_end># resolve dataset + model mean for normalization
new_config['mean']=IMAGENET_DEFAULT_MEAN<if_stmt>'mean'<in>cfg.augmentation<and>len(cfg.augmentation.mean)<g>0<block_start>mean=tuple(cfg.augmentation.mean)<if_stmt>len(mean)<eq>1<block_start>mean=tuple(list(mean)<times>in_chans)<block_end><else_stmt><block_start><assert_stmt>len(mean)<eq>in_chans<block_end>new_config['mean']=mean<block_end><elif_stmt>'mean'<in>default_cfg<block_start>new_config['mean']=default_cfg['mean']<block_end># resolve dataset + model std deviation for normalization
new_config['std']=IMAGENET_DEFAULT_STD<if_stmt>'std'<in>cfg.augmentation<and>len(cfg.augmentation.std)<g>0<block_start>std=tuple(cfg.augmentation.std)<if_stmt>len(std)<eq>1<block_start>std=tuple(list(std)<times>in_chans)<block_end><else_stmt><block_start><assert_stmt>len(std)<eq>in_chans<block_end>new_config['std']=std<block_end><elif_stmt>'std'<in>default_cfg<block_start>new_config['std']=default_cfg['std']<block_end># resolve default crop percentage
new_config['crop_pct']=DEFAULT_CROP_PCT<if_stmt>'crop_pct'<in>cfg.augmentation<and>cfg.augmentation.crop_pct<g>0<block_start>new_config['crop_pct']=cfg.augmentation.crop_pct<block_end><elif_stmt>'crop_pct'<in>default_cfg<block_start>new_config['crop_pct']=default_cfg['crop_pct']<block_end><return>new_config<block_end>
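A hedged usage sketch of this config module (the YAML path and the overrides are made up; clone, merge_from_list, and freeze are standard yacs CfgNode methods):

def get_cfg():
    return _C.clone()   # work on a copy so the module-level defaults stay untouched

cfg = get_cfg()
# cfg.merge_from_file("configs/resnet50.yaml")   # optional YAML override (hypothetical path)
cfg.merge_from_list(["solver.lr", 0.1, "model.name", "resnet101"])
pop_unused_value(cfg)                            # drop the sentinel values defined above
data_config = resolve_data_config(cfg)           # -> input_size / interpolation / mean / std / crop_pct
cfg.freeze()
print(data_config["input_size"], cfg.solver.lr)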
|
# -*- coding: utf-8 -*-
"""Tests for graph samplers."""<import_stmt>unittest<import_stmt>torch<import_from_stmt>pykeen.datasets Nations<import_from_stmt>pykeen.training.schlichtkrull_sampler GraphSampler _compute_compressed_adjacency_list<class_stmt>GraphSamplerTest(unittest.TestCase)<block_start>"""Test the GraphSampler."""<def_stmt>setUp self<arrow><none><block_start>"""Set up the test case with a triples factory."""<line_sep>self.triples_factory=Nations().training<line_sep>self.num_samples=20<line_sep>self.num_epochs=10<line_sep>self.graph_sampler=GraphSampler(triples_factory=self.triples_factory num_samples=self.num_samples)<block_end><def_stmt>test_sample self<arrow><none><block_start>"""Test drawing samples from GraphSampler."""<for_stmt>e range(self.num_epochs)# sample a batch
<block_start>batch_indices=[]<for_stmt>j self.graph_sampler<block_start>batch_indices.append(torch.as_tensor(j))<block_end>batch=torch.stack(batch_indices)<line_sep># check shape
<assert_stmt>batch.shape<eq>(self.num_samples )<line_sep># get triples
triples_batch=self.triples_factory.mapped_triples[batch]<line_sep># check connected components
# super inefficient
components=[{int(e)}<for>e torch.cat([triples_batch[: i]<for>i (0 2)]).unique()]<for_stmt>h,_,t triples_batch<block_start>h,t=int(h) int(t)<line_sep>s_comp_ind=[i<for>i,c enumerate(components)<if>h<in>c][0]<line_sep>o_comp_ind=[i<for>i,c enumerate(components)<if>t<in>c][0]<line_sep># join
<if_stmt>s_comp_ind<ne>o_comp_ind<block_start>s_comp=components.pop(max(s_comp_ind o_comp_ind))<line_sep>o_comp=components.pop(min(s_comp_ind o_comp_ind))<line_sep>so_comp=s_comp.union(o_comp)<line_sep>components.append(so_comp)<block_end><else_stmt><block_start><pass><line_sep># already joined
<block_end><if_stmt>len(components)<l>2<block_start><break><block_end><block_end># check that there is only a single component
<assert_stmt>len(components)<eq>1<block_end><block_end><block_end><class_stmt>AdjacencyListCompressionTest(unittest.TestCase)<block_start>"""Unittest for utility method."""<def_stmt>setUp self<arrow><none><block_start>"""Set up the test case with a triples factory."""<line_sep>self.triples_factory=Nations().training<block_end><def_stmt>test_compute_compressed_adjacency_list self<block_start>"""Test method _compute_compressed_adjacency_list ."""<line_sep>degrees,offsets,comp_adj_lists=_compute_compressed_adjacency_list(triples_factory=self.triples_factory)<line_sep>triples=self.triples_factory.mapped_triples<line_sep>uniq,cnt=torch.unique(torch.cat([triples[: i]<for>i (0 2)]) return_counts=<true>)<assert_stmt>(degrees<eq>cnt).all()<assert_stmt>(offsets[1:]<eq>torch.cumsum(cnt dim=0)[:-1]).all()<assert_stmt>(offsets<l>comp_adj_lists.shape[0]).all()<line_sep># check content of comp_adj_lists
<for_stmt>i range(self.triples_factory.num_entities)<block_start>start=offsets[i]<line_sep>stop=start+degrees[i]<line_sep>adj_list=comp_adj_lists[start:stop]<line_sep># check edge ids
edge_ids=adj_list[: 0]<line_sep>adjacent_edges=set(int(a)<for>a ((triples[: 0]<eq>i)|(triples[: 2]<eq>i)).nonzero(as_tuple=<false>).flatten())<assert_stmt>adjacent_edges<eq>set(map(int edge_ids))<block_end><block_end><block_end>
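The component-merging loop in test_sample is flagged as "super inefficient"; an equivalent connectivity check (a sketch only, assuming scipy is available, and not part of the actual test suite) can lean on scipy's connected_components:

import numpy as np
from scipy.sparse import coo_matrix
from scipy.sparse.csgraph import connected_components

def is_single_component(triples):
    """triples: integer array of (head, relation, tail) rows, like triples_batch above."""
    heads, tails = triples[:, 0], triples[:, 2]
    nodes, inverse = np.unique(np.concatenate([heads, tails]), return_inverse=True)
    h_idx, t_idx = inverse[:len(heads)], inverse[len(heads):]
    adj = coo_matrix((np.ones(len(h_idx)), (h_idx, t_idx)), shape=(len(nodes), len(nodes)))
    n_components, _ = connected_components(adj, directed=False)
    return n_components == 1

assert is_single_component(np.array([[0, 0, 1], [1, 1, 2], [2, 0, 0]]))  # toy example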
|
<import_from_stmt>migrate ForeignKeyConstraint<import_from_stmt>sqlalchemy Table Column MetaData String Integer<line_sep>meta=MetaData()<line_sep>table=Table('dag_tag' meta Column('dag' Integer primary_key=<true>) Column('tag' String(100) primary_key=<true>) )<def_stmt>upgrade migrate_engine<block_start>conn=migrate_engine.connect()<line_sep>trans=conn.begin()<try_stmt><block_start>meta.bind=conn<line_sep>table.create()<line_sep>dag=Table('dag' meta autoload=<true>)<line_sep>ForeignKeyConstraint([table.c.dag] [dag.c.id] ondelete='CASCADE').create()<block_end><except_stmt>Exception<block_start>trans.rollback()<line_sep><raise><block_end><else_stmt><block_start>trans.commit()<block_end><block_end><def_stmt>downgrade migrate_engine<block_start>conn=migrate_engine.connect()<line_sep>trans=conn.begin()<try_stmt><block_start>meta.bind=conn<line_sep>table.drop()<block_end><except_stmt>Exception<block_start>trans.rollback()<line_sep><raise><block_end><else_stmt><block_start>trans.commit()<block_end><block_end>
|
""" Environment determines the underlying law of the system.
All bandit problems should inherit from environment.
"""<import_stmt>numpy<as>np<line_sep>##############################################################################
<class_stmt>Environment(object)<block_start>"""Base class for all bandit environments."""<def_stmt>__init__ self<block_start>"""Initialize the environment."""<line_sep><pass><block_end><def_stmt>get_observation self<block_start>"""Returns an observation from the environment."""<line_sep><pass><block_end><def_stmt>get_optimal_reward self<block_start>"""Returns the optimal possible reward for the environment at that point."""<line_sep><pass><block_end><def_stmt>get_expected_reward self action<block_start>"""Gets the expected reward of an action."""<line_sep><pass><block_end><def_stmt>get_stochastic_reward self action<block_start>"""Gets a stochastic reward for the action."""<line_sep><pass><block_end><def_stmt>advance self action reward<block_start>"""Updating the environment (useful for nonstationary bandit)."""<line_sep><pass><block_end><block_end>
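A minimal concrete subclass (not from this codebase; the class name and arm probabilities are made up) showing how the interface above is meant to be filled in, here for a stationary Bernoulli bandit:

import numpy as np

class BernoulliBandit(Environment):
    """Stationary K-armed bandit with Bernoulli rewards."""

    def __init__(self, probs=(0.3, 0.5, 0.7)):
        self.probs = np.asarray(probs)

    def get_observation(self):
        return None  # stateless bandit: nothing to observe

    def get_optimal_reward(self):
        return float(self.probs.max())

    def get_expected_reward(self, action):
        return float(self.probs[action])

    def get_stochastic_reward(self, action):
        return float(np.random.binomial(1, self.probs[action]))

    def advance(self, action, reward):
        pass  # stationary, so nothing to update

env = BernoulliBandit()
print(env.get_expected_reward(2), env.get_stochastic_reward(2))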
|
<def_stmt>create_HMM switch_prob=0.1 noise_level=1e-1 startprob=[1.0 0.0]<block_start>"""Create an HMM with binary state variable and 1D Gaussian measurements
The probability to switch to the other state is `switch_prob`. Two
measurement models have mean 1.0 and -1.0 respectively. `noise_level`
specifies the standard deviation of the measurement models.
Args:
switch_prob (float): probability to jump to the other state
noise_level (float): standard deviation of measurement models. Same for
two components
Returns:
model (GaussianHMM instance): the described HMM
"""<line_sep>n_components=2<line_sep>startprob_vec=np.asarray(startprob)<line_sep># STEP 1: Transition probabilities
transmat_mat=np.array([[1.-switch_prob switch_prob] [switch_prob 1.-switch_prob]])# 2x2 matrix: stay in the current state with prob 1-switch_prob, switch with prob switch_prob
# STEP 2: Measurement probabilities
# Mean measurements for each state
means_vec=np.array([-1.0 1.0])<line_sep># Noise for each state
vars_vec=np.ones(2)<times>noise_level<times>noise_level<line_sep># Initialize model
model=GaussianHMM1D(startprob=startprob_vec transmat=transmat_mat means=means_vec vars=vars_vec n_components=n_components)<line_sep><return>model<block_end><def_stmt>sample model T<block_start>"""Generate samples from the given HMM
Args:
model (GaussianHMM1D): the HMM with Gaussian measurement
T (int): number of time steps to sample
Returns:
M (numpy vector): the series of measurements
S (numpy vector): the series of latent states
"""<line_sep># Initialize S and M
S=np.zeros((T ) dtype=int)<line_sep>M=np.zeros((T ))<line_sep># Calculate initial state
S[0]=np.random.choice([0 1] p=model.startprob)<line_sep># Latent state at time `t` depends on `t-1` and the corresponding transition probabilities to other states
<for_stmt>t range(1 T)# STEP 3: Get vector of probabilities for all possible `S[t]` given a particular `S[t-1]`
<block_start>transition_vector=model.transmat[S[t-1] :]<line_sep># Calculate latent state at time `t`
S[t]=np.random.choice([0 1] p=transition_vector)<block_end># Calculate measurements conditioned on the latent states
# Since measurements are independent of each other given the latent states, we could calculate them as a batch
means=model.means[S]<line_sep>scales=np.sqrt(model.vars[S])<line_sep>M=np.random.normal(loc=means scale=scales size=(T ))<line_sep><return>M S<block_end># Set random seed
np.random.seed(101)<line_sep># Set parameters of HMM
T=100<line_sep>switch_prob=0.1<line_sep>noise_level=2.0<line_sep># Create HMM
model=create_HMM(switch_prob=switch_prob noise_level=noise_level)<line_sep># Sample from HMM
M,S=sample(model T)<assert_stmt>M.shape<eq>(T )<assert_stmt>S.shape<eq>(T )<line_sep># Print values
print(M[:5])<line_sep>print(S[:5])<line_sep>
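This snippet calls GaussianHMM1D and relies on numpy, but neither is defined in the excerpt. A minimal stand-in that is consistent with the attributes the code actually reads (startprob, transmat, means, vars, n_components), assumed here rather than taken from the original notebook, would be:

from collections import namedtuple
import numpy as np

GaussianHMM1D = namedtuple(
    "GaussianHMM1D", ["startprob", "transmat", "means", "vars", "n_components"]
)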
|
'''
Created on Feb 5, 2012
@author: marat
'''<import_stmt>sys<import_from_stmt>ResAtom *<class_stmt>Residue<block_start>'''
classdocs
'''<def_stmt>__init__ self params={}<block_start>'''
Default constructor for Residue class
atoms list of atoms in the residue
name residue name
'''<if_stmt>"atoms"<in>params<block_start>self.atoms=params["atoms"]<block_end><else_stmt><block_start>self.atoms=[]<block_end><if_stmt>"name"<in>params<block_start>self.name=params["name"]<block_end><else_stmt><block_start>self.name=""<block_end><block_end><def_stmt>__str__ self<block_start>output=""<for_stmt>a self.atoms<block_start>output=output+str(a)+"\n"<block_end><return>output<block_end><def_stmt>toPDBrecord self id_atom=1 id_res=1<block_start>output=""<line_sep>i=id_atom-1<for_stmt>a self.atoms<block_start>i=i+1<line_sep>output=output+a.toPDBrecord(i)+"\n"<block_end><return>output<block_end><def_stmt>AddAtom self a<block_start><if_stmt>self.name<eq>""<block_start>self.name=a.resname<block_end><else_stmt><block_start><if_stmt>a.resname<ne>self.name<block_start>print("different names for the same residue index")<line_sep>sys.exit(1)<block_end><block_end>self.atoms.append(a)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>aline1="ATOM 3 O2 IO3 1 -1.182 1.410 0.573 -0.80 O"<line_sep>aline2="ATOM 1 I1 IO3 1 -1.555 -0.350 0.333 1.39 I"<line_sep>res0=Residue()<line_sep>a=ResAtom.fromPDBrecord(aline2)<line_sep>b=ResAtom.fromPDBrecord(aline1)<line_sep>res0.AddAtom(a)<line_sep>res0.AddAtom(b)<line_sep>print(res0.toPDBrecord(id_atom=4))<block_end>
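The class above imports ResAtom, whose implementation is not included in this excerpt. A hedged stand-in (field layout guessed from the PDB-like records in __main__; the real module may differ) that makes the example runnable:

class ResAtom(object):
    def __init__(self, name, resname, resid, x, y, z, charge, symbol):
        self.name, self.resname, self.resid = name, resname, int(resid)
        self.x, self.y, self.z = float(x), float(y), float(z)
        self.charge, self.symbol = float(charge), symbol

    @staticmethod
    def fromPDBrecord(line):
        # assumed layout: ATOM serial name resname resid x y z charge symbol
        f = line.split()
        return ResAtom(f[2], f[3], f[4], f[5], f[6], f[7], f[8], f[9])

    def toPDBrecord(self, id_atom=1):
        return "ATOM %6d %4s %4s %5d %11.3f %7.3f %7.3f %6.2f %s" % (
            id_atom, self.name, self.resname, self.resid,
            self.x, self.y, self.z, self.charge, self.symbol)

    def __str__(self):
        return self.toPDBrecord()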
|
<import_from_future_stmt> absolute_import division print_function unicode_literals <import_stmt>sys<import_stmt>os<try_stmt><block_start><import_from_stmt>functools lru_cache<block_end><except_stmt>ImportError# Just a dummy decorator to get the checks to run on python2
# because honestly I don't want to support a byte-level unicode BPE tokenizer on python 2 right now.
<block_start><def_stmt>lru_cache <block_start><return><lambda>func:func<block_end><block_end><import_stmt>logging<import_stmt>json<import_stmt>six<import_from_stmt>io open<import_from_stmt>functools wraps<import_stmt>boto3<import_stmt>requests<import_from_stmt>botocore.exceptions ClientError<import_stmt>shutil<import_from_stmt>hashlib sha256<import_stmt>fnmatch<import_stmt>tempfile<import_from_stmt>tqdm tqdm<try_stmt><block_start><import_from_stmt>urllib.parse urlparse<block_end><except_stmt>ImportError<block_start><import_from_stmt>urlparse urlparse<block_end><try_stmt><block_start><import_from_stmt>torch.hub _get_torch_home<line_sep>torch_cache_home=_get_torch_home()<block_end><except_stmt>ImportError<block_start>torch_cache_home=os.path.expanduser(os.getenv('TORCH_HOME' os.path.join(os.getenv('XDG_CACHE_HOME' '~/.cache') 'torch')))<block_end>default_cache_path=os.path.join(torch_cache_home 'pytorch_transformers')<try_stmt><block_start><import_from_stmt>pathlib Path<line_sep>PYTORCH_PRETRAINED_BERT_CACHE=Path(os.getenv('PYTORCH_TRANSFORMERS_CACHE' os.getenv('PYTORCH_PRETRAINED_BERT_CACHE' default_cache_path)))<block_end><except_stmt>(AttributeError ImportError)<block_start>PYTORCH_PRETRAINED_BERT_CACHE=os.getenv('PYTORCH_TRANSFORMERS_CACHE' os.getenv('PYTORCH_PRETRAINED_BERT_CACHE' default_cache_path))<block_end>PYTORCH_TRANSFORMERS_CACHE=PYTORCH_PRETRAINED_BERT_CACHE# Kept for backward compatibility
logger=logging.getLogger(__name__)<line_sep>SPECIAL_TOKENS_MAP_FILE='special_tokens_map.json'<line_sep>ADDED_TOKENS_FILE='added_tokens.json'<line_sep>@lru_cache()<def_stmt>bytes_to_unicode <block_start>"""
Returns list of utf-8 byte and a corresponding list of unicode strings.
The reversible bpe codes work on unicode strings.
This means you need a large # of unicode characters in your vocab if you want to avoid UNKs.
When you're at something like a 10B token dataset you end up needing around 5K for decent coverage.
This is a signficant percentage of your normal, say, 32K bpe vocab.
To avoid that, we want lookup tables between utf-8 bytes and unicode strings.
And avoids mapping to whitespace/control characters the bpe code barfs on.
"""<line_sep>_chr=unichr<if>sys.version_info[0]<eq>2<else>chr<line_sep>bs=list(range(ord("!") ord("~")+1))+list(range(ord("¡") ord("¬")+1))+list(range(ord("®") ord("ÿ")+1))<line_sep>cs=bs[:]<line_sep>n=0<for_stmt>b range(2<power>8)<block_start><if_stmt>b<not><in>bs<block_start>bs.append(b)<line_sep>cs.append(2<power>8+n)<line_sep>n<augadd>1<block_end><block_end>cs=[_chr(n)<for>n cs]<line_sep><return>dict(zip(bs cs))<block_end><def_stmt>get_pairs word<block_start>"""Return set of symbol pairs in a word.
Word is represented as tuple of symbols (symbols being variable-length strings).
"""<line_sep>pairs=set()<line_sep>prev_char=word[0]<for_stmt>char word[1:]<block_start>pairs.add((prev_char char))<line_sep>prev_char=char<block_end><return>pairs<block_end><def_stmt>s3_request func<block_start>"""
Wrapper function for s3 requests in order to create more helpful error
messages.
"""<line_sep>@wraps(func)<def_stmt>wrapper url *args **kwargs<block_start><try_stmt><block_start><return>func(url *args **kwargs)<block_end><except_stmt>ClientError<as>exc<block_start><if_stmt>int(exc.response["Error"]["Code"])<eq>404<block_start><raise>EnvironmentError("file {} not found".format(url))<block_end><else_stmt><block_start><raise><block_end><block_end><block_end><return>wrapper<block_end><def_stmt>split_s3_path url<block_start>"""Split a full s3 path into the bucket name and path."""<line_sep>parsed=urlparse(url)<if_stmt><not>parsed.netloc<or><not>parsed.path<block_start><raise>ValueError("bad s3 path {}".format(url))<block_end>bucket_name=parsed.netloc<line_sep>s3_path=parsed.path<line_sep># Remove '/' at beginning of path.
<if_stmt>s3_path.startswith("/")<block_start>s3_path=s3_path[1:]<block_end><return>bucket_name s3_path<block_end>@s3_request<def_stmt>s3_etag url<block_start>"""Check ETag on S3 object."""<line_sep>s3_resource=boto3.resource("s3")<line_sep>bucket_name,s3_path=split_s3_path(url)<line_sep>s3_object=s3_resource.Object(bucket_name s3_path)<line_sep><return>s3_object.e_tag<block_end>@s3_request<def_stmt>s3_get url temp_file<block_start>"""Pull a file directly from S3."""<line_sep>s3_resource=boto3.resource("s3")<line_sep>bucket_name,s3_path=split_s3_path(url)<line_sep>s3_resource.Bucket(bucket_name).download_fileobj(s3_path temp_file)<block_end><def_stmt>http_get url temp_file<block_start>req=requests.get(url stream=<true>)<line_sep>content_length=req.headers.get('Content-Length')<line_sep>total=int(content_length)<if>content_length<is><not><none><else><none><line_sep>progress=tqdm(unit="B" total=total)<for_stmt>chunk req.iter_content(chunk_size=1024)<block_start><if_stmt>chunk# filter out keep-alive new chunks
<block_start>progress.update(len(chunk))<line_sep>temp_file.write(chunk)<block_end><block_end>progress.close()<block_end><def_stmt>cached_path url_or_filename cache_dir=<none><block_start>"""
Given something that might be a URL (or might be a local path),
determine which. If it's a URL, download the file and cache it, and
return the path to the cached file. If it's already a local path,
make sure the file exists and then return the path.
"""<if_stmt>cache_dir<is><none><block_start>cache_dir=PYTORCH_TRANSFORMERS_CACHE<block_end><if_stmt>sys.version_info[0]<eq>3<and>isinstance(url_or_filename Path)<block_start>url_or_filename=str(url_or_filename)<block_end><if_stmt>sys.version_info[0]<eq>3<and>isinstance(cache_dir Path)<block_start>cache_dir=str(cache_dir)<block_end>parsed=urlparse(url_or_filename)<if_stmt>parsed.scheme<in>('http' 'https' 's3')# URL, so get it from the cache (downloading if necessary)
<block_start><return>get_from_cache(url_or_filename cache_dir)<block_end><elif_stmt>os.path.exists(url_or_filename)# File, and it exists.
<block_start><return>url_or_filename<block_end><elif_stmt>parsed.scheme<eq>''# File, but it doesn't exist.
<block_start><raise>EnvironmentError("file {} not found".format(url_or_filename))<block_end><else_stmt># Something unknown
<block_start><raise>ValueError("unable to parse {} as a URL or as a local path".format(url_or_filename))<block_end><block_end><def_stmt>url_to_filename url etag=<none><block_start>"""
Convert `url` into a hashed filename in a repeatable way.
If `etag` is specified, append its hash to the url's, delimited
by a period.
"""<line_sep>url_bytes=url.encode('utf-8')<line_sep>url_hash=sha256(url_bytes)<line_sep>filename=url_hash.hexdigest()<if_stmt>etag<block_start>etag_bytes=etag.encode('utf-8')<line_sep>etag_hash=sha256(etag_bytes)<line_sep>filename<augadd>'.'+etag_hash.hexdigest()<block_end><return>filename<block_end><def_stmt>get_from_cache url cache_dir=<none><block_start>"""
Given a URL, look for the corresponding dataset in the local cache.
If it's not there, download it. Then return the path to the cached file.
"""<if_stmt>cache_dir<is><none><block_start>cache_dir=PYTORCH_TRANSFORMERS_CACHE<block_end><if_stmt>sys.version_info[0]<eq>3<and>isinstance(cache_dir Path)<block_start>cache_dir=str(cache_dir)<block_end><if_stmt>sys.version_info[0]<eq>2<and><not>isinstance(cache_dir str)<block_start>cache_dir=str(cache_dir)<block_end><if_stmt><not>os.path.exists(cache_dir)<block_start>os.makedirs(cache_dir)<block_end># Get eTag to add to filename, if it exists.
<if_stmt>url.startswith("s3://")<block_start>etag=s3_etag(url)<block_end><else_stmt><block_start><try_stmt><block_start>response=requests.head(url allow_redirects=<true>)<if_stmt>response.status_code<ne>200<block_start>etag=<none><block_end><else_stmt><block_start>etag=response.headers.get("ETag")<block_end><block_end><except_stmt>EnvironmentError<block_start>etag=<none><block_end><block_end><if_stmt>sys.version_info[0]<eq>2<and>etag<is><not><none><block_start>etag=etag.decode('utf-8')<block_end>filename=url_to_filename(url etag)<line_sep># get cache path to put the file
cache_path=os.path.join(cache_dir filename)<line_sep># If we don't have a connection (etag is None) and can't identify the file
# try to get the last downloaded one
<if_stmt><not>os.path.exists(cache_path)<and>etag<is><none><block_start>matching_files=fnmatch.filter(os.listdir(cache_dir) filename+'.*')<line_sep>matching_files=list(filter(<lambda>s:<not>s.endswith('.json') matching_files))<if_stmt>matching_files<block_start>cache_path=os.path.join(cache_dir matching_files[-1])<block_end><block_end><if_stmt><not>os.path.exists(cache_path)# Download to temporary file, then copy to cache dir once finished.
# Otherwise you get corrupt cache entries if the download gets interrupted.
<block_start><with_stmt>tempfile.NamedTemporaryFile()<as>temp_file<block_start>logger.info("%s not found in cache, downloading to %s" url temp_file.name)<line_sep># GET file object
<if_stmt>url.startswith("s3://")<block_start>s3_get(url temp_file)<block_end><else_stmt><block_start>http_get(url temp_file)<block_end># we are copying the file before closing it, so flush to avoid truncation
temp_file.flush()<line_sep># shutil.copyfileobj() starts at the current position, so go to the start
temp_file.seek(0)<line_sep>logger.info("copying %s to cache at %s" temp_file.name cache_path)<with_stmt>open(cache_path 'wb')<as>cache_file<block_start>shutil.copyfileobj(temp_file cache_file)<block_end>logger.info("creating metadata file for %s" cache_path)<line_sep>meta={'url':url 'etag':etag}<line_sep>meta_path=cache_path+'.json'<with_stmt>open(meta_path 'w')<as>meta_file<block_start>output_string=json.dumps(meta)<if_stmt>sys.version_info[0]<eq>2<and>isinstance(output_string str)<block_start>output_string=unicode(output_string 'utf-8')# The beauty of python 2
<block_end>meta_file.write(output_string)<block_end>logger.info("removing temp file %s" temp_file.name)<block_end><block_end><return>cache_path<block_end><class_stmt>PreTrainedTokenizer(object)<block_start>""" Base class for all tokenizers.
Handle all the shared methods for tokenization and special tokens as well as methods dowloading/caching/loading pretrained tokenizers as well as adding tokens to the vocabulary.
This class also contain the added tokens in a unified way on top of all tokenizers so we don't have to handle the specific vocabulary augmentation methods of the various underlying dictionary structures (BPE, sentencepiece...).
Class attributes (overridden by derived classes):
- ``vocab_files_names``: a python ``dict`` with, as keys, the ``__init__`` keyword name of each vocabulary file required by the model, and as associated values, the filename for saving the associated file (string).
- ``pretrained_vocab_files_map``: a python ``dict of dict`` the high-level keys being the ``__init__`` keyword name of each vocabulary file required by the model, the low-level being the `short-cut-names` (string) of the pretrained models with, as associated values, the `url` (string) to the associated pretrained vocabulary file.
- ``max_model_input_sizes``: a python ``dict`` with, as keys, the `short-cut-names` (string) of the pretrained models, and as associated values, the maximum length of the sequence inputs of this model, or None if the model has no maximum input size.
Parameters:
- ``bos_token``: (`Optional`) string: a beginning of sentence token. Will be associated to ``self.bos_token``
- ``eos_token``: (`Optional`) string: an end of sentence token. Will be associated to ``self.eos_token``
- ``unk_token``: (`Optional`) string: an unknown token. Will be associated to ``self.unk_token``
- ``sep_token``: (`Optional`) string: a separation token (e.g. to separate context and query in an input sequence). Will be associated to ``self.sep_token``
- ``pad_token``: (`Optional`) string: a padding token. Will be associated to ``self.pad_token``
- ``cls_token``: (`Optional`) string: a classification token (e.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model). Will be associated to ``self.cls_token``
- ``mask_token``: (`Optional`) string: a masking token (e.g. when training a model with masked-language modeling). Will be associated to ``self.mask_token``
- ``additional_special_tokens``: (`Optional`) list: a list of additional special tokens. Adding all special tokens here ensures they won't be split by the tokenization process. Will be associated to ``self.additional_special_tokens``
"""<line_sep>vocab_files_names={}<line_sep>pretrained_vocab_files_map={}<line_sep>max_model_input_sizes={}<line_sep>SPECIAL_TOKENS_ATTRIBUTES=["bos_token" "eos_token" "unk_token" "sep_token" "pad_token" "cls_token" "mask_token" "additional_special_tokens"]<line_sep>@property<def_stmt>bos_token self<block_start>""" Beginning of sentence token (string). Log an error if used while not having been set. """<if_stmt>self._bos_token<is><none><block_start>logger.error("Using bos_token, but it is not set yet.")<block_end><return>self._bos_token<block_end>@property<def_stmt>eos_token self<block_start>""" End of sentence token (string). Log an error if used while not having been set. """<if_stmt>self._eos_token<is><none><block_start>logger.error("Using eos_token, but it is not set yet.")<block_end><return>self._eos_token<block_end>@property<def_stmt>unk_token self<block_start>""" Unknown token (string). Log an error if used while not having been set. """<if_stmt>self._unk_token<is><none><block_start>logger.error("Using unk_token, but it is not set yet.")<block_end><return>self._unk_token<block_end>@property<def_stmt>sep_token self<block_start>""" Separation token (string). E.g. separate context and query in an input sequence. Log an error if used while not having been set. """<if_stmt>self._sep_token<is><none><block_start>logger.error("Using sep_token, but it is not set yet.")<block_end><return>self._sep_token<block_end>@property<def_stmt>pad_token self<block_start>""" Padding token (string). Log an error if used while not having been set. """<if_stmt>self._pad_token<is><none><block_start>logger.error("Using pad_token, but it is not set yet.")<block_end><return>self._pad_token<block_end>@property<def_stmt>cls_token self<block_start>""" Classification token (string). E.g. to extract a summary of an input sequence leveraging self-attention along the full depth of the model. Log an error if used while not having been set. """<if_stmt>self._cls_token<is><none><block_start>logger.error("Using cls_token, but it is not set yet.")<block_end><return>self._cls_token<block_end>@property<def_stmt>mask_token self<block_start>""" Mask token (string). E.g. when training a model with masked-language modeling. Log an error if used while not having been set. """<if_stmt>self._mask_token<is><none><block_start>logger.error("Using mask_token, but it is not set yet.")<block_end><return>self._mask_token<block_end>@property<def_stmt>additional_special_tokens self<block_start>""" All the additional special tokens you may want to use (list of strings). Log an error if used while not having been set. 
"""<if_stmt>self._additional_special_tokens<is><none><block_start>logger.error("Using additional_special_tokens, but it is not set yet.")<block_end><return>self._additional_special_tokens<block_end>@bos_token.setter<def_stmt>bos_token self value<block_start>self._bos_token=value<block_end>@eos_token.setter<def_stmt>eos_token self value<block_start>self._eos_token=value<block_end>@unk_token.setter<def_stmt>unk_token self value<block_start>self._unk_token=value<block_end>@sep_token.setter<def_stmt>sep_token self value<block_start>self._sep_token=value<block_end>@pad_token.setter<def_stmt>pad_token self value<block_start>self._pad_token=value<block_end>@cls_token.setter<def_stmt>cls_token self value<block_start>self._cls_token=value<block_end>@mask_token.setter<def_stmt>mask_token self value<block_start>self._mask_token=value<block_end>@additional_special_tokens.setter<def_stmt>additional_special_tokens self value<block_start>self._additional_special_tokens=value<block_end><def_stmt>__init__ self max_len=<none> **kwargs<block_start>self._bos_token=<none><line_sep>self._eos_token=<none><line_sep>self._unk_token=<none><line_sep>self._sep_token=<none><line_sep>self._pad_token=<none><line_sep>self._cls_token=<none><line_sep>self._mask_token=<none><line_sep>self._additional_special_tokens=[]<line_sep>self.max_len=max_len<if>max_len<is><not><none><else>int(1e12)<line_sep>self.added_tokens_encoder={}<line_sep>self.added_tokens_decoder={}<for_stmt>key,value kwargs.items()<block_start><if_stmt>key<in>self.SPECIAL_TOKENS_ATTRIBUTES<block_start><if_stmt>key<eq>'additional_special_tokens'<block_start><assert_stmt>isinstance(value (list tuple))<and>all(isinstance(t str)<or>(six.PY2<and>isinstance(t unicode))<for>t value)<block_end><else_stmt><block_start><assert_stmt>isinstance(value str)<or>(six.PY2<and>isinstance(value unicode))<block_end>setattr(self key value)<block_end><block_end><block_end>@classmethod<def_stmt>from_pretrained cls *inputs **kwargs<block_start>r"""
Instantiate a :class:`~pytorch_transformers.PreTrainedTokenizer` (or a derived class) from a predefined tokenizer.
Args:
pretrained_model_name_or_path: either:
- a string with the `shortcut name` of a predefined tokenizer to load from cache or download, e.g.: ``bert-base-uncased``.
- a path to a `directory` containing vocabulary files required by the tokenizer, for instance saved using the :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` method, e.g.: ``./my_model_directory/``.
- (not applicable to all derived classes) a path or url to a single saved vocabulary file if and only if the tokenizer only requires a single vocabulary file (e.g. Bert, XLNet), e.g.: ``./my_model_directory/vocab.txt``.
cache_dir: (`optional`) string:
Path to a directory in which a downloaded predefined tokenizer vocabulary files should be cached if the standard cache should not be used.
inputs: (`optional`) positional arguments: will be passed to the Tokenizer ``__init__`` method.
kwargs: (`optional`) keyword arguments: will be passed to the Tokenizer ``__init__`` method. Can be used to set special tokens like ``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``, ``additional_special_tokens``. See parameters in the doc string of :class:`~pytorch_transformers.PreTrainedTokenizer` for details.
Examples::
# We can't instantiate directly the base class `PreTrainedTokenizer` so let's show our examples on a derived class: BertTokenizer
# Download vocabulary from S3 and cache.
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
# If vocabulary files are in a directory (e.g. tokenizer was saved using `save_pretrained('./test/saved_model/')`)
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/')
# If the tokenizer uses a single vocabulary file, you can point directly to this file
tokenizer = BertTokenizer.from_pretrained('./test/saved_model/my_vocab.txt')
# You can link tokens to special vocabulary when instantiating
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased', unk_token='<unk>')
# You should be sure '<unk>' is in the vocabulary when doing that.
# Otherwise use tokenizer.add_special_tokens({'unk_token': '<unk>'}) instead)
assert tokenizer.unk_token == '<unk>'
"""<line_sep><return>cls._from_pretrained(*inputs **kwargs)<block_end>@classmethod<def_stmt>_from_pretrained cls pretrained_model_name_or_path *inputs **kwargs<block_start>cache_dir=kwargs.pop('cache_dir' <none>)<line_sep>s3_models=list(cls.max_model_input_sizes.keys())<line_sep>vocab_files={}<if_stmt>pretrained_model_name_or_path<in>s3_models# Get the vocabulary from AWS S3 bucket
<block_start><for_stmt>file_id,map_list cls.pretrained_vocab_files_map.items()<block_start>vocab_files[file_id]=map_list[pretrained_model_name_or_path]<block_end><block_end><else_stmt># Get the vocabulary from local files
<block_start>logger.info("Model name '{}' not found in model shortcut name list ({}). "<concat>"Assuming '{}' is a path or url to a directory containing tokenizer files.".format(pretrained_model_name_or_path ', '.join(s3_models) pretrained_model_name_or_path))<line_sep># Look for the tokenizer main vocabulary files
<for_stmt>file_id,file_name cls.vocab_files_names.items()<block_start><if_stmt>os.path.isdir(pretrained_model_name_or_path)# If a directory is provided we look for the standard filenames
<block_start>full_file_name=os.path.join(pretrained_model_name_or_path file_name)<block_end><else_stmt># If a path to a file is provided we use it (will only work for non-BPE tokenizer using a single vocabulary file)
<block_start>full_file_name=pretrained_model_name_or_path<block_end><if_stmt><not>os.path.exists(full_file_name)<block_start>logger.info("Didn't find file {}. We won't load it.".format(full_file_name))<line_sep>full_file_name=<none><block_end>vocab_files[file_id]=full_file_name<block_end># Look for the additional tokens files
all_vocab_files_names={'added_tokens_file':ADDED_TOKENS_FILE 'special_tokens_map_file':SPECIAL_TOKENS_MAP_FILE}<line_sep># If a path to a file was provided, get the parent directory
saved_directory=pretrained_model_name_or_path<if_stmt>os.path.exists(saved_directory)<and><not>os.path.isdir(saved_directory)<block_start>saved_directory=os.path.dirname(saved_directory)<block_end><for_stmt>file_id,file_name all_vocab_files_names.items()<block_start>full_file_name=os.path.join(saved_directory file_name)<if_stmt><not>os.path.exists(full_file_name)<block_start>logger.info("Didn't find file {}. We won't load it.".format(full_file_name))<line_sep>full_file_name=<none><block_end>vocab_files[file_id]=full_file_name<block_end><if_stmt>all(full_file_name<is><none><for>full_file_name vocab_files.values())<block_start>logger.error("Model name '{}' was not found in model name list ({}). "<concat>"We assumed '{}' was a path or url but couldn't find tokenizer files"<concat>"at this path or url.".format(pretrained_model_name_or_path ', '.join(s3_models) pretrained_model_name_or_path ))<line_sep><return><none><block_end><block_end># Get files from url, cache, or disk depending on the case
<try_stmt><block_start>resolved_vocab_files={}<for_stmt>file_id,file_path vocab_files.items()<block_start><if_stmt>file_path<is><none><block_start>resolved_vocab_files[file_id]=<none><block_end><else_stmt><block_start>resolved_vocab_files[file_id]=cached_path(file_path cache_dir=cache_dir)<block_end><block_end><block_end><except_stmt>EnvironmentError<block_start><if_stmt>pretrained_model_name_or_path<in>s3_models<block_start>logger.error("Couldn't reach server to download vocabulary.")<block_end><else_stmt><block_start>logger.error("Model name '{}' was not found in model name list ({}). "<concat>"We assumed '{}' was a path or url but couldn't find files {} "<concat>"at this path or url.".format(pretrained_model_name_or_path ', '.join(s3_models) pretrained_model_name_or_path str(vocab_files.keys())))<block_end><return><none><block_end><for_stmt>file_id,file_path vocab_files.items()<block_start><if_stmt>file_path<eq>resolved_vocab_files[file_id]<block_start>logger.info("loading file {}".format(file_path))<block_end><else_stmt><block_start>logger.info("loading file {} from cache at {}".format(file_path resolved_vocab_files[file_id]))<block_end><block_end># Set max length if needed
<if_stmt>pretrained_model_name_or_path<in>cls.max_model_input_sizes# if we're using a pretrained model, ensure the tokenizer
# wont index sequences longer than the number of positional embeddings
<block_start>max_len=cls.max_model_input_sizes[pretrained_model_name_or_path]<if_stmt>max_len<is><not><none><and>isinstance(max_len (int float))<block_start>kwargs['max_len']=min(kwargs.get('max_len' int(1e12)) max_len)<block_end><block_end># Merge resolved_vocab_files arguments in kwargs.
added_tokens_file=resolved_vocab_files.pop('added_tokens_file' <none>)<line_sep>special_tokens_map_file=resolved_vocab_files.pop('special_tokens_map_file' <none>)<for_stmt>args_name,file_path resolved_vocab_files.items()<block_start><if_stmt>args_name<not><in>kwargs<block_start>kwargs[args_name]=file_path<block_end><block_end><if_stmt>special_tokens_map_file<is><not><none><block_start>special_tokens_map=json.load(open(special_tokens_map_file encoding="utf-8"))<for_stmt>key,value special_tokens_map.items()<block_start><if_stmt>key<not><in>kwargs<block_start>kwargs[key]=value<block_end><block_end><block_end># Instantiate tokenizer.
tokenizer=cls(*inputs **kwargs)<line_sep># Add supplementary tokens.
<if_stmt>added_tokens_file<is><not><none><block_start>added_tok_encoder=json.load(open(added_tokens_file encoding="utf-8"))<line_sep>added_tok_decoder={v:k<for>k,v added_tok_encoder.items()}<line_sep>tokenizer.added_tokens_encoder.update(added_tok_encoder)<line_sep>tokenizer.added_tokens_decoder.update(added_tok_decoder)<block_end><return>tokenizer<block_end><def_stmt>save_pretrained self save_directory<block_start>""" Save the tokenizer vocabulary files (with added tokens) and the
special-tokens-to-class-attributes-mapping to a directory.
This method make sure the full tokenizer can then be re-loaded using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
"""<if_stmt><not>os.path.isdir(save_directory)<block_start>logger.error("Saving directory ({}) should be a directory".format(save_directory))<line_sep><return><block_end>special_tokens_map_file=os.path.join(save_directory SPECIAL_TOKENS_MAP_FILE)<line_sep>added_tokens_file=os.path.join(save_directory ADDED_TOKENS_FILE)<with_stmt>open(special_tokens_map_file 'w' encoding='utf-8')<as>f<block_start>f.write(json.dumps(self.special_tokens_map ensure_ascii=<false>))<block_end><with_stmt>open(added_tokens_file 'w' encoding='utf-8')<as>f<block_start><if_stmt>self.added_tokens_encoder<block_start>out_str=json.dumps(self.added_tokens_encoder ensure_ascii=<false>)<block_end><else_stmt><block_start>out_str=u"{}"<block_end>f.write(out_str)<block_end>vocab_files=self.save_vocabulary(save_directory)<line_sep><return>vocab_files+(special_tokens_map_file added_tokens_file)<block_end><def_stmt>save_vocabulary self save_directory<block_start>""" Save the tokenizer vocabulary to a directory. This method does *NOT* save added tokens
and special token mappings.
Please use :func:`~pytorch_transformers.PreTrainedTokenizer.save_pretrained` `()` to save the full Tokenizer state if you want to reload it using the :func:`~pytorch_transformers.PreTrainedTokenizer.from_pretrained` class method.
"""<line_sep><raise>NotImplementedError<block_end><def_stmt>vocab_size self<block_start>""" Size of the base vocabulary (without the added tokens) """<line_sep><raise>NotImplementedError<block_end><def_stmt>__len__ self<block_start>""" Size of the full vocabulary with the added tokens """<line_sep><return>self.vocab_size+len(self.added_tokens_encoder)<block_end><def_stmt>add_tokens self new_tokens<block_start>"""
Add a list of new tokens to the tokenizer class. If the new tokens are not in the
vocabulary, they are added to it with indices starting from length of the current vocabulary.
Args:
new_tokens: list of string. Each string is a token to add. Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to increase the vocabulary of Bert model and tokenizer
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
model = BertModel.from_pretrained('bert-base-uncased')
num_added_toks = tokenizer.add_tokens(['new_tok1', 'my_new-tok2'])
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
"""<if_stmt><not>new_tokens<block_start><return>0<block_end>to_add_tokens=[]<for_stmt>token new_tokens<block_start><assert_stmt>isinstance(token str)<or>(six.PY2<and>isinstance(token unicode))<if_stmt>token<ne>self.unk_token<and>self.convert_tokens_to_ids(token)<eq>self.convert_tokens_to_ids(self.unk_token)<block_start>to_add_tokens.append(token)<line_sep>logger.info("Adding %s to the vocabulary" token)<block_end><block_end>added_tok_encoder=dict((tok len(self)+i)<for>i,tok enumerate(to_add_tokens))<line_sep>added_tok_decoder={v:k<for>k,v added_tok_encoder.items()}<line_sep>self.added_tokens_encoder.update(added_tok_encoder)<line_sep>self.added_tokens_decoder.update(added_tok_decoder)<line_sep><return>len(to_add_tokens)<block_end><def_stmt>add_special_tokens self special_tokens_dict<block_start>"""
Add a dictionary of special tokens (eos, pad, cls...) to the encoder and link them
to class attributes. If special tokens are NOT in the vocabulary, they are added
to it (indexed starting from the last index of the current vocabulary).
Args:
special_tokens_dict: dict of string. Keys should be in the list of predefined special attributes:
[``bos_token``, ``eos_token``, ``unk_token``, ``sep_token``, ``pad_token``, ``cls_token``, ``mask_token``,
``additional_special_tokens``].
Tokens are only added if they are not already in the vocabulary (tested by checking if the tokenizer assign the index of the ``unk_token`` to them).
Returns:
Number of tokens added to the vocabulary.
Examples::
# Let's see how to add a new classification token to GPT-2
tokenizer = GPT2Tokenizer.from_pretrained('gpt2')
model = GPT2Model.from_pretrained('gpt2')
special_tokens_dict = {'cls_token': '<CLS>'}
num_added_toks = tokenizer.add_special_tokens(special_tokens_dict)
print('We have added', num_added_toks, 'tokens')
model.resize_token_embeddings(len(tokenizer)) # Notice: resize_token_embeddings expect to receive the full size of the new vocabulary, i.e. the length of the tokenizer.
assert tokenizer.cls_token == '<CLS>'
"""<if_stmt><not>special_tokens_dict<block_start><return>0<block_end>added_tokens=0<for_stmt>key,value special_tokens_dict.items()<block_start><assert_stmt>key<in>self.SPECIAL_TOKENS_ATTRIBUTES<if_stmt>key<eq>'additional_special_tokens'<block_start><assert_stmt>isinstance(value (list tuple))<and>all(isinstance(t str)<or>(six.PY2<and>isinstance(t unicode))<for>t value)<line_sep>added_tokens<augadd>self.add_tokens(value)<block_end><else_stmt><block_start><assert_stmt>isinstance(value str)<or>(six.PY2<and>isinstance(value unicode))<line_sep>added_tokens<augadd>self.add_tokens([value])<block_end>logger.info("Assigning %s to the %s key of the tokenizer" value key)<line_sep>setattr(self key value)<block_end><return>added_tokens<block_end><def_stmt>tokenize self text **kwargs<block_start>""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Take care of added tokens.
"""<def_stmt>split_on_tokens tok_list text<block_start><if_stmt><not>text<block_start><return>[]<block_end><if_stmt><not>tok_list<block_start><return>self._tokenize(text **kwargs)<block_end>tok=tok_list[0]<line_sep>split_text=text.split(tok)<line_sep><return>sum((split_on_tokens(tok_list[1:] sub_text.strip())+[tok]<for>sub_text split_text) [])[:-1]<block_end>added_tokens=list(self.added_tokens_encoder.keys())+self.all_special_tokens<line_sep>tokenized_text=split_on_tokens(added_tokens text)<line_sep><return>tokenized_text<block_end><def_stmt>_tokenize self text **kwargs<block_start>""" Converts a string in a sequence of tokens (string), using the tokenizer.
Split in words for word-based vocabulary or sub-words for sub-word-based
vocabularies (BPE/SentencePieces/WordPieces).
Do NOT take care of added tokens.
"""<line_sep><raise>NotImplementedError<block_end><def_stmt>convert_tokens_to_ids self tokens<block_start>""" Converts a single token, or a sequence of tokens, (str/unicode) in a single integer id
(resp. a sequence of ids), using the vocabulary.
"""<if_stmt>isinstance(tokens str)<or>(six.PY2<and>isinstance(tokens unicode))<block_start><return>self._convert_token_to_id_with_added_voc(tokens)<block_end>ids=[]<for_stmt>token tokens<block_start>ids.append(self._convert_token_to_id_with_added_voc(token))<block_end><if_stmt>len(ids)<g>self.max_len<block_start>logger.warning("Token indices sequence length is longer than the specified maximum sequence length "<concat>"for this model ({} > {}). Running this sequence through the model will result in "<concat>"indexing errors".format(len(ids) self.max_len))<block_end><return>ids<block_end><def_stmt>_convert_token_to_id_with_added_voc self token<block_start><if_stmt>token<in>self.added_tokens_encoder<block_start><return>self.added_tokens_encoder[token]<block_end><return>self._convert_token_to_id(token)<block_end><def_stmt>_convert_token_to_id self token<block_start><raise>NotImplementedError<block_end><def_stmt>encode self text text_pair=<none> add_special_tokens=<false><block_start>"""
Converts a string into a sequence of ids (integers), using the tokenizer and vocabulary.
Same as doing ``self.convert_tokens_to_ids(self.tokenize(text))``.
Args:
text: The first sequence to be encoded.
text_pair: Optional second sequence to be encoded.
add_special_tokens: if set to ``True``, the sequences will be encoded with the special tokens relative
to their model.
"""<if_stmt>text_pair<is><none><block_start><if_stmt>add_special_tokens<block_start><return>self.add_special_tokens_single_sentence(self.convert_tokens_to_ids(self.tokenize(text)))<block_end><else_stmt><block_start><return>self.convert_tokens_to_ids(self.tokenize(text))<block_end><block_end>first_sentence_tokens=[self._convert_token_to_id(token)<for>token self.tokenize(text)]<line_sep>second_sentence_tokens=[self._convert_token_to_id(token)<for>token self.tokenize(text_pair)]<if_stmt>add_special_tokens<block_start><return>self.add_special_tokens_sentences_pair(first_sentence_tokens second_sentence_tokens)<block_end><else_stmt><block_start><return>first_sentence_tokens second_sentence_tokens<block_end><block_end><def_stmt>add_special_tokens_single_sentence self token_ids<block_start><raise>NotImplementedError<block_end><def_stmt>add_special_tokens_sentences_pair self token_ids_0 token_ids_1<block_start><raise>NotImplementedError<block_end><def_stmt>convert_ids_to_tokens self ids skip_special_tokens=<false><block_start>""" Converts a single index or a sequence of indices (integers) in a token "
(resp.) a sequence of tokens (str/unicode), using the vocabulary and added tokens.
Args:
skip_special_tokens: Don't decode special tokens (self.all_special_tokens). Default: False
"""<if_stmt>isinstance(ids int)<block_start><if_stmt>ids<in>self.added_tokens_decoder<block_start><return>self.added_tokens_decoder[ids]<block_end><else_stmt><block_start><return>self._convert_id_to_token(ids)<block_end><block_end>tokens=[]<for_stmt>index ids<block_start><if_stmt>index<in>self.all_special_ids<and>skip_special_tokens<block_start><continue><block_end><if_stmt>index<in>self.added_tokens_decoder<block_start>tokens.append(self.added_tokens_decoder[index])<block_end><else_stmt><block_start>tokens.append(self._convert_id_to_token(index))<block_end><block_end><return>tokens<block_end><def_stmt>_convert_id_to_token self index<block_start><raise>NotImplementedError<block_end><def_stmt>convert_tokens_to_string self tokens<block_start>""" Converts a sequence of tokens (string) in a single string.
The simplest way to do it is ' '.join(self.convert_ids_to_tokens(token_ids))
but we often want to remove sub-word tokenization artifacts at the same time.
"""<line_sep><return>' '.join(self.convert_ids_to_tokens(tokens))<block_end><def_stmt>decode self token_ids skip_special_tokens=<false> clean_up_tokenization_spaces=<true><block_start>"""
Converts a sequence of ids (integers) into a string, using the tokenizer and vocabulary
with options to remove special tokens and clean up tokenization spaces.
Similar to doing ``self.convert_tokens_to_string(self.convert_ids_to_tokens(token_ids))``.
"""<line_sep>filtered_tokens=self.convert_ids_to_tokens(token_ids skip_special_tokens=skip_special_tokens)<line_sep>text=self.convert_tokens_to_string(filtered_tokens)<if_stmt>self.sep_token<is><not><none><and>self.sep_token<in>text<block_start>text=text.replace(self.cls_token self.sep_token)<line_sep>split_text=list(filter(<lambda>sentence:len(sentence)<g>0 text.split(self.sep_token)))<if_stmt>clean_up_tokenization_spaces<block_start>clean_text=[self.clean_up_tokenization(text)<for>text split_text]<line_sep><return>clean_text<block_end><else_stmt><block_start><return>split_text<block_end><block_end><else_stmt><block_start><if_stmt>clean_up_tokenization_spaces<block_start>clean_text=self.clean_up_tokenization(text)<line_sep><return>clean_text<block_end><else_stmt><block_start><return>text<block_end><block_end><block_end>@property<def_stmt>special_tokens_map self<block_start>""" A dictionary mapping special token class attribute (cls_token, unk_token...) to their
values ('<unk>', '<cls>'...)
"""<line_sep>set_attr={}<for_stmt>attr self.SPECIAL_TOKENS_ATTRIBUTES<block_start>attr_value=getattr(self "_"+attr)<if_stmt>attr_value<block_start>set_attr[attr]=attr_value<block_end><block_end><return>set_attr<block_end>@property<def_stmt>all_special_tokens self<block_start>""" List all the special tokens ('<unk>', '<cls>'...) mapped to class attributes
(cls_token, unk_token...).
"""<line_sep>all_toks=[]<line_sep>set_attr=self.special_tokens_map<for_stmt>attr_value set_attr.values()<block_start>all_toks=all_toks+(attr_value<if>isinstance(attr_value (list tuple))<else>[attr_value])<block_end>all_toks=list(set(all_toks))<line_sep><return>all_toks<block_end>@property<def_stmt>all_special_ids self<block_start>""" List the vocabulary indices of the special tokens ('<unk>', '<cls>'...) mapped to
class attributes (cls_token, unk_token...).
"""<line_sep>all_toks=self.all_special_tokens<line_sep>all_ids=list(self._convert_token_to_id(t)<for>t all_toks)<line_sep><return>all_ids<block_end>@staticmethod<def_stmt>clean_up_tokenization out_string<block_start>""" Clean up a list of simple English tokenization artifacts like spaces before punctuations and abreviated forms.
"""<line_sep>out_string=out_string.replace(' .' '.').replace(' ?' '?').replace(' !' '!').replace(' ,' ',').replace(" ' " "'").replace(" n't" "n't").replace(" 'm" "'m").replace(" do not" " don't").replace(" 's" "'s").replace(" 've" "'ve").replace(" 're" "'re")<line_sep><return>out_string<block_end><block_end>
|
# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Python wrapper for prefetching_ops."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_stmt>tensorflow.python.data.experimental.ops prefetching_ops<import_from_stmt>tensorflow.python.util deprecation<line_sep>@deprecation.deprecated(<none> "Use `tf.data.experimental.prefetch_to_device(...)`.")<def_stmt>prefetch_to_device device buffer_size=<none><block_start>"""A transformation that prefetches dataset values to the given `device`.
NOTE: Although the transformation creates a `tf.data.Dataset`, the
transformation must be the final `Dataset` in the input pipeline.
Args:
device: A string. The name of a device to which elements will be prefetched.
buffer_size: (Optional.) The number of elements to buffer on `device`.
Defaults to an automatically chosen value.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
"""<line_sep><return>prefetching_ops.prefetch_to_device(device buffer_size)<block_end>@deprecation.deprecated(<none> "Use `tf.data.experimental.copy_to_device(...)`.")<def_stmt>copy_to_device target_device source_device="/cpu:0"<block_start>"""A transformation that copies dataset elements to the given `target_device`.
Args:
target_device: The name of a device to which elements will be copied.
source_device: The original device on which `input_dataset` will be placed.
Returns:
A `Dataset` transformation function, which can be passed to
`tf.data.Dataset.apply`.
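Example (added sketch, not part of the original docstring; the dataset and device
names below are illustrative only):
  import tensorflow as tf
  dataset = tf.data.Dataset.range(10)
  dataset = dataset.apply(copy_to_device("/gpu:0"))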
"""<line_sep><return>prefetching_ops.copy_to_device(target_device source_device)<block_end>
|
<import_stmt>os<import_stmt>sys<import_stmt>py<import_stmt>tempfile<try_stmt><block_start><import_from_stmt>io StringIO<block_end><except_stmt>ImportError<block_start><import_from_stmt>StringIO StringIO<block_end><if_stmt>sys.version_info<l>(3 0)<block_start><class_stmt>TextIO(StringIO)<block_start><def_stmt>write self data<block_start><if_stmt><not>isinstance(data unicode)<block_start>data=unicode(data getattr(self '_encoding' 'UTF-8') 'replace')<block_end>StringIO.write(self data)<block_end><block_end><block_end><else_stmt><block_start>TextIO=StringIO<block_end><try_stmt><block_start><import_from_stmt>io BytesIO<block_end><except_stmt>ImportError<block_start><class_stmt>BytesIO(StringIO)<block_start><def_stmt>write self data<block_start><if_stmt>isinstance(data unicode)<block_start><raise>TypeError("not a byte value: %r"%(data ))<block_end>StringIO.write(self data)<block_end><block_end><block_end>patchsysdict={0:'stdin' 1:'stdout' 2:'stderr'}<class_stmt>FDCapture<block_start>""" Capture IO to/from a given os-level filedescriptor. """<def_stmt>__init__ self targetfd tmpfile=<none> now=<true> patchsys=<false><block_start>""" save targetfd descriptor, and open a new
temporary file there. If no tmpfile is
specified, a tempfile.TemporaryFile() will be opened
in text mode.
"""<line_sep>self.targetfd=targetfd<if_stmt>tmpfile<is><none><and>targetfd<ne>0<block_start>f=tempfile.TemporaryFile('wb+')<line_sep>tmpfile=dupfile(f encoding="UTF-8")<line_sep>f.close()<block_end>self.tmpfile=tmpfile<line_sep>self._savefd=os.dup(self.targetfd)<if_stmt>patchsys<block_start>self._oldsys=getattr(sys patchsysdict[targetfd])<block_end><if_stmt>now<block_start>self.start()<block_end><block_end><def_stmt>start self<block_start><try_stmt><block_start>os.fstat(self._savefd)<block_end><except_stmt>OSError<block_start><raise>ValueError("saved filedescriptor not valid, "<concat>"did you call start() twice?")<block_end><if_stmt>self.targetfd<eq>0<and><not>self.tmpfile<block_start>fd=os.open(devnullpath os.O_RDONLY)<line_sep>os.dup2(fd 0)<line_sep>os.close(fd)<if_stmt>hasattr(self '_oldsys')<block_start>setattr(sys patchsysdict[self.targetfd] DontReadFromInput())<block_end><block_end><else_stmt><block_start>os.dup2(self.tmpfile.fileno() self.targetfd)<if_stmt>hasattr(self '_oldsys')<block_start>setattr(sys patchsysdict[self.targetfd] self.tmpfile)<block_end><block_end><block_end><def_stmt>done self<block_start>""" unpatch and clean up, returns the self.tmpfile (file object)
"""<line_sep>os.dup2(self._savefd self.targetfd)<line_sep>os.close(self._savefd)<if_stmt>self.targetfd<ne>0<block_start>self.tmpfile.seek(0)<block_end><if_stmt>hasattr(self '_oldsys')<block_start>setattr(sys patchsysdict[self.targetfd] self._oldsys)<block_end><return>self.tmpfile<block_end><def_stmt>writeorg self data<block_start>""" write a string to the original file descriptor
"""<line_sep>tempfp=tempfile.TemporaryFile()<try_stmt><block_start>os.dup2(self._savefd tempfp.fileno())<line_sep>tempfp.write(data)<block_end><finally_stmt><block_start>tempfp.close()<block_end><block_end><block_end><def_stmt>dupfile f mode=<none> buffering=0 raising=<false> encoding=<none><block_start>""" return a new open file object that's a duplicate of f
mode is duplicated if not given, 'buffering' controls
buffer size (defaulting to no buffering) and 'raising'
defines whether an exception is raised when an incompatible
file object is passed in (if raising is False, the file
object itself will be returned)
"""<try_stmt><block_start>fd=f.fileno()<line_sep>mode=mode<or>f.mode<block_end><except_stmt>AttributeError<block_start><if_stmt>raising<block_start><raise><block_end><return>f<block_end>newfd=os.dup(fd)<if_stmt>sys.version_info<ge>(3 0)<block_start><if_stmt>encoding<is><not><none><block_start>mode=mode.replace("b" "")<line_sep>buffering=<true><block_end><return>os.fdopen(newfd mode buffering encoding closefd=<true>)<block_end><else_stmt><block_start>f=os.fdopen(newfd mode buffering)<if_stmt>encoding<is><not><none><block_start><return>EncodedFile(f encoding)<block_end><return>f<block_end><block_end><class_stmt>EncodedFile(object)<block_start><def_stmt>__init__ self _stream encoding<block_start>self._stream=_stream<line_sep>self.encoding=encoding<block_end><def_stmt>write self obj<block_start><if_stmt>isinstance(obj unicode)<block_start>obj=obj.encode(self.encoding)<block_end><elif_stmt>isinstance(obj str)<block_start><pass><block_end><else_stmt><block_start>obj=str(obj)<block_end>self._stream.write(obj)<block_end><def_stmt>writelines self linelist<block_start>data=''.join(linelist)<line_sep>self.write(data)<block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self._stream name)<block_end><block_end><class_stmt>Capture(object)<block_start><def_stmt>call cls func *args **kwargs<block_start>""" return a (res, out, err) tuple where
out and err represent the output/error output
during function execution.
call the given function with args/kwargs
and capture output/error during its execution.
"""<line_sep>so=cls()<try_stmt><block_start>res=func(*args **kwargs)<block_end><finally_stmt><block_start>out,err=so.reset()<block_end><return>res out err<block_end>call=classmethod(call)<def_stmt>reset self<block_start>""" reset sys.stdout/stderr and return captured output as strings. """<if_stmt>hasattr(self '_reset')<block_start><raise>ValueError("was already reset")<block_end>self._reset=<true><line_sep>outfile,errfile=self.done(save=<false>)<line_sep>out,err="" ""<if_stmt>outfile<and><not>outfile.closed<block_start>out=outfile.read()<line_sep>outfile.close()<block_end><if_stmt>errfile<and>errfile<ne>outfile<and><not>errfile.closed<block_start>err=errfile.read()<line_sep>errfile.close()<block_end><return>out err<block_end><def_stmt>suspend self<block_start>""" return current snapshot captures, memorize tempfiles. """<line_sep>outerr=self.readouterr()<line_sep>outfile,errfile=self.done()<line_sep><return>outerr<block_end><block_end><class_stmt>StdCaptureFD(Capture)<block_start>""" This class allows to capture writes to FD1 and FD2
and may connect a NULL file to FD0 (and prevent
reads from sys.stdin). If any of the 0,1,2 file descriptors
is invalid it will not be captured.
"""<def_stmt>__init__ self out=<true> err=<true> mixed=<false> in_=<true> patchsys=<true> now=<true><block_start>self._options={"out":out "err":err "mixed":mixed "in_":in_ "patchsys":patchsys "now":now }<line_sep>self._save()<if_stmt>now<block_start>self.startall()<block_end><block_end><def_stmt>_save self<block_start>in_=self._options['in_']<line_sep>out=self._options['out']<line_sep>err=self._options['err']<line_sep>mixed=self._options['mixed']<line_sep>patchsys=self._options['patchsys']<if_stmt>in_<block_start><try_stmt><block_start>self.in_=FDCapture(0 tmpfile=<none> now=<false> patchsys=patchsys)<block_end><except_stmt>OSError<block_start><pass><block_end><block_end><if_stmt>out<block_start>tmpfile=<none><if_stmt>hasattr(out 'write')<block_start>tmpfile=out<block_end><try_stmt><block_start>self.out=FDCapture(1 tmpfile=tmpfile now=<false> patchsys=patchsys)<line_sep>self._options['out']=self.out.tmpfile<block_end><except_stmt>OSError<block_start><pass><block_end><block_end><if_stmt>err<block_start><if_stmt>out<and>mixed<block_start>tmpfile=self.out.tmpfile<block_end><elif_stmt>hasattr(err 'write')<block_start>tmpfile=err<block_end><else_stmt><block_start>tmpfile=<none><block_end><try_stmt><block_start>self.err=FDCapture(2 tmpfile=tmpfile now=<false> patchsys=patchsys)<line_sep>self._options['err']=self.err.tmpfile<block_end><except_stmt>OSError<block_start><pass><block_end><block_end><block_end><def_stmt>startall self<block_start><if_stmt>hasattr(self 'in_')<block_start>self.in_.start()<block_end><if_stmt>hasattr(self 'out')<block_start>self.out.start()<block_end><if_stmt>hasattr(self 'err')<block_start>self.err.start()<block_end><block_end><def_stmt>resume self<block_start>""" resume capturing with original temp files. """<line_sep>self.startall()<block_end><def_stmt>done self save=<true><block_start>""" return (outfile, errfile) and stop capturing. """<line_sep>outfile=errfile=<none><if_stmt>hasattr(self 'out')<and><not>self.out.tmpfile.closed<block_start>outfile=self.out.done()<block_end><if_stmt>hasattr(self 'err')<and><not>self.err.tmpfile.closed<block_start>errfile=self.err.done()<block_end><if_stmt>hasattr(self 'in_')<block_start>tmpfile=self.in_.done()<block_end><if_stmt>save<block_start>self._save()<block_end><return>outfile errfile<block_end><def_stmt>readouterr self<block_start>""" return snapshot value of stdout/stderr capturings. """<if_stmt>hasattr(self "out")<block_start>out=self._readsnapshot(self.out.tmpfile)<block_end><else_stmt><block_start>out=""<block_end><if_stmt>hasattr(self "err")<block_start>err=self._readsnapshot(self.err.tmpfile)<block_end><else_stmt><block_start>err=""<block_end><return>[out err]<block_end><def_stmt>_readsnapshot self f<block_start>f.seek(0)<line_sep>res=f.read()<line_sep>enc=getattr(f "encoding" <none>)<if_stmt>enc<block_start>res=py.builtin._totext(res enc "replace")<block_end>f.truncate(0)<line_sep>f.seek(0)<line_sep><return>res<block_end><block_end><class_stmt>StdCapture(Capture)<block_start>""" This class allows to capture writes to sys.stdout|stderr "in-memory"
and will raise errors on tries to read from sys.stdin. It only
modifies sys.stdout|stderr|stdin attributes and does not
touch underlying File Descriptors (use StdCaptureFD for that).
"""<def_stmt>__init__ self out=<true> err=<true> in_=<true> mixed=<false> now=<true><block_start>self._oldout=sys.stdout<line_sep>self._olderr=sys.stderr<line_sep>self._oldin=sys.stdin<if_stmt>out<and><not>hasattr(out 'file')<block_start>out=TextIO()<block_end>self.out=out<if_stmt>err<block_start><if_stmt>mixed<block_start>err=out<block_end><elif_stmt><not>hasattr(err 'write')<block_start>err=TextIO()<block_end><block_end>self.err=err<line_sep>self.in_=in_<if_stmt>now<block_start>self.startall()<block_end><block_end><def_stmt>startall self<block_start><if_stmt>self.out<block_start>sys.stdout=self.out<block_end><if_stmt>self.err<block_start>sys.stderr=self.err<block_end><if_stmt>self.in_<block_start>sys.stdin=self.in_=DontReadFromInput()<block_end><block_end><def_stmt>done self save=<true><block_start>""" return (outfile, errfile) and stop capturing. """<line_sep>outfile=errfile=<none><if_stmt>self.out<and><not>self.out.closed<block_start>sys.stdout=self._oldout<line_sep>outfile=self.out<line_sep>outfile.seek(0)<block_end><if_stmt>self.err<and><not>self.err.closed<block_start>sys.stderr=self._olderr<line_sep>errfile=self.err<line_sep>errfile.seek(0)<block_end><if_stmt>self.in_<block_start>sys.stdin=self._oldin<block_end><return>outfile errfile<block_end><def_stmt>resume self<block_start>""" resume capturing with original temp files. """<line_sep>self.startall()<block_end><def_stmt>readouterr self<block_start>""" return snapshot value of stdout/stderr capturings. """<line_sep>out=err=""<if_stmt>self.out<block_start>out=self.out.getvalue()<line_sep>self.out.truncate(0)<line_sep>self.out.seek(0)<block_end><if_stmt>self.err<block_start>err=self.err.getvalue()<line_sep>self.err.truncate(0)<line_sep>self.err.seek(0)<block_end><return>out err<block_end><block_end><class_stmt>DontReadFromInput<block_start>"""Temporary stub class. Ideally when stdin is accessed, the
capturing should be turned off, with possibly all data captured
so far sent to the screen. This should be configurable, though,
because in automated test runs it is better to crash than
hang indefinitely.
"""<def_stmt>read self *args<block_start><raise>IOError("reading from stdin while output is captured")<block_end>readline=read<line_sep>readlines=read<line_sep>__iter__=read<def_stmt>fileno self<block_start><raise>ValueError("redirected Stdin is pseudofile, has no fileno()")<block_end><def_stmt>isatty self<block_start><return><false><block_end><def_stmt>close self<block_start><pass><block_end><block_end><try_stmt><block_start>devnullpath=os.devnull<block_end><except_stmt>AttributeError<block_start><if_stmt>os.name<eq>'nt'<block_start>devnullpath='NUL'<block_end><else_stmt><block_start>devnullpath='/dev/null'<block_end><block_end>
|
# Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
<import_stmt>subprocess<import_from_stmt>polytracker version<as>version_string<line_sep># -- Project information -----------------------------------------------------
project="PolyTracker"<line_sep>copyright="2019–2021, Trail of Bits"<line_sep>author="<NAME> and <NAME>"<line_sep># The full version, including alpha/beta/rc tags
release=version_string()<line_sep>version=release<line_sep>github_url=f"https://github.com/trailofbits/polytracker"<line_sep># Has this version been released yet?
<if_stmt>subprocess.call(["git" "rev-list" f"v{version}"] stdout=subprocess.DEVNULL stderr=subprocess.DEVNULL)<eq>0# There is a tag associated with this release
<block_start>github_url=f"{github_url}/releases/tag/v{version}"<block_end># -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions=['sphinx.ext.autodoc' 'sphinx.ext.napoleon' 'sphinx.ext.intersphinx' 'sphinx.ext.todo' 'sphinx.ext.autosectionlabel' 'sphinx_rtd_theme' #'sphinxcontrib.fulltoc'
]<line_sep># Add any paths that contain templates here, relative to this directory.
templates_path=['_templates']<line_sep># List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns=['_build' 'Thumbs.db' '.DS_Store']<line_sep># -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
#html_theme = 'classic'
html_theme='sphinx_rtd_theme'<line_sep>html_theme_options={'canonical_url':f'https://trailofbits.github.io/polytracker/latest/' 'logo_only':<false> 'display_version':<false> # This manually configured in our custom templates
'prev_next_buttons_location':'bottom' 'style_external_links':<true> #'vcs_pageview_mode': '',
#'style_nav_header_background': 'white',
# Toc options
'collapse_navigation':<true> 'sticky_navigation':<true> 'navigation_depth':4 'includehidden':<true> 'titles_only':<false>}<line_sep>html_context={'github_url':github_url}<line_sep># Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path=['_static']<line_sep>#html_js_files = [
# 'localtoc.js',
#]
<def_stmt>skip app what name obj would_skip options<block_start><if_stmt>name<eq>"__init__"<block_start><return><false><block_end><return>would_skip<block_end><def_stmt>docstring_callback app what name obj options lines:list<block_start><if_stmt>what<eq>'class'<or>what<eq>'function'<block_start><if_stmt>lines<and>lines[0].strip()<block_start>lines.insert(1 '')<line_sep>lines.insert(2 name)<line_sep>lines.insert(3 '*'<times>len(name))<if_stmt>len(lines)<eq>4<block_start>lines.append('')<block_end><block_end><block_end><block_end><def_stmt>setup app<block_start>app.connect("autodoc-skip-member" skip)<line_sep>#app.connect('autodoc-process-docstring', docstring_callback)
<block_end>add_package_names=<false><line_sep># prefix each section label with the name of the document it is in, followed by a colon
autosectionlabel_prefix_document=<true><line_sep>intersphinx_mapping={'python':('https://docs.python.org/3' <none>)}<line_sep>napoleon_include_private_with_doc=<true><line_sep>napoleon_include_special_with_doc=<true><line_sep>todo_include_todos=<true><line_sep>#autodoc_default_options = {
# 'inherited-members': True
#}
|
<import_from_stmt>seedwork.application.modules BusinessModule<import_from_stmt>.domain.repositories ListingRepository<import_from_stmt>modules.catalog.application.query.get_all_listings GetAllListings get_all_listings <import_from_stmt>modules.catalog.application.query.get_listings_of_seller GetListingsOfSeller get_listings_of_seller <import_from_stmt>modules.catalog.application.query.get_listing_details GetListingDetails get_listing_details <import_from_stmt>modules.catalog.application.command.create_listing_draft CreateListingDraftCommand create_listing_draft <class_stmt>CatalogModule(BusinessModule)<block_start>query_handlers={GetAllListings:<lambda>self q:get_all_listings(q self.listing_repository) GetListingDetails:<lambda>self q:get_listing_details(q self.listing_repository) GetListingsOfSeller:<lambda>self q:get_listings_of_seller(q self.listing_repository) }<line_sep>command_handlers={CreateListingDraftCommand:<lambda>self c:create_listing_draft(c self.listing_repository) }<def_stmt>__init__ self listing_repository:ListingRepository <arrow><none><block_start>self.listing_repository=listing_repository<block_end>@staticmethod<def_stmt>create container<block_start>"""Factory method for creating a module by using dependencies from a DI container"""<line_sep><return>CatalogModule(logger=container.logger() listing_repository=container.listing_repository() )<block_end><block_end>
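# Added usage sketch (not part of the original module): dispatching a query through the
# handler table above. The repository instance is assumed to exist; query constructor
# arguments, if any, are omitted here.
# module = CatalogModule(listing_repository=listing_repository)
# handler = CatalogModule.query_handlers[GetAllListings]
# listings = handler(module, GetAllListings())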
|
<import_stmt>json<import_from_stmt>os.path join<import_from_stmt>typing List Type Optional<import_from_stmt>fedot.core.data.data InputData<import_from_stmt>fedot.core.log Log default_log<import_from_stmt>fedot.core.pipelines.pipeline Pipeline<import_from_stmt>fedot.core.utils default_fedot_data_dir<import_from_stmt>fedot.sensitivity.operations_hp_sensitivity.multi_operations_sensitivity MultiOperationsHPAnalyze<import_from_stmt>fedot.sensitivity.sa_requirements SensitivityAnalysisRequirements<class_stmt>PipelineAnalysis<block_start>"""
This class is for analyzing the Pipeline as the black-box model,
using analysis approaches defined for whole pipeline perturbation,
i.e. MultiOperationsHPAnalyze.
:param pipeline: pipeline object to analyze
:param train_data: data used for Pipeline training
:param test_data: data used for Pipeline validation
:param approaches: methods applied to pipeline \
Default: [MultiOperationsHPAnalyze]
:param requirements: extra requirements to define specific details for different approaches.\
See SensitivityAnalysisRequirements class documentation.
:param path_to_save: path to save results to. Default: ~home/Fedot/sensitivity/pipeline_sa
:param log: Log object to record messages
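Example (added sketch, not part of the original docstring; the pipeline and the
train/test InputData objects are assumed to exist already):
  analysis = PipelineAnalysis(pipeline=pipeline, train_data=train_data, test_data=test_data)
  results = analysis.analyze()  # {'MultiOperationsHPAnalyze': ...}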
"""<def_stmt>__init__ self pipeline:Pipeline train_data:InputData test_data:InputData approaches:Optional[List[Type[MultiOperationsHPAnalyze]]]=<none> requirements:SensitivityAnalysisRequirements=<none> path_to_save=<none> log:Log=<none><block_start>self.pipeline=pipeline<line_sep>self.train_data=train_data<line_sep>self.test_data=test_data<line_sep>self.requirements=SensitivityAnalysisRequirements()<if>requirements<is><none><else>requirements<line_sep>self.approaches=[MultiOperationsHPAnalyze]<if>approaches<is><none><else>approaches<line_sep>self.path_to_save=join(default_fedot_data_dir() 'sensitivity' 'pipeline_sa')<if>path_to_save<is><none><else>path_to_save<line_sep>self.log=default_log(__name__)<if>log<is><none><else>log<block_end><def_stmt>analyze self<arrow>dict<block_start>"""
Apply defined approaches for the black-box pipeline analysis
:return: dict with approach names as keys and their analysis results as values
"""<line_sep>all_approaches_results=dict()<for_stmt>approach self.approaches<block_start>analyze_result=approach(pipeline=self.pipeline train_data=self.train_data test_data=self.test_data requirements=self.requirements).analyze()<line_sep>all_approaches_results[f'{approach.__name__}']=analyze_result<block_end><if_stmt>self.requirements.is_save<block_start>self._save_results_to_json(all_approaches_results)<block_end><return>all_approaches_results<block_end><def_stmt>_save_results_to_json self result:dict<block_start>result_file=join(self.path_to_save 'pipeline_SA_results.json')<with_stmt>open(result_file 'w' encoding='utf-8')<as>file<block_start>file.write(json.dumps(result indent=4))<block_end><block_end><block_end>
|
# Particles for pygame
# by KidsCanCode 2015
# For educational purposes only
<import_stmt>pygame<import_stmt>random<line_sep># TODO: particle rotations
# TODO: test with varied particle images
# TODO: more particle paths
<def_stmt>interpolate v1 v2 range<block_start><return>pygame.math.Vector2(v1.x+(v2.x-v1.x)<times>range v1.y+(v2.y-v1.y)<times>range)<block_end><class_stmt>Particle(pygame.sprite.Sprite)<block_start><def_stmt>__init__ self game image pos vel life lifetime fade_start dorotate<block_start>pygame.sprite.Sprite.__init__(self)<line_sep>self.game=game<line_sep>self.pos=pos<line_sep>self.vel=vel<line_sep>self.rot_cache={}<line_sep>self.base_image=image<line_sep>self.dorotate=dorotate<if_stmt>dorotate<block_start>self.image=pygame.transform.rotate(self.base_image -self.rot)<block_end><else_stmt><block_start>self.image=self.base_image.copy()<block_end>self.rect=self.image.get_rect()<line_sep>self.lifetime=lifetime<line_sep>self.life=life<line_sep>self.fade_start=fade_start<line_sep>self.duration=lifetime-fade_start<line_sep>self.update()<block_end><def_stmt>update self# if self.dorotate:
# old_center = self.rect.center
# if self.rot in self.rot_cache:
# self.image = self.rot_cache[self.rot]
# else:
# self.image = pygame.transform.rotate(self.base_image, -self.rot)
# self.rot_cache[self.rot] = self.image
# self.rect = self.image.get_rect()
# self.rect.center = old_center
<block_start>self.life<augadd>self.game.dt<line_sep>self.fade()<line_sep>self.pos<augadd>self.vel<line_sep>self.rect.centerx=self.pos.x# + self.game.OFFSET.x
self.rect.centery=self.pos.y<block_end># + self.game.OFFSET.y
<def_stmt>blit self<block_start><return>self.game.screen.blit(self.image self.rect special_flags=pygame.BLEND_ADD)<block_end><def_stmt>fade self<block_start><if_stmt>self.life<g>self.fade_start<block_start><try_stmt><block_start>ratio=(self.life-self.fade_start)/self.duration<block_end><except_stmt>ZeroDivisionError<block_start>ratio=1.0<block_end><if_stmt>ratio<g>1.0<block_start>ratio=1.0<block_end>mask=int(255<times>(1-ratio))<line_sep>self.image.fill([mask mask mask] special_flags=pygame.BLEND_MIN)<block_end><block_end><def_stmt>is_dead self<block_start><if_stmt>self.life<g>self.lifetime<block_start><return><true><block_end><return><false><block_end><block_end><class_stmt>ParticleEmitter<block_start><def_stmt>__init__ self game parent offset vel image count lifetime fade_start size angle_range dorotate=<false><block_start>self.game=game<line_sep>self.parent=parent<line_sep>self.offset=offset<line_sep>self.particle_vel=vel<line_sep>self.pos=self.parent.pos+self.game.OFFSET+self.offset.rotate(self.parent.rot)<line_sep>self.base_image=image<line_sep>self.size=size<line_sep>self.angle_range=angle_range<line_sep>self.image=pygame.transform.scale(self.base_image (self.size self.size))<line_sep>self.count=count<line_sep>self.lifetime=lifetime<line_sep>self.fade_start=fade_start<line_sep>self.particles=[]<line_sep>self.timer=0<line_sep>self.prevcurve=[self.pos<for>x range(3)]<line_sep>self.active=<true><block_end><def_stmt>print_state self<block_start>print("c:{}, p:{}".format(self.count len(self.particles)))<block_end><def_stmt>update self<block_start>self.pos=self.parent.pos+self.game.OFFSET+self.offset.rotate(-self.parent.rot)<line_sep>self.rand_angle=random.randint(-self.angle_range self.angle_range)<line_sep># update all particles
<for_stmt>part self.particles<block_start>part.update()<if_stmt>part.is_dead()<block_start>self.particles.remove(part)<line_sep># print("p.kill")
<block_end><block_end># create a new particle
<if_stmt>self.count<ne>0<and>self.active<block_start>self.timer<augadd>self.game.dt<line_sep>newparticles=self.count<times>self.timer<if_stmt>newparticles<g>1<block_start><for_stmt>i range(int(newparticles))<block_start>t=i/newparticles<line_sep>time_elapsed=(1.0-t)<times>self.game.dt<line_sep>vel=self.particle_vel.rotate(-self.parent.rot+self.rand_angle)<line_sep>pos=interpolate(self.prevcurve[0] self.pos t)<line_sep>pos<augadd>(self.parent.vel+vel)<times>time_elapsed<line_sep># pos += vel * time_elapsed
init_life=time_elapsed<line_sep>self.timer=0<line_sep># print("new part: pos: {} vel: {}".format(pos, vel))
self.particles.append(Particle(self.game self.image pos vel init_life self.lifetime self.fade_start <false>))<block_end><block_end><block_end>self.prevcurve[2]=self.prevcurve[1]<line_sep>self.prevcurve[1]=self.prevcurve[0]<line_sep>self.prevcurve[0]=self.pos<block_end><def_stmt>draw self<block_start>rects=[]<for_stmt>part self.particles<block_start>rects.append(part.blit())<block_end><return>rects<block_end><def_stmt>kill_all self<block_start>self.count=0<line_sep>self.active=<false><line_sep>self.particles=[]<block_end><block_end>
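# Added illustration (not part of the original source): interpolate() above is a plain linear
# interpolation between two points, e.g.
# a, b = pygame.math.Vector2(0, 0), pygame.math.Vector2(10, 20)
# interpolate(a, b, 0.25)  # -> Vector2(2.5, 5)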
|
"""
Parsing of the datasets from the Rucoref-2015 competition (anaphora resolution etc.)
Description of the original dataset and task: http://www.dialog-21.ru/evaluation/2014/anaphora/
"""<import_stmt>io<import_stmt>os<import_stmt>pandas<as>pd<import_stmt>csv<line_sep>rucoref_folder='../../../data/rucoref_2015/rucoref_29.10.2015'<line_sep>output_file='../../../tmp/ruanaphora_corpus.dat'<line_sep>df_tokens=pd.read_csv(os.path.join(rucoref_folder 'Tokens.txt') encoding='utf-8' delimiter='\t' quoting=3)<line_sep>df_groups=pd.read_csv(os.path.join(rucoref_folder 'Groups.txt') encoding='utf-8' delimiter='\t' quoting=3)<line_sep>groupid2content=dict(zip(df_groups['group_id'].values df_groups['content']))<line_sep>groupid2link=dict(zip(df_groups['group_id'].values df_groups['link']))<line_sep>token2refcontent=dict()<for_stmt>i,r df_groups.iterrows()<block_start>doc_id=r['doc_id']<line_sep>shift=r['shift']<line_sep>link=r['link']<line_sep>attr=r['attributes']<if_stmt>attr<in>['ref:def|str:pron|type:anaph' 'ref:def|str:pron|type:coref']<block_start>token_id=(doc_id shift)<if_stmt>link<ne>0<block_start>new_groupid=link<line_sep>njump=0<while_stmt>njump<l>5<block_start>link2=groupid2link[new_groupid]<if_stmt>link2<ne>0<block_start>new_groupid=groupid2link[new_groupid]<line_sep>njump<augadd>1<block_end><else_stmt><block_start><break><block_end><block_end>token2refcontent[token_id]=groupid2content[new_groupid]<block_end><block_end><block_end>df_res=pd.DataFrame(columns='doc_id shift token lemma gram refcontent'.split() index=<none>)<line_sep>n_discovered=0<for_stmt>i,r df_tokens.iterrows()<block_start>doc_id=r['doc_id']<line_sep>shift=r['shift']<line_sep>token_id=(doc_id shift)<line_sep>token=r['token']<line_sep>lemma=r['lemma']<line_sep>gram=r['gram']<line_sep>refcontent=token2refcontent.get(token_id '')<line_sep>n_discovered<augadd>refcontent<ne>''<line_sep>df_res=df_res.append({'doc_id':doc_id 'shift':shift 'token':token 'lemma':lemma 'gram':gram 'refcontent':refcontent} ignore_index=<true>)<block_end>df_res.to_csv(output_file quoting=csv.QUOTE_MINIMAL index=<false> sep='\t')<line_sep>print(u'раскрыто анафор={}'.format(n_discovered))<line_sep>
|
"""(De)serialisation of entries.
When adding entries, these are saved via the JSON API - using the functionality
of this module to obtain the appropriate data structures from
`beancount.core.data`. Similarly, for the full entry completion, a JSON
representation of the entry is provided.
This is not intended to work well enough for full roundtrips yet.
"""<import_stmt>datetime<import_stmt>functools<import_stmt>re<import_from_stmt>typing Any<import_from_stmt>typing FrozenSet<import_from_stmt>typing Tuple<import_from_stmt>beancount.core.amount Amount<import_from_stmt>beancount.core.data Balance<import_from_stmt>beancount.core.data Directive<import_from_stmt>beancount.core.data EMPTY_SET<import_from_stmt>beancount.core.data Note<import_from_stmt>beancount.core.data Posting<import_from_stmt>beancount.core.data Transaction<import_from_stmt>beancount.core.number D<import_from_stmt>beancount.core.position to_string<as>position_to_string<import_from_stmt>beancount.parser.parser parse_string<import_from_stmt>fava.helpers FavaAPIException<import_from_stmt>fava.util.date parse_date<def_stmt>extract_tags_links string:str <arrow>Tuple[str FrozenSet[str] FrozenSet[str]]<block_start>"""Extract tags and links from a narration string.
Args:
string: A string, possibly containing tags (`#tag`) and links
(`^link`).
Returns:
A triple (new_string, tags, links) where `new_string` is `string`
stripped of tags and links.
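Example (added sketch, not part of the original docstring; the narration string is
illustrative only):
  extract_tags_links("Lunch with friends #food ^trip-2020")
  # -> ("Lunch with friends", frozenset({'food'}), frozenset({'trip-2020'}))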
"""<if_stmt>string<is><none><block_start><return><none> EMPTY_SET EMPTY_SET<block_end>tags=re.findall(r"(?:^|\s)#([A-Za-z0-9\-_/.]+)" string)<line_sep>links=re.findall(r"(?:^|\s)\^([A-Za-z0-9\-_/.]+)" string)<line_sep>new_string=re.sub(r"(?:^|\s)[#^]([A-Za-z0-9\-_/.]+)" "" string).strip()<line_sep><return>new_string frozenset(tags) frozenset(links)<block_end>@functools.singledispatch<def_stmt>serialise entry:Directive<arrow>Any<block_start>"""Serialise an entry."""<if_stmt><not>entry<block_start><return><none><block_end>ret=entry._asdict()<line_sep>ret["type"]=entry.__class__.__name__<if_stmt>isinstance(entry Transaction)<block_start>ret["payee"]=entry.payee<or>""<if_stmt>entry.tags<block_start>ret["narration"]<augadd>" "+" ".join(["#"+t<for>t entry.tags])<block_end><if_stmt>entry.links<block_start>ret["narration"]<augadd>" "+" ".join(["^"+link<for>link entry.links])<block_end><del_stmt>ret["links"]<del_stmt>ret["tags"]<line_sep>ret["postings"]=[serialise(pos)<for>pos entry.postings]<block_end><elif_stmt>ret["type"]<eq>"Balance"<block_start>amt=ret["amount"]<line_sep>ret["amount"]={"number":str(amt.number) "currency":amt.currency}<block_end><return>ret<block_end>@serialise.register(Posting)<def_stmt>_serialise_posting posting:Posting<arrow>Any<block_start>"""Serialise a posting."""<if_stmt>isinstance(posting.units Amount)<block_start>position_str=position_to_string(posting)<block_end><else_stmt><block_start>position_str=""<block_end><if_stmt>posting.price<is><not><none><block_start>position_str<augadd>f" @ {posting.price.to_string()}"<block_end><return>{"account":posting.account "amount":position_str}<block_end><def_stmt>deserialise_posting posting:Any<arrow>Posting<block_start>"""Parse JSON to a Beancount Posting."""<line_sep>amount=posting.get("amount" "")<line_sep>entries,errors,_=parse_string(f'2000-01-01 * "" ""\n Assets:Account {amount}')<if_stmt>errors<block_start><raise>FavaAPIException(f"Invalid amount: {amount}")<block_end>txn=entries[0]<assert_stmt>isinstance(txn Transaction)<line_sep>pos=txn.postings[0]<line_sep><return>pos._replace(account=posting["account"] meta=<none>)<block_end><def_stmt>deserialise json_entry:Any<arrow>Directive<block_start>"""Parse JSON to a Beancount entry.
Args:
json_entry: The entry.
Raises:
KeyError: if one of the required entry fields is missing.
FavaAPIException: if the type of the given entry is not supported.
"""<line_sep>date=parse_date(json_entry.get("date" ""))[0]<if_stmt><not>isinstance(date datetime.date)<block_start><raise>FavaAPIException("Invalid entry date.")<block_end><if_stmt>json_entry["type"]<eq>"Transaction"<block_start>narration,tags,links=extract_tags_links(json_entry["narration"])<line_sep>postings=[deserialise_posting(pos)<for>pos json_entry["postings"]]<line_sep><return>Transaction(json_entry["meta"] date json_entry.get("flag" "") json_entry.get("payee" "") narration tags links postings )<block_end><if_stmt>json_entry["type"]<eq>"Balance"<block_start>raw_amount=json_entry["amount"]<line_sep>amount=Amount(D(str(raw_amount["number"])) raw_amount["currency"])<line_sep><return>Balance(json_entry["meta"] date json_entry["account"] amount <none> <none>)<block_end><if_stmt>json_entry["type"]<eq>"Note"<block_start>comment=json_entry["comment"].replace('"' "")<line_sep><return>Note(json_entry["meta"] date json_entry["account"] comment)<block_end><raise>FavaAPIException("Unsupported entry type.")<block_end>
|
# Configuration file for ipython.
#------------------------------------------------------------------------------
# InteractiveShellApp(Configurable) configuration
#------------------------------------------------------------------------------
## lines of code to run at IPython startup.
c.InteractiveShellApp.exec_lines=['%load_ext autoreload' '%autoreload 2']<line_sep>## A list of dotted module names of IPython extensions to load.
c.InteractiveShellApp.extensions=['autoreload']<line_sep>
|
#! /usr/bin/env python
# -*- coding:utf-8 -*-
<import_stmt>re<import_from_stmt>django.utils.deprecation MiddlewareMixin<import_from_stmt>django.conf settings<import_from_stmt>django.shortcuts HttpResponse<class_stmt>RbacMiddleware(MiddlewareMixin)<block_start><def_stmt>process_request self request<block_start>current_url=request.path_info<line_sep># Whitelist: URLs that do not need permission checks
<for_stmt>valid settings.VALID_URL<block_start><if_stmt>re.match(valid current_url)<block_start><return><none><block_end><block_end># Returning None from the middleware means it does not intercept and the view function runs
# Fetch the permissions
permission_dict=request.session.get(settings.INIT_PERMISSION)<if_stmt><not>permission_dict<block_start><return>HttpResponse('未获取到用户数据,请登录!')<block_end># Breadcrumb navigation
url_navigation=[{'title':'首页' 'url':'/index/'}]<line_sep># The code here handles: /logout /index
<for_stmt>url settings.NO_PERMISSION_LIST<block_start><if_stmt>re.match(url request.path_info)# Login required, but no permission check needed
<block_start>request.current_menu_selected=0<line_sep>request.url_navigation=url_navigation<line_sep><return><none><block_end><block_end>flag=<false><for_stmt>item permission_dict.values()<block_start>reg='^%s$'%item['url']<if_stmt>re.match(reg current_url)# Get the id of the currently selected menu: check pid first, then id
# Attaching non-menu items: if pid is set this is a non-menu permission and the pid points to the parent menu's id; if only id is set this is a second-level menu permission
# Note: checking item['pid'] or item['id'] here may depend on their order
<block_start>request.current_menu_selected=item['pid']<or>item['id']<line_sep>flag=<true><line_sep># Build the navigation
<if_stmt>item['pid']<block_start>url_navigation.extend([{'title':item['p_title'] 'url':item['p_url']} {'title':item['title'] 'url':item['url'] 'class':'active'}])<block_end><else_stmt><block_start>url_navigation.extend([{'title':item['title'] 'url':item['url'] 'class':'active'} ])<block_end>request.url_navigation=url_navigation<line_sep><break><block_end><block_end><if_stmt><not>flag<block_start><return>HttpResponse('无权访问')<block_end><block_end><block_end>
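# Added illustration (not part of the original source): the rough shape of the session data this
# middleware expects under settings.INIT_PERMISSION; keys follow the lookups above, the values
# are assumptions for illustration only.
# request.session[settings.INIT_PERMISSION] = {
#     '9': {'id': 9, 'pid': None, 'title': 'Customer list', 'url': '/customer/list/',
#           'p_title': None, 'p_url': None},
# }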
|
<import_from_stmt>colossalai.utils free_port get_current_device<import_from_stmt>colossalai.utils.model.colo_init_context ColoInitContext<import_from_stmt>colossalai.testing rerun_if_address_is_in_use<import_from_stmt>colossalai.tensor ComputePattern ParallelAction<import_from_stmt>functools partial<import_from_stmt>colossalai.core global_context<as>gpc<import_from_stmt>colossalai.context ParallelMode<import_from_stmt>colossalai.nn.parallel.layers init_colo_module<import_from_stmt>colossalai.nn.parallel.data_parallel ColoDDP<import_from_stmt>colossalai.nn.optimizer ColoOptimizer<import_stmt>colossalai<import_stmt>torch<import_stmt>torch.multiprocessing<as>mp<import_stmt>pytest<class_stmt>Net(torch.nn.Module)<block_start><def_stmt>__init__ self<block_start>super(Net self).__init__()<line_sep>self.embed=torch.nn.Embedding(20 4)<line_sep>self.proj=torch.nn.Linear(4 8)<block_end><def_stmt>forward self x# move input to cpu and restore output
<block_start>current_dev=x.device<line_sep>x=x.to('cpu')<line_sep>x=self.embed(x)<line_sep>x=x.to(current_dev)<line_sep>x=self.proj(x)<line_sep><return>x<block_end><block_end><def_stmt>run_hybrid_device use_ddp<block_start><with_stmt>ColoInitContext(device=get_current_device())<block_start>model=Net()<block_end>real_model=model<if_stmt>use_ddp<block_start>model=ColoDDP(model)<line_sep>real_model=model.module<block_end>print(f'embedding weight size: {real_model.embed.weight.size()} | device: {real_model.embed.weight.device}')<line_sep>#print(f'linear weight size: {real_model.proj.weight.size()} | device: {real_model.proj.weight.device}')
parallel_action=ParallelAction(ComputePattern.TP1D)<line_sep>init_colo_module(model parallel_action recursive=<true> mode='col')<line_sep># use cpu gloo to handle embedding
real_model.embed.to('cpu')<line_sep>gloo_group_tp=gpc.get_cpu_group(ParallelMode.PARALLEL_1D)<line_sep>real_model.embed.weight.spec.dist_spec.process_group=gloo_group_tp<line_sep>print(f'embedding weight size: {real_model.embed.weight.size()} | new device: {real_model.embed.weight.device}')<line_sep>#print(f'linear weight size: {real_model.proj.weight.size()} | new device: {real_model.proj.weight.device}')
optimizer=ColoOptimizer(dict(model.named_parameters()) torch.optim.SGD lr=0.1)<line_sep>data=torch.randint(low=0 high=20 size=(16 ) device=get_current_device())<line_sep>out=model(data)<line_sep>out.sum().backward()<line_sep>optimizer.step()<block_end><def_stmt>run_dist rank world_size port use_ddp<block_start><if_stmt>use_ddp<and>world_size<eq>1<block_start><return><block_end>tp_world_size=world_size<floordiv>2<if>use_ddp<else>world_size<line_sep>config=dict(parallel=dict(tensor=dict(mode="1d" size=tp_world_size) ))<line_sep>colossalai.launch(config=config rank=rank world_size=world_size host='localhost' port=port backend='nccl')<line_sep>run_hybrid_device(use_ddp)<block_end>@[email protected]('world_size' [1 4])@pytest.mark.parametrize('use_ddp' [<false> <true>])@rerun_if_address_is_in_use()# Working for simulate the embedding(CPU DP+TP) -> nn(GPU DP+TP)
<def_stmt>_test_hybrid_device world_size use_ddp<block_start>run_func=partial(run_dist world_size=world_size port=free_port() use_ddp=use_ddp)<line_sep>mp.spawn(run_func nprocs=world_size)<block_end><if_stmt>__name__<eq>'__main__'<block_start>_test_hybrid_device(4 <true>)<block_end>
|
<import_stmt>chainer<import_from_stmt>chainer training<import_from_stmt>chainer.training extensions ParallelUpdater<import_from_stmt>chainer.training.triggers ManualScheduleTrigger<import_from_stmt>chainer.datasets TransformDataset<import_from_stmt>chainercv.datasets VOCBboxDataset voc_bbox_label_names<import_from_stmt>chainercv transforms<import_from_stmt>chainercv.transforms.image.resize resize<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>time<line_sep>#from mask_rcnn_vgg import MaskRCNNVGG16
<import_from_stmt>mask_rcnn_resnet MaskRCNNResNet<import_from_stmt>coco_dataset COCODataset<import_from_stmt>mask_rcnn_train_chain MaskRCNNTrainChain<import_from_stmt>utils.bn_utils freeze_bn bn_to_affine<import_from_stmt>utils.cocoapi_evaluator COCOAPIEvaluator<import_from_stmt>utils.detection_coco_evaluator DetectionCOCOEvaluator<import_stmt>logging<import_stmt>traceback<import_from_stmt>utils.updater SubDivisionUpdater<import_stmt>cv2<def_stmt>resize_bbox bbox in_size out_size<block_start>bbox_o=bbox.copy()<line_sep>y_scale=float(out_size[0])/in_size[0]<line_sep>x_scale=float(out_size[1])/in_size[1]<line_sep>bbox_o[: 0]=y_scale<times>bbox[: 1]<line_sep>bbox_o[: 2]=y_scale<times>(bbox[: 1]+bbox[: 3])<line_sep>bbox_o[: 1]=x_scale<times>bbox[: 0]<line_sep>bbox_o[: 3]=x_scale<times>(bbox[: 0]+bbox[: 2])<line_sep><return>bbox_o<block_end><def_stmt>parse <block_start>parser=argparse.ArgumentParser(description='Mask RCNN trainer')<line_sep>parser.add_argument('--dataset' choices=('coco2017') default='coco2017')<line_sep>parser.add_argument('--extractor' choices=('resnet50' 'resnet101') default='resnet50' help='extractor network')<line_sep>parser.add_argument('--gpu' '-g' type=int default=0)<line_sep>parser.add_argument('--lr' '-l' type=float default=1e-4)<line_sep>parser.add_argument('--batchsize' '-b' type=int default=8)<line_sep>parser.add_argument('--freeze_bn' action='store_true' default=<false> help='freeze batchnorm gamma/beta')<line_sep>parser.add_argument('--bn2affine' action='store_true' default=<false> help='batchnorm to affine')<line_sep>parser.add_argument('--out' '-o' default='result' help='Output directory')<line_sep>parser.add_argument('--seed' '-s' type=int default=0)<line_sep>parser.add_argument('--roialign' action='store_false' default=<true> help='default: True')<line_sep>parser.add_argument('--lr_step' '-ls' type=int default=120000)<line_sep>parser.add_argument('--lr_initialchange' '-li' type=int default=400)<line_sep>parser.add_argument('--pretrained' '-p' type=str default='imagenet')<line_sep>parser.add_argument('--snapshot' type=int default=4000)<line_sep>parser.add_argument('--validation' type=int default=30000)<line_sep>parser.add_argument('--resume' type=str)<line_sep>parser.add_argument('--iteration' '-i' type=int default=180000)<line_sep>parser.add_argument('--roi_size' '-r' type=int default=14 help='ROI size for mask head input')<line_sep>parser.add_argument('--gamma' type=float default=1 help='mask loss weight')<line_sep><return>parser.parse_args()<block_end><class_stmt>Transform(object)<block_start><def_stmt>__init__ self net labelids<block_start>self.net=net<line_sep>self.labelids=labelids<block_end><def_stmt>__call__ self in_data<block_start><if_stmt>len(in_data)<eq>5<block_start>img,label,bbox,mask,i=in_data<block_end><elif_stmt>len(in_data)<eq>4<block_start>img,bbox,label,i=in_data<block_end>label=[self.labelids.index(l)<for>l label]<line_sep>_,H,W=img.shape<if_stmt>chainer.config.train<block_start>img=self.net.prepare(img)<block_end>_,o_H,o_W=img.shape<line_sep>scale=o_H/H<if_stmt>len(bbox)<eq>0<block_start><return>img [] [] 1<block_end>bbox=resize_bbox(bbox (H W) (o_H o_W))<line_sep>mask=resize(mask (o_H o_W))<if_stmt>chainer.config.train#horizontal flip
<block_start>img,params=transforms.random_flip(img x_random=<true> return_param=<true>)<line_sep>bbox=transforms.flip_bbox(bbox (o_H o_W) x_flip=params['x_flip'])<line_sep>mask=transforms.flip(mask x_flip=params['x_flip'])<block_end><return>img bbox label scale mask i<block_end><block_end><def_stmt>convert batch device<block_start><return>chainer.dataset.convert.concat_examples(batch device padding=-1)<block_end><def_stmt>main <block_start>args=parse()<line_sep>np.random.seed(args.seed)<line_sep>print('arguments: ' args)<line_sep># Model setup
<if_stmt>args.dataset<eq>'coco2017'<block_start>train_data=COCODataset()<block_end>test_data=COCODataset(json_file='instances_val2017.json' name='val2017' id_list_file='val2017.txt')<line_sep>train_class_ids=train_data.class_ids<line_sep>test_ids=test_data.ids<line_sep>cocoanns=test_data.coco<if_stmt>args.extractor<eq>'vgg16'<block_start>mask_rcnn=MaskRCNNVGG16(n_fg_class=80 pretrained_model=args.pretrained roi_size=args.roi_size roi_align=args.roialign)<block_end><elif_stmt>args.extractor<eq>'resnet50'<block_start>mask_rcnn=MaskRCNNResNet(n_fg_class=80 pretrained_model=args.pretrained roi_size=args.roi_size n_layers=50 roi_align=args.roialign class_ids=train_class_ids)<block_end><elif_stmt>args.extractor<eq>'resnet101'<block_start>mask_rcnn=MaskRCNNResNet(n_fg_class=80 pretrained_model=args.pretrained roi_size=args.roi_size n_layers=101 roi_align=args.roialign class_ids=train_class_ids)<block_end>mask_rcnn.use_preset('evaluate')<line_sep>model=MaskRCNNTrainChain(mask_rcnn gamma=args.gamma roi_size=args.roi_size)<line_sep># Trainer setup
<if_stmt>args.gpu<ge>0<block_start>chainer.cuda.get_device_from_id(args.gpu).use()<line_sep>model.to_gpu()<block_end>optimizer=chainer.optimizers.MomentumSGD(lr=args.lr momentum=0.9)<line_sep>#optimizer = chainer.optimizers.Adam()#alpha=0.001, beta1=0.9, beta2=0.999 , eps=0.00000001)
optimizer.setup(model)<line_sep>optimizer.add_hook(chainer.optimizer.WeightDecay(rate=0.0001))<line_sep>train_data=TransformDataset(train_data Transform(mask_rcnn train_class_ids))<line_sep>test_data=TransformDataset(test_data Transform(mask_rcnn train_class_ids))<line_sep>train_iter=chainer.iterators.SerialIterator(train_data batch_size=args.batchsize)<line_sep>test_iter=chainer.iterators.SerialIterator(test_data batch_size=1 repeat=<false> shuffle=<false>)<line_sep>updater=SubDivisionUpdater(train_iter optimizer device=args.gpu subdivisions=args.batchsize)<line_sep>#updater = ParallelUpdater(train_iter, optimizer, devices={"main": 0, "second": 1}, converter=convert ) #for training with multiple GPUs
trainer=training.Trainer(updater (args.iteration 'iteration') out=args.out)<line_sep># Extensions
trainer.extend(extensions.snapshot_object(model.mask_rcnn 'snapshot_model.npz') trigger=(args.snapshot 'iteration'))<line_sep>trainer.extend(extensions.ExponentialShift('lr' 10) trigger=ManualScheduleTrigger([args.lr_initialchange] 'iteration'))<line_sep>trainer.extend(extensions.ExponentialShift('lr' 0.1) trigger=(args.lr_step 'iteration'))<if_stmt>args.resume<is><not><none><block_start>chainer.serializers.load_npz(args.resume model.mask_rcnn)<block_end><if_stmt>args.freeze_bn<block_start>freeze_bn(model.mask_rcnn)<block_end><if_stmt>args.bn2affine<block_start>bn_to_affine(model.mask_rcnn)<block_end>log_interval=40 'iteration'<line_sep>plot_interval=160 'iteration'<line_sep>print_interval=40 'iteration'<line_sep>#trainer.extend(extensions.Evaluator(test_iter, model, device=args.gpu), trigger=(args.validation, 'iteration'))
#trainer.extend(DetectionCOCOEvaluator(test_iter, model.mask_rcnn), trigger=(args.validation, 'iteration')) #COCO AP Evaluator with VOC metric
trainer.extend(COCOAPIEvaluator(test_iter model.mask_rcnn test_ids cocoanns) trigger=(args.validation 'iteration'))#COCO AP Evaluator
trainer.extend(chainer.training.extensions.observe_lr() trigger=log_interval)<line_sep>trainer.extend(extensions.LogReport(trigger=log_interval))<line_sep>trainer.extend(extensions.PrintReport(['iteration' 'epoch' 'elapsed_time' 'lr' 'main/loss' 'main/avg_loss' 'main/roi_loc_loss' 'main/roi_cls_loss' 'main/roi_mask_loss' 'main/rpn_loc_loss' 'main/rpn_cls_loss' 'validation/main/loss' 'validation/main/map' ]) trigger=print_interval)<line_sep>trainer.extend(extensions.ProgressBar(update_interval=1000))<line_sep>#trainer.extend(extensions.dump_graph('main/loss'))
<try_stmt><block_start>trainer.run()<block_end><except_stmt><block_start>traceback.print_exc()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
<import_stmt>logging<import_stmt>re<import_stmt>signal<import_stmt>threading<import_from_stmt>contextlib contextmanager<import_from_stmt>enum Enum<import_from_stmt>queue Empty Queue<import_from_stmt>typing List Optional Pattern Tuple Union Callable<import_from_stmt>..protos.clearly_pb2 PatternFilter TaskMessage WorkerMessage<import_from_stmt>..utils.data accept_task accept_worker<line_sep>logger=logging.getLogger(__name__)<line_sep>BASE_THREAD_NAME='clearly-dispatcher'<class_stmt>Role(Enum)<block_start>TASKS=(accept_task )<line_sep>WORKERS=(accept_worker )<def_stmt>__new__ cls func_accept<block_start>obj=object.__new__(cls)<line_sep>obj._value_=len(cls.__members__)+1<line_sep>obj.__func_accept=func_accept<line_sep><return>obj<block_end>@property<def_stmt>thread_name self<arrow>str<block_start><return>'{}-{}'.format(BASE_THREAD_NAME self.name.lower())<block_end>@property<def_stmt>func_accept self<arrow>Callable[[Pattern bool Union[TaskMessage WorkerMessage]] bool]<block_start><return>self.__func_accept<block_end><block_end><class_stmt>StreamingDispatcher<block_start>"""Dispatch events to connected clients.
Server object, gets cleaned tasks and workers and send them to interested parties.
One instance takes care of only one of those, two instances are needed.
Attributes:
queue_input: to receive from event listener
observers: currently connected clients, interested in real time worker events
role: current role this dispatcher is running
"""<def_stmt>__init__ self queue_input:Queue role:Role<block_start>"""Construct a client dispatcher instance.
Args:
queue_input: to receive from event listener
"""<line_sep>logger.info('Creating %s' StreamingDispatcher.__name__)<line_sep>self.queue_input,self.role=queue_input role<line_sep>self.observers:List[Tuple[Queue Pattern bool]]=[]<line_sep># running engine (should be asyncio in the future)
self.dispatcher_thread:Optional[threading.Thread]=<none><line_sep># detect shutdown.
<def_stmt>sigterm_handler _signo _stack_frame# pragma: no cover
<block_start>self.__stop()<block_end>signal.signal(signal.SIGTERM sigterm_handler)<line_sep>self.__start()<block_end>@contextmanager<def_stmt>streaming_capture self capture:PatternFilter queue:Queue<arrow><none><block_start>"""Put a connected client in streaming capture mode, filtering all
incoming events in real time.
Args:
capture: the criteria for desired events
queue: where to put the matching events
"""<line_sep>observer=queue re.compile(capture.pattern) capture.negate<line_sep># should not need any locks, thanks to GIL
self.observers.append(observer)<try_stmt><block_start><yield><block_end><finally_stmt><block_start>self.observers.remove(observer)<block_end><block_end><def_stmt>__start self<arrow><none># pragma: no cover
<block_start>"""Start the real time engine that captures tasks."""<assert_stmt><not>self.dispatcher_thread<line_sep>self.dispatcher_thread=threading.Thread(target=self.__run name=self.role.thread_name)<line_sep>self.dispatcher_thread.daemon=<true><line_sep>self.running=<true># graceful shutdown
self.dispatcher_thread.start()<block_end><def_stmt>__stop self<arrow><none># pragma: no cover
<block_start>"""Stop the background engine."""<if_stmt><not>self.dispatcher_thread<block_start><return><block_end>logger.info('Stopping %s' self.role.thread_name)<line_sep>self.running=<false># graceful shutdown
self.dispatcher_thread.join(1)<line_sep>self.dispatcher_thread=<none><block_end><def_stmt>__run self<arrow><none># pragma: no cover
<block_start>logger.info('Starting: %r' threading.current_thread())<while_stmt>self.running<block_start><try_stmt><block_start>message=self.queue_input.get(timeout=1)<block_end><except_stmt>Empty<block_start><continue><block_end>self._dispatch(message)<block_end>logger.info('Stopped: %r' threading.current_thread())<block_end><def_stmt>_dispatch self message:Union[TaskMessage WorkerMessage]<arrow><none># let's see who's interested.
<block_start><for_stmt>q,pattern,negate self.observers<block_start><if_stmt>self.role.func_accept(pattern negate message)<block_start>q.put(message)<block_end><block_end><block_end><block_end>
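# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# Illustrates the intended flow of StreamingDispatcher: an event listener feeds
# queue_input, while a connected client registers its own queue through
# streaming_capture() and receives only matching events. The TaskMessage field
# used below ('name') is an assumption about the clearly protos; adjust it to
# the real message schema.
if __name__ == '__main__':
    events_in = Queue()
    dispatcher = StreamingDispatcher(events_in, Role.TASKS)   # starts its worker thread

    client_queue = Queue()
    capture = PatternFilter(pattern='billing.*', negate=False)
    with dispatcher.streaming_capture(capture, client_queue):
        events_in.put(TaskMessage(name='billing.charge'))     # matches -> forwarded
        events_in.put(TaskMessage(name='reports.daily'))      # filtered out
        forwarded = client_queue.get(timeout=2)
        print('forwarded:', forwarded.name)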
|
"""test_3_mac_receive_jpg.py -- receive & display jpg stream.
A simple test program that uses imagezmq to receive an image jpg stream from a
Raspberry Pi and display it as a video stream.
1. Run this program in its own terminal window on the mac:
python test_3_mac_receive_jpg.py
This "receive and display images" program must be running before starting the
RPi sending program.
2. Run the jpg sending program on the RPi:
python test_3_rpi_send_jpg.py
A cv2.imshow() window will appear on the Mac showing the transmitted images as
a video stream. You can repeat Step 2 and start the test_3_rpi_send_jpg.py on
multiple RPis and each one will cause a new cv2.imshow() window to open.
To end the programs, press Ctrl-C in the terminal window of the RPi first.
Then press Ctrl-C in the terminal window of the receiving program. You may
have to press Ctrl-C in the display window as well.
"""<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>imagezmq<line_sep>image_hub=imagezmq.ImageHub()<while_stmt><true># show streamed images until Ctrl-C
<block_start>rpi_name,jpg_buffer=image_hub.recv_jpg()<line_sep>image=cv2.imdecode(np.frombuffer(jpg_buffer dtype='uint8') -1)<line_sep># see opencv docs for info on -1 parameter
cv2.imshow(rpi_name image)# 1 window for each RPi
cv2.waitKey(1)<line_sep>image_hub.send_reply(b'OK')<block_end>
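# --- Hedged companion sketch (added by the editor; not the original test_3_rpi_send_jpg.py) ---
# A minimal jpg-sending loop, intended as its own script on the RPi side, that
# pairs with the receiver above. It assumes the receiving machine is reachable
# as 'mac-hostname' on imagezmq's default port 5555, and uses cv2.VideoCapture
# instead of the PiCamera used in the real RPi test program.
import socket
import cv2
import imagezmq

sender = imagezmq.ImageSender(connect_to='tcp://mac-hostname:5555')
rpi_name = socket.gethostname()          # identifies this sender at the receiver
capture = cv2.VideoCapture(0)
while True:
    ok, frame = capture.read()
    if not ok:
        break
    _, jpg_buffer = cv2.imencode('.jpg', frame,
                                 [int(cv2.IMWRITE_JPEG_QUALITY), 95])
    sender.send_jpg(rpi_name, jpg_buffer)    # blocks until the hub replies 'OK'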
|
"""
Development settings for linkding webapp
"""<line_sep># Start from development settings
# noinspection PyUnresolvedReferences
<import_from_stmt>.base *<line_sep># Turn on debug mode
DEBUG=<true><line_sep># Turn on SASS compilation
SASS_PROCESSOR_ENABLED=<true><line_sep># Enable debug toolbar
INSTALLED_APPS.append('debug_toolbar')<line_sep>MIDDLEWARE.append('debug_toolbar.middleware.DebugToolbarMiddleware')<line_sep>INTERNAL_IPS=['127.0.0.1' ]<line_sep># Enable debug logging
LOGGING={'version':1 'disable_existing_loggers':<false> 'formatters':{'simple':{'format':'{levelname} {message}' 'style':'{' } } 'handlers':{'console':{'class':'logging.StreamHandler' 'formatter':'simple'}} 'root':{'handlers':['console'] 'level':'WARNING' } 'loggers':{'django.db.backends':{'level':'ERROR' # Set to DEBUG to log all SQL calls
'handlers':['console'] } 'bookmarks.services.tasks':{# Log task output
'level':'DEBUG' 'handlers':['console'] 'propagate':<false> }}}<line_sep># Import custom settings
# noinspection PyUnresolvedReferences
<import_from_stmt>.custom *<line_sep>
|
<import_from_stmt>services.utils handle_exceptions<import_from_stmt>.utils find_records<class_stmt>FlowApi(object)<block_start><def_stmt>__init__ self app db<block_start>self.db=db<line_sep>app.router.add_route("GET" "/flows" self.get_all_flows)<line_sep>app.router.add_route("GET" "/flows/{flow_id}" self.get_flow)<line_sep>self._async_table=self.db.flow_table_postgres<block_end>@handle_exceptions<async_keyword><def_stmt>get_flow self request<block_start>"""
---
description: Get one flow
tags:
- Flow
parameters:
- $ref: '#/definitions/Params/Path/flow_id'
produces:
- application/json
responses:
"200":
description: Returns one flow
schema:
$ref: '#/definitions/ResponsesFlow'
"405":
description: Invalid HTTP method
schema:
$ref: '#/definitions/ResponsesError405'
"""<line_sep>flow_name=request.match_info.get("flow_id")<line_sep><return><await>find_records(request self._async_table fetch_single=<true> initial_conditions=["flow_id = %s"] initial_values=[flow_name])<block_end>@handle_exceptions<async_keyword><def_stmt>get_all_flows self request<block_start>"""
---
description: Get all flows
tags:
- Flow
parameters:
- $ref: '#/definitions/Params/Builtin/_page'
- $ref: '#/definitions/Params/Builtin/_limit'
- $ref: '#/definitions/Params/Builtin/_order'
- $ref: '#/definitions/Params/Builtin/_tags'
- $ref: '#/definitions/Params/Builtin/_group'
- $ref: '#/definitions/Params/Custom/flow_id'
- $ref: '#/definitions/Params/Custom/user_name'
- $ref: '#/definitions/Params/Custom/ts_epoch'
produces:
- application/json
responses:
"200":
description: Returns all flows
schema:
$ref: '#/definitions/ResponsesFlowList'
"405":
description: Invalid HTTP method
schema:
$ref: '#/definitions/ResponsesError405'
"""<line_sep><return><await>find_records(request self._async_table initial_conditions=[] initial_values=[] allowed_order=self._async_table.keys allowed_group=self._async_table.keys allowed_filters=self._async_table.keys)<block_end><block_end>
|
# coding=utf-8
<import_stmt>os<import_stmt>unittest<import_from_stmt>conans.test.utils.test_files temp_folder<import_from_stmt>conans.util.files set_dirty clean_dirty set_dirty_context_manager _DIRTY_FOLDER<class_stmt>DirtyTest(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>""" Create temporary folder to save dirty state
"""<line_sep>self.temp_folder=temp_folder()<line_sep>self.dirty_folder=self.temp_folder+_DIRTY_FOLDER<block_end><def_stmt>test_set_dirty self<block_start>""" Dirty flag must be created by set_dirty
"""<line_sep>set_dirty(self.temp_folder)<line_sep>self.assertTrue(os.path.exists(self.dirty_folder))<block_end><def_stmt>test_clean_dirty self<block_start>""" Dirty flag must be cleaned by clean_dirty
"""<line_sep>set_dirty(self.temp_folder)<line_sep>self.assertTrue(os.path.exists(self.dirty_folder))<line_sep>clean_dirty(self.temp_folder)<line_sep>self.assertFalse(os.path.exists(self.dirty_folder))<block_end><def_stmt>test_set_dirty_context self<block_start>""" Dirty context must remove lock before exiting
"""<with_stmt>set_dirty_context_manager(self.temp_folder)<block_start>self.assertTrue(os.path.exists(self.dirty_folder))<block_end>self.assertFalse(os.path.exists(self.dirty_folder))<block_end><def_stmt>test_interrupted_dirty_context self<block_start>""" Broken context must preserve dirty state
Raise an exception in the middle of the context. By default,
the dirty file is not removed.
"""<try_stmt><block_start><with_stmt>set_dirty_context_manager(self.temp_folder)<block_start>self.assertTrue(os.path.exists(self.dirty_folder))<line_sep><raise>RuntimeError()<block_end><block_end><except_stmt>RuntimeError<block_start><pass><block_end>self.assertTrue(os.path.exists(self.dirty_folder))<block_end><block_end>
|
"""App related signal handlers."""<import_stmt>redis<import_from_stmt>django.conf settings<import_from_stmt>django.db.models signals<import_from_stmt>django.dispatch receiver<import_from_stmt>modoboa.admin models<as>admin_models<import_from_stmt>. constants<def_stmt>set_message_limit instance key<block_start>"""Store message limit in Redis."""<line_sep>old_message_limit=instance._loaded_values.get("message_limit")<if_stmt>old_message_limit<eq>instance.message_limit<block_start><return><block_end>rclient=redis.Redis(host=settings.REDIS_HOST port=settings.REDIS_PORT db=settings.REDIS_QUOTA_DB)<if_stmt>instance.message_limit<is><none># delete existing key
<block_start><if_stmt>rclient.hexists(constants.REDIS_HASHNAME key)<block_start>rclient.hdel(constants.REDIS_HASHNAME key)<block_end><return><block_end><if_stmt>old_message_limit<is><not><none><block_start>diff=instance.message_limit-old_message_limit<block_end><else_stmt><block_start>diff=instance.message_limit<block_end>rclient.hincrby(constants.REDIS_HASHNAME key diff)<block_end>@receiver(signals.post_save sender=admin_models.Domain)<def_stmt>set_domain_message_limit sender instance created **kwargs<block_start>"""Store domain message limit in Redis."""<line_sep>set_message_limit(instance instance.name)<block_end>@receiver(signals.post_save sender=admin_models.Mailbox)<def_stmt>set_mailbox_message_limit sender instance created **kwargs<block_start>"""Store mailbox message limit in Redis."""<line_sep>set_message_limit(instance instance.full_address)<block_end>
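# --- Hedged illustration (added by the editor, not part of the original module) ---
# The hincrby(diff) logic above adjusts the Redis counter by the change in the
# limit instead of resetting it, so already-consumed quota is preserved. A pure
# helper restating that rule:
def _limit_increment(old_limit, new_limit):
    """Return the value added to the Redis hash for a limit change."""
    if new_limit is None:
        return None                      # key is deleted, not incremented
    if old_limit is None:
        return new_limit                 # first limit: add it in full
    return new_limit - old_limit         # otherwise only the difference

if __name__ == '__main__':
    assert _limit_increment(100, 150) == 50
    assert _limit_increment(None, 150) == 150
    assert _limit_increment(150, 120) == -30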
|
# Copyright 2012 the V8 project authors. All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<import_stmt>socket<import_stmt>SocketServer<import_stmt>threading<try_stmt><block_start><import_stmt>ujson<as>json<block_end><except_stmt><block_start><import_stmt>json<block_end><import_from_stmt>. constants<import_from_stmt>..objects peer<line_sep>STARTUP_REQUEST="V8 test peer starting up"<line_sep>STARTUP_RESPONSE="Let's rock some tests!"<line_sep>EXIT_REQUEST="V8 testing peer going down"<def_stmt>GetOwnIP <block_start>s=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>s.connect(("8.8.8.8" 80))<line_sep>ip=s.getsockname()[0]<line_sep>s.close()<line_sep><return>ip<block_end><class_stmt>PresenceHandler(SocketServer.BaseRequestHandler)<block_start><def_stmt>handle self<block_start>data=json.loads(self.request[0].strip())<if_stmt>data[0]<eq>STARTUP_REQUEST<block_start>jobs=data[1]<line_sep>relative_perf=data[2]<line_sep>pubkey_fingerprint=data[3]<line_sep>trusted=self.server.daemon.IsTrusted(pubkey_fingerprint)<line_sep>response=[STARTUP_RESPONSE self.server.daemon.jobs self.server.daemon.relative_perf self.server.daemon.pubkey_fingerprint trusted]<line_sep>response=json.dumps(response)<line_sep>self.server.SendTo(self.client_address[0] response)<line_sep>p=peer.Peer(self.client_address[0] jobs relative_perf pubkey_fingerprint)<line_sep>p.trusted=trusted<line_sep>self.server.daemon.AddPeer(p)<block_end><elif_stmt>data[0]<eq>STARTUP_RESPONSE<block_start>jobs=data[1]<line_sep>perf=data[2]<line_sep>pubkey_fingerprint=data[3]<line_sep>p=peer.Peer(self.client_address[0] jobs perf pubkey_fingerprint)<line_sep>p.trusted=self.server.daemon.IsTrusted(pubkey_fingerprint)<line_sep>p.trusting_me=data[4]<line_sep>self.server.daemon.AddPeer(p)<block_end><elif_stmt>data[0]<eq>EXIT_REQUEST<block_start>self.server.daemon.DeletePeer(self.client_address[0])<if_stmt>self.client_address[0]<eq>self.server.daemon.ip<block_start>self.server.shutdown_lock.release()<block_end><block_end><block_end><block_end><class_stmt>PresenceDaemon(SocketServer.ThreadingMixIn SocketServer.UDPServer)<block_start><def_stmt>__init__ self daemon<block_start>self.daemon=daemon<line_sep>address=(daemon.ip constants.PRESENCE_PORT)<line_sep>SocketServer.UDPServer.__init__(self address PresenceHandler)<line_sep>self.shutdown_lock=threading.Lock()<block_end><def_stmt>shutdown self<block_start>self.shutdown_lock.acquire()<line_sep>self.SendToAll(json.dumps([EXIT_REQUEST]))<line_sep>self.shutdown_lock.acquire()<line_sep>self.shutdown_lock.release()<line_sep>SocketServer.UDPServer.shutdown(self)<block_end><def_stmt>SendTo self target message<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>sock.sendto(message (target constants.PRESENCE_PORT))<line_sep>sock.close()<block_end><def_stmt>SendToAll self message<block_start>sock=socket.socket(socket.AF_INET socket.SOCK_DGRAM)<line_sep>ip=self.daemon.ip.split(".")<for_stmt>i range(1 254)<block_start>ip[-1]=str(i)<line_sep>sock.sendto(message (".".join(ip) constants.PRESENCE_PORT))<block_end>sock.close()<block_end><def_stmt>FindPeers self<block_start>request=[STARTUP_REQUEST self.daemon.jobs self.daemon.relative_perf self.daemon.pubkey_fingerprint]<line_sep>request=json.dumps(request)<line_sep>self.SendToAll(request)<block_end><block_end>
|
<import_from_stmt>decimal Decimal<as>D<import_from_stmt>http client<as>http_client<import_from_stmt>unittest mock<import_from_stmt>django.urls reverse<import_from_stmt>oscar.apps.shipping methods<import_from_stmt>oscar.core.loading get_class get_classes get_model<import_from_stmt>oscar.test factories<line_sep>Basket=get_model('basket' 'Basket')<line_sep>ConditionalOffer=get_model('offer' 'ConditionalOffer')<line_sep>Order=get_model('order' 'Order')<line_sep>FailedPreCondition=get_class('checkout.exceptions' 'FailedPreCondition')<line_sep>GatewayForm=get_class('checkout.forms' 'GatewayForm')<line_sep>UnableToPlaceOrder=get_class('order.exceptions' 'UnableToPlaceOrder')<line_sep>RedirectRequired,UnableToTakePayment,PaymentError=get_classes('payment.exceptions' ['RedirectRequired' 'UnableToTakePayment' 'PaymentError'])<line_sep>NoShippingRequired=get_class('shipping.methods' 'NoShippingRequired')<class_stmt>CheckoutMixin(object)<block_start><def_stmt>create_digital_product self<block_start>product_class=factories.ProductClassFactory(requires_shipping=<false> track_stock=<false>)<line_sep>product=factories.ProductFactory(product_class=product_class)<line_sep>factories.StockRecordFactory(num_in_stock=<none> price=D('12.00') product=product)<line_sep><return>product<block_end><def_stmt>add_product_to_basket self product=<none> **kwargs<block_start><if_stmt>product<is><none><block_start>product=factories.ProductFactory()<line_sep>factories.StockRecordFactory(num_in_stock=10 price=D('12.00') product=product)<block_end>detail_page=self.get(product.get_absolute_url() user=kwargs.get('logged_in_user' self.user))<line_sep>form=detail_page.forms['add_to_basket_form']<line_sep>form.submit()<block_end><def_stmt>add_voucher_to_basket self voucher=<none><block_start><if_stmt>voucher<is><none><block_start>voucher=factories.create_voucher()<block_end>basket_page=self.get(reverse('basket:summary'))<line_sep>form=basket_page.forms['voucher_form']<line_sep>form['code']=voucher.code<line_sep>form.submit()<block_end><def_stmt>enter_guest_details self email='<EMAIL>'<block_start>index_page=self.get(reverse('checkout:index'))<if_stmt>index_page.status_code<eq>200<block_start>index_page.form['username']=email<line_sep>index_page.form.select('options' GatewayForm.GUEST)<line_sep>index_page.form.submit()<block_end><block_end><def_stmt>create_shipping_country self<block_start><return>factories.CountryFactory(iso_3166_1_a2='GB' is_shipping_country=<true>)<block_end><def_stmt>enter_shipping_address self<block_start>self.create_shipping_country()<line_sep>address_page=self.get(reverse('checkout:shipping-address'))<if_stmt>address_page.status_code<eq>200<block_start>form=address_page.forms['new_shipping_address']<line_sep>form['first_name']='John'<line_sep>form['last_name']='Doe'<line_sep>form['line1']='1 Egg Road'<line_sep>form['line4']='Shell City'<line_sep>form['postcode']='N12 9RT'<line_sep>form.submit()<block_end><block_end><def_stmt>enter_shipping_method self<block_start>self.get(reverse('checkout:shipping-method'))<block_end><def_stmt>place_order self<block_start>payment_details=self.get(reverse('checkout:shipping-method')).follow().follow()<line_sep>preview=payment_details.click(linkid="view_preview")<line_sep><return>preview.forms['place_order_form'].submit().follow()<block_end><def_stmt>reach_payment_details_page 
self<block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details('<EMAIL>')<block_end>self.enter_shipping_address()<line_sep><return>self.get(reverse('checkout:shipping-method')).follow().follow()<block_end><def_stmt>ready_to_place_an_order self<block_start>payment_details=self.reach_payment_details_page()<line_sep><return>payment_details.click(linkid="view_preview")<block_end><block_end><class_stmt>IndexViewPreConditionsMixin<block_start>view_name=<none><line_sep># Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')<def_stmt>test_check_basket_is_not_empty self mock_skip_unless_basket_requires_shipping mock_skip_unless_payment_is_required <block_start>response=self.get(reverse(self.view_name))<line_sep>self.assertRedirectsTo(response 'basket:summary')<block_end># Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')<def_stmt>test_check_basket_is_valid self mock_skip_unless_basket_requires_shipping mock_skip_unless_payment_is_required # Add product to basket but then remove its stock so it is not
# purchasable.
<block_start>product=factories.ProductFactory()<line_sep>self.add_product_to_basket(product)<line_sep>product.stockrecords.all().update(num_in_stock=0)<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>response=self.get(reverse(self.view_name))<line_sep>self.assertRedirectsTo(response 'basket:summary')<block_end><block_end><class_stmt>ShippingAddressViewSkipConditionsMixin<block_start>view_name=<none><line_sep>next_view_name=<none><def_stmt>test_skip_unless_basket_requires_shipping self<block_start>product=self.create_digital_product()<line_sep>self.add_product_to_basket(product)<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>response=self.get(reverse(self.view_name))<line_sep>self.assertRedirectsTo(response self.next_view_name)<block_end><block_end><class_stmt>ShippingAddressViewPreConditionsMixin(IndexViewPreConditionsMixin)<block_start>view_name=<none><line_sep># Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')<def_stmt>test_check_user_email_is_captured self mock_skip_unless_basket_requires_shipping mock_skip_unless_payment_is_required <block_start><if_stmt>self.is_anonymous<block_start>self.add_product_to_basket()<line_sep>response=self.get(reverse(self.view_name))<line_sep>self.assertRedirectsTo(response 'checkout:index')<block_end><block_end><block_end><class_stmt>ShippingAddressViewMixin(ShippingAddressViewSkipConditionsMixin ShippingAddressViewPreConditionsMixin)<block_start><def_stmt>test_submitting_valid_form_adds_data_to_session self<block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.create_shipping_country()<line_sep>page=self.get(reverse('checkout:shipping-address'))<line_sep>form=page.forms['new_shipping_address']<line_sep>form['first_name']='Barry'<line_sep>form['last_name']='Chuckle'<line_sep>form['line1']='1 King Street'<line_sep>form['line4']='Gotham City'<line_sep>form['postcode']='N1 7RR'<line_sep>response=form.submit()<line_sep>self.assertRedirectsTo(response 'checkout:shipping-method')<line_sep>session_data=self.app.session['checkout_data']<line_sep>session_fields=session_data['shipping']['new_address_fields']<line_sep>self.assertEqual('Barry' session_fields['first_name'])<line_sep>self.assertEqual('Chuckle' session_fields['last_name'])<line_sep>self.assertEqual('1 King Street' session_fields['line1'])<line_sep>self.assertEqual('Gotham City' session_fields['line4'])<line_sep>self.assertEqual('N1 7RR' session_fields['postcode'])<block_end><def_stmt>test_shows_initial_data_if_the_form_has_already_been_submitted self<block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep>page=self.get(reverse('checkout:shipping-address') user=self.user)<line_sep>form=page.forms['new_shipping_address']<line_sep>self.assertEqual('John' form['first_name'].value)<line_sep>self.assertEqual('Doe' form['last_name'].value)<line_sep>self.assertEqual('1 Egg Road' form['line1'].value)<line_sep>self.assertEqual('Shell City' form['line4'].value)<line_sep>self.assertEqual('N12 9RT' form['postcode'].value)<block_end><block_end><class_stmt>ShippingMethodViewSkipConditionsMixin<block_start>view_name=<none><line_sep>next_view_name=<none><def_stmt>test_skip_unless_basket_requires_shipping self# This skip condition is not a "normal" one, but is implemented in the
# view's "get" method
<block_start>product=self.create_digital_product()<line_sep>self.add_product_to_basket(product)<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>response=self.get(reverse(self.view_name))<line_sep>self.assertRedirectsTo(response self.next_view_name)<line_sep>self.assertEqual(self.app.session['checkout_data']['shipping']['method_code'] NoShippingRequired.code)<block_end>@mock.patch('oscar.apps.checkout.views.Repository')<def_stmt>test_skip_if_single_shipping_method_is_available self mock_repo# This skip condition is not a "normal" one, but is implemented in the
# view's "get" method
<block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep># Ensure one shipping method available
instance=mock_repo.return_value<line_sep>instance.get_shipping_methods.return_value=[methods.Free()]<line_sep>response=self.get(reverse('checkout:shipping-method'))<line_sep>self.assertRedirectsTo(response 'checkout:payment-method')<block_end><block_end><class_stmt>ShippingMethodViewPreConditionsMixin(ShippingAddressViewPreConditionsMixin)<block_start>view_name=<none><line_sep># Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')@mock.patch('oscar.apps.checkout.views.Repository')<def_stmt>test_check_shipping_methods_are_available self mock_repo mock_skip_unless_basket_requires_shipping mock_skip_unless_payment_is_required # This pre condition is not a "normal" one, but is implemented in the
# view's "get" method
<block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep># Ensure no shipping methods available
instance=mock_repo.return_value<line_sep>instance.get_shipping_methods.return_value=[]<line_sep>response=self.get(reverse('checkout:shipping-method'))<line_sep>self.assertRedirectsTo(response 'checkout:shipping-address')<block_end># Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')<def_stmt>test_check_shipping_data_is_captured self mock_skip_unless_basket_requires_shipping mock_skip_unless_payment_is_required # This pre condition is not a "normal" one, but is implemented in the
# view's "get" method
<block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>response=self.get(reverse(self.view_name))<line_sep>self.assertRedirectsTo(response 'checkout:shipping-address')<block_end><block_end><class_stmt>ShippingMethodViewMixin(ShippingMethodViewSkipConditionsMixin ShippingMethodViewPreConditionsMixin)<block_start>@mock.patch('oscar.apps.checkout.views.Repository')<def_stmt>test_shows_form_when_multiple_shipping_methods_available self mock_repo<block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep># Ensure multiple shipping methods available
method=mock.MagicMock()<line_sep>method.code='m'<line_sep>instance=mock_repo.return_value<line_sep>instance.get_shipping_methods.return_value=[methods.Free() method]<line_sep>form_page=self.get(reverse('checkout:shipping-method'))<line_sep>self.assertIsOk(form_page)<line_sep>response=form_page.forms[0].submit()<line_sep>self.assertRedirectsTo(response 'checkout:payment-method')<block_end># Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')@mock.patch('oscar.apps.checkout.views.Repository')<def_stmt>test_check_user_can_submit_only_valid_shipping_method self mock_repo mock_skip_unless_basket_requires_shipping mock_skip_unless_payment_is_required <block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep>method=mock.MagicMock()<line_sep>method.code='m'<line_sep>instance=mock_repo.return_value<line_sep>instance.get_shipping_methods.return_value=[methods.Free() method]<line_sep>form_page=self.get(reverse('checkout:shipping-method'))<line_sep># a malicious attempt?
form_page.forms[0]['method_code'].value='super-free-shipping'<line_sep>response=form_page.forms[0].submit()<line_sep>self.assertIsNotRedirect(response)<line_sep>response.mustcontain('Your submitted shipping method is not permitted')<block_end><block_end><class_stmt>PaymentMethodViewSkipConditionsMixin<block_start>@mock.patch('oscar.apps.checkout.session.SurchargeApplicator.get_surcharges')<def_stmt>test_skip_unless_payment_is_required self mock_get_surcharges<block_start>mock_get_surcharges.return_value=[]<line_sep>product=factories.create_product(price=D('0.00') num_in_stock=100)<line_sep>self.add_product_to_basket(product)<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep># The shipping method is set automatically, as there is only one (free)
# available
response=self.get(reverse('checkout:payment-method'))<line_sep>self.assertRedirectsTo(response 'checkout:preview')<block_end><block_end><class_stmt>PaymentMethodViewPreConditionsMixin(ShippingMethodViewPreConditionsMixin)<block_start>view_name=<none><line_sep># Disable skip conditions, so that we do not first get redirected forwards
@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')<def_stmt>test_check_shipping_data_is_captured self mock_skip_unless_basket_requires_shipping mock_skip_unless_payment_is_required <block_start>super().test_check_shipping_data_is_captured()<line_sep>self.enter_shipping_address()<line_sep>response=self.get(reverse(self.view_name))<line_sep>self.assertRedirectsTo(response 'checkout:shipping-method')<block_end><block_end><class_stmt>PaymentMethodViewMixin(PaymentMethodViewSkipConditionsMixin PaymentMethodViewPreConditionsMixin)<block_start><pass><block_end><class_stmt>PaymentDetailsViewSkipConditionsMixin<block_start>@mock.patch('oscar.apps.checkout.session.SurchargeApplicator.get_surcharges')<def_stmt>test_skip_unless_payment_is_required self mock_get_surcharges<block_start>mock_get_surcharges.return_value=[]<line_sep>product=factories.create_product(price=D('0.00') num_in_stock=100)<line_sep>self.add_product_to_basket(product)<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep># The shipping method is set automatically, as there is only one (free)
# available
response=self.get(reverse('checkout:payment-details'))<line_sep>self.assertRedirectsTo(response 'checkout:preview')<block_end><block_end><class_stmt>PaymentDetailsViewPreConditionsMixin(PaymentMethodViewPreConditionsMixin)<block_start>"""
Does not add any new pre conditions.
"""<block_end><class_stmt>PaymentDetailsViewMixin(PaymentDetailsViewSkipConditionsMixin PaymentDetailsViewPreConditionsMixin)<block_start>@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_payment')<def_stmt>test_redirects_customers_when_using_bank_gateway self mock_method<block_start>bank_url='https://bank-website.com'<line_sep>e=RedirectRequired(url=bank_url)<line_sep>mock_method.side_effect=e<line_sep>preview=self.ready_to_place_an_order()<line_sep>bank_redirect=preview.forms['place_order_form'].submit()<assert_stmt>bank_redirect.status_code<eq>302<assert_stmt>bank_redirect.url<eq>bank_url<block_end>@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_payment')<def_stmt>test_handles_anticipated_payments_errors_gracefully self mock_method<block_start>msg='Submitted expiration date is wrong'<line_sep>e=UnableToTakePayment(msg)<line_sep>mock_method.side_effect=e<line_sep>preview=self.ready_to_place_an_order()<line_sep>response=preview.forms['place_order_form'].submit()<line_sep>self.assertIsOk(response)<line_sep># check user is warned
response.mustcontain(msg)<line_sep># check basket is restored
basket=Basket.objects.get()<line_sep>self.assertEqual(basket.status Basket.OPEN)<block_end>@mock.patch('oscar.apps.checkout.views.logger')@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_payment')<def_stmt>test_handles_unexpected_payment_errors_gracefully self mock_method mock_logger<block_start>msg='This gateway is down for maintenance'<line_sep>e=PaymentError(msg)<line_sep>mock_method.side_effect=e<line_sep>preview=self.ready_to_place_an_order()<line_sep>response=preview.forms['place_order_form'].submit()<line_sep>self.assertIsOk(response)<line_sep># check user is warned with a generic error
response.mustcontain('A problem occurred while processing payment for this order' no=[msg])<line_sep># admin should be warned
self.assertTrue(mock_logger.error.called)<line_sep># check basket is restored
basket=Basket.objects.get()<line_sep>self.assertEqual(basket.status Basket.OPEN)<block_end>@mock.patch('oscar.apps.checkout.views.logger')@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_payment')<def_stmt>test_handles_bad_errors_during_payments self mock_method mock_logger<block_start>e=Exception()<line_sep>mock_method.side_effect=e<line_sep>preview=self.ready_to_place_an_order()<line_sep>response=preview.forms['place_order_form'].submit()<line_sep>self.assertIsOk(response)<line_sep>self.assertTrue(mock_logger.exception.called)<line_sep>basket=Basket.objects.get()<line_sep>self.assertEqual(basket.status Basket.OPEN)<block_end>@mock.patch('oscar.apps.checkout.views.logger')@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_order_placement')<def_stmt>test_handles_unexpected_order_placement_errors_gracefully self mock_method mock_logger<block_start>e=UnableToPlaceOrder()<line_sep>mock_method.side_effect=e<line_sep>preview=self.ready_to_place_an_order()<line_sep>response=preview.forms['place_order_form'].submit()<line_sep>self.assertIsOk(response)<line_sep>self.assertTrue(mock_logger.error.called)<line_sep>basket=Basket.objects.get()<line_sep>self.assertEqual(basket.status Basket.OPEN)<block_end>@mock.patch('oscar.apps.checkout.views.logger')@mock.patch('oscar.apps.checkout.views.PaymentDetailsView.handle_order_placement')<def_stmt>test_handles_all_other_exceptions_gracefully self mock_method mock_logger<block_start>mock_method.side_effect=Exception()<line_sep>preview=self.ready_to_place_an_order()<line_sep>response=preview.forms['place_order_form'].submit()<line_sep>self.assertIsOk(response)<line_sep>self.assertTrue(mock_logger.exception.called)<line_sep>basket=Basket.objects.get()<line_sep>self.assertEqual(basket.status Basket.OPEN)<block_end><block_end><class_stmt>PaymentDetailsPreviewViewPreConditionsMixin(PaymentDetailsViewPreConditionsMixin)# Disable skip conditions, so that we do not first get redirected forwards
<block_start>@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_payment_is_required')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.skip_unless_basket_requires_shipping')@mock.patch('oscar.apps.checkout.session.CheckoutSessionMixin.check_payment_data_is_captured')<def_stmt>test_check_payment_data_is_captured self mock_check_payment_data_is_captured mock_skip_unless_basket_requires_shipping mock_skip_unless_payment_is_required <block_start>mock_check_payment_data_is_captured.side_effect=FailedPreCondition(url=reverse('checkout:payment-details'))<line_sep>response=self.ready_to_place_an_order()<line_sep>self.assertRedirectsTo(response 'checkout:payment-details')<block_end><block_end><class_stmt>PaymentDetailsPreviewViewMixin(PaymentDetailsPreviewViewPreConditionsMixin)<block_start><def_stmt>test_allows_order_to_be_placed self<block_start>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep>payment_details=self.get(reverse('checkout:shipping-method')).follow().follow()<line_sep>preview=payment_details.click(linkid="view_preview")<line_sep>preview.forms['place_order_form'].submit().follow()<line_sep>self.assertEqual(1 Order.objects.all().count())<block_end><def_stmt>test_payment_form_being_submitted_from_payment_details_view self<block_start>payment_details=self.reach_payment_details_page()<line_sep>preview=payment_details.forms['sensible_data'].submit()<line_sep>self.assertEqual(0 Order.objects.all().count())<line_sep>preview.form.submit().follow()<line_sep>self.assertEqual(1 Order.objects.all().count())<block_end><def_stmt>test_handles_invalid_payment_forms self<block_start>payment_details=self.reach_payment_details_page()<line_sep>form=payment_details.forms['sensible_data']<line_sep># payment forms should use the preview URL not the payment details URL
form.action=reverse('checkout:payment-details')<line_sep>self.assertEqual(form.submit(status="*").status_code http_client.BAD_REQUEST)<block_end><def_stmt>test_placing_an_order_using_a_voucher_records_use self<block_start>self.add_product_to_basket()<line_sep>self.add_voucher_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep>thankyou=self.place_order()<line_sep>order=thankyou.context['order']<line_sep>self.assertEqual(1 order.discounts.all().count())<line_sep>discount=order.discounts.all()[0]<line_sep>voucher=discount.voucher<line_sep>self.assertEqual(1 voucher.num_orders)<block_end><def_stmt>test_placing_an_order_using_an_offer_records_use self<block_start>offer=factories.create_offer()<line_sep>self.add_product_to_basket()<if_stmt>self.is_anonymous<block_start>self.enter_guest_details()<block_end>self.enter_shipping_address()<line_sep>self.place_order()<line_sep># Reload offer
offer=ConditionalOffer.objects.get(id=offer.id)<line_sep>self.assertEqual(1 offer.num_orders)<line_sep>self.assertEqual(1 offer.num_applications)<block_end><block_end>
|
# Django Imports
<import_from_stmt>django.contrib admin<class_stmt>S3MediaAssetAdmin(admin.ModelAdmin)<block_start>list_display=('id' )<block_end>
|
"""
This module provides basic methods for unit conversion and calculation of basic wind plant variables
"""<def_stmt>convert_power_to_energy power_col sample_rate_min="10T"<block_start>"""
Compute energy [kWh] from power [kW] and return the data column
Args:
power_col(:obj:`pandas.Series`): power data in kW
sample_rate_min(:obj:`string`): sampling period as a pandas offset alias ("1T", "5T", "10T", "30T" or "1H"), if not ten minutes
Returns:
:obj:`pandas.Series`: Energy in kWh that matches the length of the input power series
"""<line_sep>time_conversion={"1T":1.0 "5T":5.0 "10T":10.0 "30T":30.0 "1H":60.0}<line_sep>energy_kwh=power_col<times>time_conversion[sample_rate_min]/60.0<line_sep><return>energy_kwh<block_end><def_stmt>compute_gross_energy net_energy avail_losses curt_losses avail_type="frac" curt_type="frac"<block_start>"""
This function computes gross energy for a wind plant or turbine by adding reported availability and
curtailment losses to reported net energy. The function accounts for whether availability or curtailment loss data
is reported in energy ('energy') or fractional units ('frac'). If in energy units, this function assumes that net
energy, availability loss, and curtailment loss are all reported in the same units
Args:
net_energy(:obj:`numpy.ndarray` or :obj:`pandas.Series`): reported net energy for wind plant or turbine
avail_losses(:obj:`numpy.ndarray` or :obj:`pandas.Series`): reported availability losses for wind plant or turbine
curt_losses(:obj:`numpy.ndarray` or :obj:`pandas.Series`): reported curtailment losses for wind plant or turbine
avail_type(:obj:`string`): whether availability losses are reported as fractions ('frac') or energy ('energy')
curt_type(:obj:`string`): whether curtailment losses are reported as fractions ('frac') or energy ('energy')
Returns:
gross(:obj:`numpy.ndarray` or :obj:`pandas.Series`): calculated gross energy for wind plant or turbine
"""<if_stmt>(avail_type<eq>"frac")&(curt_type<eq>"frac")<block_start>gross=net_energy/(1-avail_losses-curt_losses)<block_end><elif_stmt>(avail_type<eq>"frac")&(curt_type<eq>"energy")<block_start>gross=net_energy/(1-avail_losses)+curt_losses<block_end><elif_stmt>(avail_type<eq>"energy")&(curt_type<eq>"frac")<block_start>gross=net_energy/(1-curt_losses)+avail_losses<block_end><elif_stmt>(avail_type<eq>"energy")&(curt_type<eq>"energy")<block_start>gross=net_energy+curt_losses+avail_losses<block_end><if_stmt>len(gross[gross<l>net_energy])<g>0<block_start><raise>Exception("Gross energy cannot be less than net energy. Check your input values")<block_end><if_stmt>(len(avail_losses[avail_losses<l>0])<g>0)|(len(curt_losses[curt_losses<l>0])<g>0)<block_start><raise>Exception("Cannot have negative availability or curtailment input values. Check your data")<block_end><return>gross<block_end><def_stmt>convert_feet_to_meter variable<block_start>"""
Compute variable in [meter] from [feet] and return the data column
Args:
variable(:obj:`pandas.Series` or :obj:`float`): variable in feet
Returns:
:obj:`pandas.Series` or :obj:`float`: variable converted to meters
"""<line_sep>out=variable<times>0.3048<line_sep><return>out<block_end>
|
"""Test check DNS Servers for IPv6 errors."""<import_from_stmt>unittest.mock AsyncMock call patch<import_from_stmt>aiodns.error DNSError<import_stmt>pytest<import_from_stmt>supervisor.const CoreState<import_from_stmt>supervisor.coresys CoreSys<import_from_stmt>supervisor.resolution.checks.dns_server_ipv6_error CheckDNSServerIPv6Errors<import_from_stmt>supervisor.resolution.const ContextType IssueType<line_sep>@pytest.fixture(name="dns_query")<async_keyword><def_stmt>fixture_dns_query <arrow>AsyncMock<block_start>"""Mock aiodns query."""<with_stmt>patch("supervisor.resolution.checks.dns_server_ipv6_error.DNSResolver.query" new_callable=AsyncMock )<as>dns_query<block_start><yield>dns_query<block_end><block_end><async_keyword><def_stmt>test_base coresys:CoreSys<block_start>"""Test check basics."""<line_sep>dns_server_ipv6_errors=CheckDNSServerIPv6Errors(coresys)<assert_stmt>dns_server_ipv6_errors.slug<eq>"dns_server_ipv6_error"<assert_stmt>dns_server_ipv6_errors.enabled<block_end><async_keyword><def_stmt>test_check coresys:CoreSys dns_query:AsyncMock<block_start>"""Test check for DNS server IPv6 errors."""<line_sep>dns_server_ipv6_errors=CheckDNSServerIPv6Errors(coresys)<line_sep>coresys.core.state=CoreState.RUNNING<line_sep>coresys.plugins.dns.servers=["dns://1.1.1.1"]<assert_stmt>dns_server_ipv6_errors.dns_servers<eq>["dns://1.1.1.1" "dns://192.168.30.1" ]<assert_stmt>len(coresys.resolution.issues)<eq>0<line_sep><await>dns_server_ipv6_errors.run_check.__wrapped__(dns_server_ipv6_errors)<assert_stmt>dns_query.call_args_list<eq>[call("_checkdns.home-assistant.io" "AAAA") call("_checkdns.home-assistant.io" "AAAA") ]<assert_stmt>len(coresys.resolution.issues)<eq>0<line_sep>dns_query.reset_mock()<line_sep>coresys.plugins.dns.servers=[]<assert_stmt>dns_server_ipv6_errors.dns_servers<eq>["dns://192.168.30.1"]<line_sep>dns_query.side_effect=DNSError(1 "DNS server returned answer with no data")<line_sep><await>dns_server_ipv6_errors.run_check.__wrapped__(dns_server_ipv6_errors)<line_sep>dns_query.assert_called_once_with("_checkdns.home-assistant.io" "AAAA")<assert_stmt>len(coresys.resolution.issues)<eq>0<line_sep>dns_query.reset_mock()<line_sep>dns_query.side_effect=DNSError(4 "Domain name not found")<line_sep><await>dns_server_ipv6_errors.run_check.__wrapped__(dns_server_ipv6_errors)<line_sep>dns_query.assert_called_once_with("_checkdns.home-assistant.io" "AAAA")<assert_stmt>len(coresys.resolution.issues)<eq>1<assert_stmt>coresys.resolution.issues[0].type<is>IssueType.DNS_SERVER_IPV6_ERROR<assert_stmt>coresys.resolution.issues[0].context<is>ContextType.DNS_SERVER<assert_stmt>coresys.resolution.issues[0].reference<eq>"dns://192.168.30.1"<block_end><async_keyword><def_stmt>test_approve coresys:CoreSys dns_query:AsyncMock<block_start>"""Test approve existing DNS Server IPv6 error issues."""<line_sep>dns_server_ipv6_errors=CheckDNSServerIPv6Errors(coresys)<line_sep>coresys.core.state=CoreState.RUNNING<assert_stmt>dns_server_ipv6_errors.dns_servers<eq>["dns://192.168.30.1"]<line_sep>dns_query.side_effect=DNSError(4 "Domain name not found")<assert_stmt>(<await>dns_server_ipv6_errors.approve_check(reference="dns://1.1.1.1")<is><false>)<line_sep>dns_query.assert_not_called()<assert_stmt>(<await>dns_server_ipv6_errors.approve_check(reference="dns://192.168.30.1")<is><true>)<line_sep>dns_query.assert_called_once_with("_checkdns.home-assistant.io" "AAAA")<line_sep>dns_query.reset_mock()<line_sep>dns_query.side_effect=DNSError(1 "DNS server returned answer with no 
data")<assert_stmt>(<await>dns_server_ipv6_errors.approve_check(reference="dns://192.168.30.1")<is><false>)<line_sep>dns_query.assert_called_once_with("_checkdns.home-assistant.io" "AAAA")<line_sep>dns_query.reset_mock()<line_sep>dns_query.side_effect=<none><assert_stmt>(<await>dns_server_ipv6_errors.approve_check(reference="dns://192.168.30.1")<is><false>)<line_sep>dns_query.assert_called_once_with("_checkdns.home-assistant.io" "AAAA")<block_end><async_keyword><def_stmt>test_did_run coresys:CoreSys<block_start>"""Test that the check ran as expected."""<line_sep>dns_server_ipv6_errors=CheckDNSServerIPv6Errors(coresys)<line_sep>should_run=dns_server_ipv6_errors.states<line_sep>should_not_run=[state<for>state CoreState<if>state<not><in>should_run]<assert_stmt>should_run<eq>[CoreState.RUNNING]<assert_stmt>len(should_not_run)<ne>0<with_stmt>patch.object(CheckDNSServerIPv6Errors "run_check" return_value=<none>)<as>check<block_start><for_stmt>state should_run<block_start>coresys.core.state=state<line_sep><await>dns_server_ipv6_errors()<line_sep>check.assert_called_once()<line_sep>check.reset_mock()<block_end><for_stmt>state should_not_run<block_start>coresys.core.state=state<line_sep><await>dns_server_ipv6_errors()<line_sep>check.assert_not_called()<line_sep>check.reset_mock()<block_end><block_end><block_end><async_keyword><def_stmt>test_check_if_affected coresys:CoreSys<block_start>"""Test that check is still executed even if already affected."""<line_sep>dns_server_ipv6_errors=CheckDNSServerIPv6Errors(coresys)<line_sep>coresys.core.state=CoreState.RUNNING<line_sep>coresys.resolution.create_issue(IssueType.DNS_SERVER_IPV6_ERROR ContextType.DNS_SERVER reference="dns://192.168.30.1" )<assert_stmt>len(coresys.resolution.issues)<eq>1<with_stmt>patch.object(CheckDNSServerIPv6Errors "approve_check" return_value=<true>)<as>approve patch.object(CheckDNSServerIPv6Errors "run_check" return_value=<none>)<as>check<block_start><await>dns_server_ipv6_errors()<line_sep>approve.assert_called_once()<line_sep>check.assert_called_once()<block_end><block_end>
|
<import_stmt>pickle<import_stmt>os<import_stmt>argparse<import_stmt>sys<def_stmt>main task_dir dry_run=<false><block_start><with_stmt>open(os.path.join(task_dir 'file_list.p') 'rb')<as>f<block_start>bucketed=pickle.load(f)<block_end><if_stmt>dry_run<block_start>print("Got {} (for example)".format(bucketed[0][0]))<block_end>bucketed=[['./bucket_'+x.split('bucket_')[1]<for>x b]<for>b bucketed]<if_stmt>dry_run<block_start>print("Converting to {} (for example)".format(bucketed[0][0]))<line_sep>print("Will resolve to {} (for example)".format(os.path.normpath(os.path.join(task_dir bucketed[0][0]))))<block_end><else_stmt><block_start><with_stmt>open(os.path.join(task_dir 'file_list.p') 'wb')<as>f<block_start>pickle.dump(bucketed f)<block_end><block_end><block_end>parser=argparse.ArgumentParser(description='Fix the file list of a parsed directory.')<line_sep>parser.add_argument('task_dir' help="Directory of parsed files")<line_sep>parser.add_argument('--dry-run' action="store_true" help="Don't overwrite files")<if_stmt>__name__<eq>'__main__'<block_start>args=vars(parser.parse_args())<line_sep>main(**args)<block_end>
|
'''initialize'''<import_from_stmt>.gcnet GCNet<import_from_stmt>.contextblock ContextBlock<line_sep>
|
<import_stmt>statistics<import_stmt>sys<if_stmt>__name__<eq>'__main__'<block_start>print('##################### Summary ##########################')<with_stmt>open('bleu.txt')<as>f<block_start>bleu=float(f.read().strip().split()[2].replace(',' ''))<block_end>print("BLEU: {:.2f}".format(bleu))<with_stmt>open('meteor.txt')<as>f<block_start>meteor=float(f.readlines()[-1].strip().split()[-1])<block_end>print("METEOR: {:.2f}".format(meteor))<with_stmt>open('ter.txt')<as>f<block_start>ter=float(f.readlines()[-4].strip().split()[2])<block_end>print("TER: {:.2f}".format(ter))<with_stmt>open('moverscore.txt')<as>f<block_start>moverscore=float(f.readlines()[-1].strip())<block_end>print("MoverScore: {:.2f}".format(moverscore))<with_stmt>open('bertscore.txt')<as>f<block_start>bertscore=float(f.read().strip().split()[-1])<block_end>print("BERTScore F1: {:.2f}".format(bertscore))<with_stmt>open('bleurt.txt')<as>f<block_start>scores=[float(s)<for>s f.readlines()]<line_sep>bleurt=statistics.mean(scores)<block_end>print("BLEURT: {:.2f}".format(bleurt))<line_sep>print(' & '.join(["{:.2f}".format(bleu) "{:.2f}".format(meteor) "{:.2f}".format(ter) "{:.2f}".format(moverscore) "{:.2f}".format(bertscore) "{:.2f}".format(bleurt)]))<block_end>
|
<import_from_stmt>spidermon.utils.field_coverage calculate_field_coverage<def_stmt>test_calculate_field_coverage_from_stats <block_start>spider_stats={"finish_reason":"finished" "spidermon_item_scraped_count":100 "spidermon_item_scraped_count/dict":100 "spidermon_item_scraped_count/dict/author":100 "spidermon_item_scraped_count/dict/author/author_url":64 "spidermon_item_scraped_count/dict/author/name":100 "spidermon_item_scraped_count/dict/quote":50 "spidermon_item_scraped_count/dict/tags":100 }<line_sep>expected_coverage={"spidermon_field_coverage/dict/author":1.0 "spidermon_field_coverage/dict/author/author_url":0.64 "spidermon_field_coverage/dict/author/name":1.0 "spidermon_field_coverage/dict/quote":0.5 "spidermon_field_coverage/dict/tags":1.0 }<line_sep>coverage=calculate_field_coverage(spider_stats)<assert_stmt>coverage<eq>expected_coverage<block_end>
|
# -*- coding: utf-8 -*-
#
# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is
# holder of all proprietary rights on this computer program.
# Using this computer program means that you agree to the terms
# in the LICENSE file included with this software distribution.
# Any use not explicitly granted by the LICENSE is prohibited.
#
# Copyright©2019 Max-Planck-Gesellschaft zur Förderung
# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute
# for Intelligent Systems. All rights reserved.
#
# For comments or questions, please email us at <EMAIL>
# For commercial licensing contact, please contact <EMAIL>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>numpy<as>np<import_stmt>pickle<import_stmt>torch.nn.functional<as>F<class_stmt>FLAMETex(nn.Module)<block_start>"""
FLAME texture:
https://github.com/TimoBolkart/TF_FLAME/blob/ade0ab152300ec5f0e8555d6765411555c5ed43d/sample_texture.py#L64
FLAME texture converted from BFM:
https://github.com/TimoBolkart/BFM_to_FLAME
"""<def_stmt>__init__ self config<block_start>super(FLAMETex self).__init__()<if_stmt>config.tex_type<eq>'BFM'<block_start>mu_key='MU'<line_sep>pc_key='PC'<line_sep>n_pc=199<line_sep>tex_path=config.tex_path<line_sep>tex_space=np.load(tex_path)<line_sep>texture_mean=tex_space[mu_key].reshape(1 -1)<line_sep>texture_basis=tex_space[pc_key].reshape(-1 n_pc)<block_end><elif_stmt>config.tex_type<eq>'FLAME'<block_start>mu_key='mean'<line_sep>pc_key='tex_dir'<line_sep>n_pc=200<line_sep>tex_path=config.flame_tex_path<line_sep>tex_space=np.load(tex_path)<line_sep>texture_mean=tex_space[mu_key].reshape(1 -1)/255.<line_sep>texture_basis=tex_space[pc_key].reshape(-1 n_pc)/255.<block_end><else_stmt><block_start>print('texture type ' config.tex_type 'not exist!')<line_sep><raise>NotImplementedError<block_end>n_tex=config.n_tex<line_sep>num_components=texture_basis.shape[1]<line_sep>texture_mean=torch.from_numpy(texture_mean).float()[<none> <ellipsis>]<line_sep>texture_basis=torch.from_numpy(texture_basis[: :n_tex]).float()[<none> <ellipsis>]<line_sep>self.register_buffer('texture_mean' texture_mean)<line_sep>self.register_buffer('texture_basis' texture_basis)<block_end><def_stmt>forward self texcode=<none><block_start>'''
texcode: [batchsize, n_tex]
texture: [bz, 3, 256, 256], range: 0-1
'''<line_sep>texture=self.texture_mean+(self.texture_basis<times>texcode[: <none> :]).sum(-1)<line_sep>texture=texture.reshape(texcode.shape[0] 512 512 3).permute(0 3 1 2)<line_sep>texture=F.interpolate(texture [256 256])<line_sep>texture=texture[: [2 1 0] : :]<line_sep><return>texture<block_end><block_end><def_stmt>texture_flame2smplx cached_data flame_texture smplx_texture<block_start>''' Convert flame texture map (face-only) into smplx texture map (includes body texture)
TODO: pytorch version ==> grid sample
'''<if_stmt>smplx_texture.shape[0]<ne>smplx_texture.shape[1]<block_start>print('SMPL-X texture not squared (%d != %d)'%(smplx_texture[0] smplx_texture[1]))<line_sep><return><block_end><if_stmt>smplx_texture.shape[0]<ne>cached_data['target_resolution']<block_start>print('SMPL-X texture size does not match cached image resolution (%d != %d)'%(smplx_texture.shape[0] cached_data['target_resolution']))<line_sep><return><block_end>x_coords=cached_data['x_coords']<line_sep>y_coords=cached_data['y_coords']<line_sep>target_pixel_ids=cached_data['target_pixel_ids']<line_sep>source_uv_points=cached_data['source_uv_points']<line_sep>source_tex_coords=np.zeros_like((source_uv_points)).astype(int)<line_sep>source_tex_coords[: 0]=np.clip(flame_texture.shape[0]<times>(1.0-source_uv_points[: 1]) 0.0 flame_texture.shape[0]).astype(int)<line_sep>source_tex_coords[: 1]=np.clip(flame_texture.shape[1]<times>(source_uv_points[: 0]) 0.0 flame_texture.shape[1]).astype(int)<line_sep>smplx_texture[y_coords[target_pixel_ids].astype(int) x_coords[target_pixel_ids].astype(int) :]=flame_texture[source_tex_coords[: 0] source_tex_coords[: 1]]<line_sep><return>smplx_texture<block_end>
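# --- Hedged usage sketch (added by the editor, not part of the original module) ---
# FLAMETex maps a per-image texture code to an albedo map:
# texture = mean + basis @ texcode, reshaped to 512x512x3 and resized to 256x256.
# The config object and the texture-space file path below are assumptions; the
# real .npz files come from the FLAME / BFM_to_FLAME releases cited above.
if __name__ == '__main__':
    from types import SimpleNamespace

    config = SimpleNamespace(tex_type='FLAME',
                             flame_tex_path='data/FLAME_texture.npz',   # hypothetical path
                             tex_path=None,
                             n_tex=50)
    flametex = FLAMETex(config)                  # loads the mean/basis buffers
    texcode = torch.zeros(2, config.n_tex)       # batch of two texture codes
    albedo = flametex(texcode)                   # -> [2, 3, 256, 256], values in 0-1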
|
<import_from_stmt>.feed_forward PositionwiseFeedForward<import_from_stmt>.layer_norm LayerNorm<import_from_stmt>.sublayer *<import_from_stmt>.gelu GELU<line_sep>
|
<import_from_stmt>hyp.schematics Responder<as>SchematicsResponder<import_from_stmt>fixtures PostResponder PersonResponder PostSerializer <class_stmt>TestLinked(object)<block_start><def_stmt>test_single self<block_start>author={'id':1 'name':'John'}<line_sep>comments=[{'id':1 'content':'My comment'} {'id':2 'content':'Another comment'} ]<line_sep>post={'id':1 'title':'My title' 'comments':comments 'author':author}<line_sep>response=PostResponder.build(post linked={'comments':comments 'author':[author]})<assert_stmt>response<eq>{'posts':{'id':1 'title':'My title' 'links':{'author':1 'comments':[1 2] }} 'links':{'posts.author':{'href':'http://example.com/people/{posts.author}' 'type':'people' } 'posts.comments':{'href':'http://example.com/comments/{posts.comments}' 'type':'comments' }} 'linked':{'comments':[{'id':1 'content':'My comment'} {'id':2 'content':'Another comment'} ] 'people':[{'id':1 'name':'John'} ]}}<block_end><def_stmt>test_multiple_same_type self<block_start><class_stmt>MultipleAuthorsResponder(SchematicsResponder)<block_start>TYPE='posts'<line_sep>SERIALIZER=PostSerializer<line_sep>LINKS={'author':{'responder':PersonResponder 'href':'http://example.com/people/{posts.author}' } 'coauthor':{'responder':PersonResponder 'href':'http://example.com/people/{posts.author}' } }<block_end>author={'id':1 'name':'John'}<line_sep>coauthor={'id':2 'name':'Lisa'}<line_sep>post={'id':1 'title':'My title' 'author':author 'coauthor':coauthor}<line_sep>response=MultipleAuthorsResponder.build(post linked={'author':[author] 'coauthor':[coauthor]})<assert_stmt>len(response['linked']['people'])<eq>2<line_sep>ids=[person['id']<for>person response['linked']['people']]<assert_stmt>1<in>ids<assert_stmt>2<in>ids<block_end><def_stmt>test_custom_linked_key self<block_start><class_stmt>CustomPostResponder(SchematicsResponder)<block_start>TYPE='posts'<line_sep>SERIALIZER=PostSerializer<line_sep>LINKS={'author':{'responder':PersonResponder 'href':'http://example.com/people/{posts.author}' 'key':'writer' } }<block_end>author={'id':1 'name':'John'}<line_sep>post={'id':1 'title':'My title' 'writer':author}<line_sep>response=CustomPostResponder.build(post linked={'author':[author] })<assert_stmt>response<eq>{'posts':{'id':1 'title':'My title' 'links':{'author':1 }} 'links':{'posts.author':{'href':'http://example.com/people/{posts.author}' 'type':'people' }} 'linked':{'people':[{'id':1 'name':'John'} ] }}<block_end><block_end>
|
<import_stmt>sys<import_stmt>os<import_stmt>errno<import_from_stmt>os listdir<import_from_stmt>os.path dirname relpath join<def_stmt>ensure_dir_exists path<block_start><try_stmt><block_start>os.makedirs(path)<block_end><except_stmt>OSError<as>e<block_start><if_stmt>e.errno<eq>errno.EEXIST<and>os.path.isdir(path)<block_start><pass><block_end><else_stmt><block_start><raise><block_end><block_end><block_end><def_stmt>make_dir directory<block_start><if_stmt><not>os.path.exists(directory)<block_start>os.makedirs(directory)<block_end><block_end><def_stmt>files directory<block_start><for_stmt>dirpath,dirnames,filenames os.walk(directory)<block_start><for_stmt>name filenames<block_start><yield>relpath(join(dirpath name) directory)<block_end><block_end><block_end><def_stmt>headers_set directory<block_start><return>{f<for>f files(directory)<if>f.endswith('.h')<and><not>f.startswith('internal/')}<block_end><if_stmt>__name__<eq>"__main__"<block_start>python2_path=sys.argv[1]<line_sep>python3_path=sys.argv[2]<line_sep>output_path=sys.argv[3]<line_sep>ensure_dir_exists(join('.' python2_path))<line_sep>ensure_dir_exists(join('.' python3_path))<line_sep>only_headers2=headers_set(python2_path)<line_sep>only_headers3=headers_set(python3_path)<line_sep>all_headers=only_headers2|only_headers3<for_stmt>header all_headers<block_start>path=join(output_path header)<line_sep>make_dir(dirname(path))<line_sep>f=open(path 'w')<line_sep>f.write('#pragma once\n\n')<line_sep>f.write('#ifdef USE_PYTHON3\n')<if_stmt>(header<in>only_headers3)<block_start>f.write('#include <'+join(python3_path header)+'>\n')<block_end><else_stmt><block_start>f.write('#error "No <'+header+'> in Python3"\n')<block_end>f.write('#else\n')<if_stmt>(header<in>only_headers2)<block_start>f.write('#include <'+join(python2_path header)+'>\n')<block_end><else_stmt><block_start>f.write('#error "No <'+header+'> in Python2"\n')<block_end>f.write('#endif\n')<block_end><block_end>
|
""" Build index from directory listing
From: https://stackoverflow.com/questions/39048654/how-to-enable-directory-indexing-on-github-pages
make_index.py </path/to/directory>
"""<line_sep>INDEX_TEMPLATE=r"""
<html>
<title>Links for lief</title>
<body>
<h1>Links for lief</h1>
% for name in names:
<a href="${base_url}/${base}/${name}">${name}</a><br />
% endfor
</body>
</html>
"""<line_sep>EXCLUDED=['index.html' '.gitkeep']<line_sep>BASE_URL="https://lief-project.github.io"<import_stmt>os<import_stmt>argparse<line_sep># May need to do "pip install mako"
<import_from_stmt>mako.template Template<def_stmt>main <block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("directory")<line_sep>parser.add_argument("--base")<line_sep>parser.add_argument("--output")<line_sep>args=parser.parse_args()<line_sep>fnames=[fname<for>fname sorted(os.listdir(args.directory))<if>fname<not><in>EXCLUDED]<line_sep>html=Template(INDEX_TEMPLATE).render(names=fnames base_url=BASE_URL base=args.base)<with_stmt>open(args.output "w")<as>f<block_start>f.write(html)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
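# Example (hedged): rendering the Mako template above directly for two file names;
# the base value 'lief/latest' is a placeholder for whatever --base is passed.
demo_html = Template(INDEX_TEMPLATE).render(
    names=['a.whl', 'b.whl'], base_url=BASE_URL, base='lief/latest')
# Each name renders as a line like:
#   <a href="https://lief-project.github.io/lief/latest/a.whl">a.whl</a><br />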
|
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
<import_from_stmt>collections OrderedDict<import_stmt>importlib<line_sep>"""
This module is a collection of classes that provide a
friendlier interface to MPI (through mpi4py). They help
allocate local tasks/data from global tasks/data and gather
global data (from all processors).
Although general, this module was only implemented to
work with the convergence evaluation framework. More work
is needed to make this appropriate for general use.
"""<class_stmt>MPIInterface<block_start>__have_mpi__=<none><def_stmt>__init__ self<block_start><if_stmt>MPIInterface.__have_mpi__<is><none># This is trying to import mpy4py.MPI, and setting a flag to indicate
# if it succeeds or not.
# we do this here instead of at the module level, because we only want
# to do the import if an MPIInterface is ever requested.
<block_start><try_stmt># try the import (the 'globals()' makes sure it is imported
# in the module space and not local to the __init__ method)
<block_start>globals()['MPI']=importlib.import_module('mpi4py.MPI')<line_sep># import succeeded
MPIInterface.__have_mpi__=<true><block_end><except_stmt># import failed (e.g., no mpi4py installed)
<block_start>MPIInterface.__have_mpi__=<false><block_end><block_end>self._comm=<none><line_sep>self._size=<none><line_sep>self._rank=<none><if_stmt>self.have_mpi<block_start>self._comm=MPI.COMM_WORLD# pylint: disable=undefined-variable
self._size=self._comm.Get_size()<line_sep>self._rank=self._comm.Get_rank()<block_end><block_end>@property<def_stmt>have_mpi self<block_start><assert_stmt>MPIInterface.__have_mpi__<is><not><none><line_sep><return>MPIInterface.__have_mpi__<block_end>@property<def_stmt>comm self<block_start><return>self._comm<block_end>@property<def_stmt>rank self<block_start><return>self._rank<block_end>@property<def_stmt>size self<block_start><return>self._size<block_end><block_end><class_stmt>ParallelTaskManager<block_start><def_stmt>__init__ self n_total_tasks mpi_interface=<none><block_start><if_stmt>mpi_interface<is><none><block_start>self._mpi_interface=MPIInterface()<block_end><else_stmt><block_start>self._mpi_interface=mpi_interface<block_end>self._n_total_tasks=n_total_tasks<if_stmt><not>self._mpi_interface.have_mpi<block_start>self._local_map=range(n_total_tasks)<block_end><else_stmt><block_start>rank=self._mpi_interface.rank<line_sep>size=self._mpi_interface.size<line_sep># there must be a better way to do this
# find which entries in global correspond
# to this process (want them to be contiguous
# for the MPI Allgather calls later
local_N=[0<for>i range(self._mpi_interface.size)]<for_stmt>i range(n_total_tasks)<block_start>process_i=i%size<line_sep>local_N[process_i]<augadd>1<block_end>start=0<line_sep>end=<none><for_stmt>i,v enumerate(local_N)<block_start><if_stmt>i<eq>self._mpi_interface.rank<block_start>end=start+v<line_sep><break><block_end><else_stmt><block_start>start<augadd>v<block_end><block_end>self._local_map=list(range(start end))<block_end><block_end><def_stmt>is_root self<block_start><if_stmt><not>self._mpi_interface.have_mpi<or>self._mpi_interface.rank<eq>0<block_start><return><true><block_end><return><false><block_end># ToDo: fix the parallel task manager to handle dictionaries as well as lists
<def_stmt>global_to_local_data self global_data<block_start><if_stmt>type(global_data)<is>list<block_start>local_data=list()<assert_stmt>(len(global_data)<eq>self._n_total_tasks)<for_stmt>i self._local_map<block_start>local_data.append(global_data[i])<block_end><return>local_data<block_end><elif_stmt>type(global_data)<is>OrderedDict<block_start>local_data=OrderedDict()<assert_stmt>(len(global_data)<eq>self._n_total_tasks)<line_sep>idx=0<for_stmt>k,v global_data.items()<block_start><if_stmt>idx<in>self._local_map<block_start>local_data[k]=v<block_end>idx<augadd>1<block_end><return>local_data<block_end><raise>ValueError('Unknown type passed to global_to_local_data. Expected list or OrderedDict.')<block_end><def_stmt>allgather_global_data self local_data<block_start><assert_stmt>(len(local_data)<eq>len(self._local_map))<if_stmt><not>self._mpi_interface.have_mpi<block_start><return>list(local_data)<block_end>comm=self._mpi_interface.comm<line_sep>global_data_list_of_lists=comm.allgather(local_data)<line_sep># allgather returns one list of local results per rank; stack them into a single flat list
<return>self._stack_global_data(global_data_list_of_lists)<block_end><def_stmt>gather_global_data self local_data<block_start><assert_stmt>(len(local_data)<eq>len(self._local_map))<if_stmt><not>self._mpi_interface.have_mpi<block_start><return>list(local_data)<block_end>comm=self._mpi_interface.comm<line_sep>global_data_list_of_lists=comm.gather(local_data)<if_stmt>global_data_list_of_lists<is><not><none><block_start><return>self._stack_global_data(global_data_list_of_lists)<block_end><assert_stmt>self.is_root()<eq><false><line_sep><return><none><block_end><def_stmt>_stack_global_data self global_data_list_of_lists# stack the list of lists into one global data list
# ToDo: test that this is equivalent to [d for sublist in global_data_list_of_lists for d in sublist]
<block_start>global_data=list()<for_stmt>i range(self._mpi_interface.size)<block_start>global_data.extend(global_data_list_of_lists[i])<block_end><return>global_data<block_end><block_end>
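# Example (hedged): a minimal round trip with the classes above. Without mpi4py
# installed there is a single implicit rank that owns every task, so this runs
# serially; under mpirun each rank would only process its own slice of the list.
tasks = ['case-%d' % i for i in range(5)]                    # global task list
manager = ParallelTaskManager(len(tasks))                    # builds an MPIInterface internally
local_tasks = manager.global_to_local_data(tasks)            # tasks owned by this rank
local_results = [name.upper() for name in local_tasks]       # per-rank work
all_results = manager.allgather_global_data(local_results)   # every rank sees all results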
|
# terrascript/data/Trois-Six/sendgrid.py
# Automatically generated by tools/makecode.py (24-Sep-2021 15:26:45 UTC)
__all__=[]<line_sep>
|
<import_from_stmt>nautobot.extras.jobs Job FileVar StringVar<class_stmt>TestFieldOrder(Job)<block_start>"""My job demo."""<line_sep>var23=StringVar(description="I want to be second")<line_sep>var2=StringVar(description="Hello")<line_sep>var1=FileVar(description="Some file wants to be first")<class_stmt>Meta<block_start>"""Metaclass attrs."""<line_sep>field_order=["var1" "var2" "var23"]<block_end><block_end>
|
"""
Test case for a project that includes a script that has the same
base-name as a package used by the script.
"""<import_stmt>sys<if_stmt>(sys.version_info[0]<eq>2<and>sys.version_info[:2]<ge>(2 7))<or>(sys.version_info[0]<eq>3<and>sys.version_info[:2]<ge>(3 2))<block_start><import_stmt>unittest<block_end><else_stmt><block_start><import_stmt>unittest2<as>unittest<block_end><import_stmt>subprocess<import_stmt>shutil<import_stmt>time<import_stmt>os<import_stmt>signal<import_stmt>py2app<import_stmt>zipfile<import_from_stmt>.tools kill_child_processes<line_sep>DIR_NAME=os.path.dirname(os.path.abspath(__file__))<class_stmt>TestBasicApp(unittest.TestCase)<block_start>py2app_args=[]<line_sep>python_args=[]<line_sep>app_dir=os.path.join(DIR_NAME 'pkg_script_app')<line_sep># Basic setup code
#
# The code in this block needs to be moved to
# a base-class.
@classmethod<def_stmt>setUpClass cls<block_start>kill_child_processes()<line_sep>env=os.environ.copy()<line_sep>env['TMPDIR']=os.getcwd()<line_sep>pp=os.path.dirname(os.path.dirname(py2app.__file__))<if_stmt>'PYTHONPATH'<in>env<block_start>env['PYTHONPATH']=pp+':'+env['PYTHONPATH']<block_end><else_stmt><block_start>env['PYTHONPATH']=pp<block_end><if_stmt>'LANG'<not><in>env# Ensure that testing through SSH works
<block_start>env['LANG']='en_US.UTF-8'<block_end>p=subprocess.Popen([sys.executable]+cls.python_args+['setup.py' 'py2app']+cls.py2app_args cwd=cls.app_dir stdout=subprocess.PIPE stderr=subprocess.STDOUT close_fds=<false> env=env)<line_sep>lines=p.communicate()[0]<if_stmt>p.wait()<ne>0<block_start>print(lines)<line_sep><raise>AssertionError("Creating basic_app bundle failed")<block_end><block_end>@classmethod<def_stmt>tearDownClass cls<block_start><if_stmt>os.path.exists(os.path.join(cls.app_dir 'build'))<block_start>shutil.rmtree(os.path.join(cls.app_dir 'build'))<block_end><if_stmt>os.path.exists(os.path.join(cls.app_dir 'dist'))<block_start>shutil.rmtree(os.path.join(cls.app_dir 'dist'))<block_end>time.sleep(2)<block_end><def_stmt>tearDown self<block_start>kill_child_processes()<line_sep>time.sleep(1)<block_end><def_stmt>start_app self# Start the test app, return a subprocess object where
# stdin and stdout are connected to pipes.
<block_start>path=os.path.join(self.app_dir 'dist/quot.app/Contents/MacOS/quot')<line_sep>p=subprocess.Popen([path] stdin=subprocess.PIPE stdout=subprocess.PIPE close_fds=<false> )<line_sep>#stderr=subprocess.STDOUT)
<return>p<block_end><def_stmt>wait_with_timeout self proc timeout=10<block_start><for_stmt>i range(timeout)<block_start>x=proc.poll()<if_stmt>x<is><none><block_start>time.sleep(1)<block_end><else_stmt><block_start><return>x<block_end><block_end>os.kill(proc.pid signal.SIGKILL)<line_sep><return>proc.wait()<block_end>#
# End of setup code
#
<def_stmt>test_basic_start self<block_start>p=self.start_app()<line_sep>p.stdin.close()<line_sep>exit=self.wait_with_timeout(p)<line_sep>self.assertEqual(exit 0)<line_sep>p.stdout.close()<block_end><def_stmt>test_simple_imports self<block_start>p=self.start_app()<line_sep>p.stdin.write(("print(%r in sys.path)\n"%(os.path.join(self.app_dir 'dist/quot.app/Contents/Resources') )).encode('latin1'))<line_sep>p.stdin.flush()<line_sep>ln=p.stdout.readline()<line_sep>self.assertEqual(ln.strip() b"False")<line_sep># Basic module that is always present:
p.stdin.write('import_module("os")\n'.encode('latin1'))<line_sep>p.stdin.flush()<line_sep>ln=p.stdout.readline()<line_sep>self.assertEqual(ln.strip() b"os")<line_sep># Dependency of the main module:
p.stdin.write('import_module("quot")\n'.encode('latin1'))<line_sep>p.stdin.flush()<line_sep>ln=p.stdout.readline()<line_sep>self.assertEqual(ln.strip() b"quot")<line_sep># - verify that the right one gets loaded
<if_stmt>'--alias'<not><in>self.py2app_args<block_start>p.stdin.write('import quot;print(quot.__file__)\n'.encode('latin1'))<line_sep>p.stdin.flush()<line_sep>ln=p.stdout.readline()<line_sep>self.assertTrue(b"Contents/Resources/lib"<in>ln.strip())<block_end>p.stdin.write('import_module("quot.queue")\n'.encode('latin1'))<line_sep>p.stdin.flush()<line_sep>ln=p.stdout.readline()<line_sep>self.assertEqual(ln.strip() b"quot.queue")<line_sep>p.stdin.close()<line_sep>p.stdout.close()<line_sep>self.wait_with_timeout(p)<block_end><def_stmt>test_zip_contents self<block_start><if_stmt>'--alias'<in>self.py2app_args<block_start><raise>unittest.SkipTest("Not relevant for Alias builds")<block_end>dirpath=os.path.join(self.app_dir 'dist/quot.app/Contents')<line_sep>zfpath=os.path.join(dirpath 'Resources/lib/python%d%d.zip'%(sys.version_info[:2]))<if_stmt><not>os.path.exists(zfpath)<block_start>zfpath=os.path.join(dirpath 'Resources/lib/python%d.%d/site-packages.zip'%(sys.version_info[:2]))<block_end><if_stmt><not>os.path.exists(zfpath)<block_start>zfpath=os.path.join(dirpath 'Resources/lib/site-packages.zip')<block_end><if_stmt><not>os.path.exists(zfpath)<block_start>self.fail("Cannot locate embedded zipfile")<block_end>zf=zipfile.ZipFile(zfpath 'r')<for_stmt>nm ('quot.py' 'quot.pyc' 'quot.pyo')<block_start><try_stmt><block_start>zf.read(nm)<line_sep>self.fail("'quot' module is in the zipfile")<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><block_end><block_end><class_stmt>TestBasicAliasApp(TestBasicApp)<block_start>py2app_args=['--alias' ]<block_end><class_stmt>TestBasicSemiStandaloneApp(TestBasicApp)<block_start>py2app_args=['--semi-standalone' ]<block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
|
# -*- coding: utf-8 -*-
"""
The Evolved Transformer and large-scale evolution of image classifiers
======================================================================
Implement evolution to exploit configurations efficiently under a fixed resource budget
"""<import_stmt>copy<import_stmt>importlib<import_stmt>logging<import_stmt>numpy<as>np<import_from_stmt>orion.algo.hyperband Hyperband HyperbandBracket<import_from_stmt>orion.core.utils format_trials<line_sep>logger=logging.getLogger(__name__)<line_sep>REGISTRATION_ERROR="""
Bad fidelity level {fidelity}. Should be in {budgets}.
Params: {params}
"""<line_sep>SPACE_ERROR="""
EvolutionES cannot be used if space does not contain a fidelity dimension.
"""<line_sep>BUDGET_ERROR="""
Cannot build budgets below max_resources;
(max: {}) - (min: {}) > (num_rungs: {})
"""<def_stmt>compute_budgets min_resources max_resources reduction_factor nums_population pairs<block_start>"""Compute the budgets used for each execution of hyperband"""<line_sep>budgets_eves=[]<if_stmt>reduction_factor<eq>1<block_start><for_stmt>i range(min_resources max_resources+1)<block_start><if_stmt>i<eq>min_resources<block_start>budgets_eves.append([(nums_population i)])<block_end><else_stmt><block_start>budgets_eves[0].append((pairs<times>2 i))<block_end><block_end><block_end><else_stmt><block_start>num_brackets=int(np.log(max_resources)/np.log(reduction_factor))<line_sep>budgets=[]<line_sep>budgets_tab={}# just for display consideration
<for_stmt>bracket_id range(0 num_brackets+1)<block_start>bracket_budgets=[]<line_sep>num_trials=int(np.ceil(int((num_brackets+1)/(num_brackets-bracket_id+1))<times>(reduction_factor<power>(num_brackets-bracket_id))))<line_sep>min_resources=max_resources/reduction_factor<power>(num_brackets-bracket_id)<for_stmt>i range(0 num_brackets-bracket_id+1)<block_start>n_i=int(num_trials/reduction_factor<power>i)<line_sep>min_i=int(min_resources<times>reduction_factor<power>i)<line_sep>bracket_budgets.append((n_i min_i))<if_stmt>budgets_tab.get(i)<block_start>budgets_tab[i].append((n_i min_i))<block_end><else_stmt><block_start>budgets_tab[i]=[(n_i min_i)]<block_end><block_end>budgets.append(bracket_budgets)<block_end><for_stmt>i range(len(budgets[0]))<block_start><if_stmt>i<eq>0<block_start>budgets_eves.append([(nums_population budgets[0][i][1])])<block_end><else_stmt><block_start>budgets_eves[0].append((pairs<times>2 budgets[0][i][1]))<block_end><block_end><block_end><return>budgets_eves<block_end><class_stmt>EvolutionES(Hyperband)<block_start>"""EvolutionES formulates hyperparameter optimization as an evolution.
For more information on the algorithm,
see original paper at
https://arxiv.org/pdf/1703.01041.pdf and
https://arxiv.org/pdf/1901.11117.pdf
Real et al. "Large-Scale Evolution of Image Classifiers"
So et al. "The Evolved Transformer"
Parameters
----------
space: `orion.algo.space.Space`
Optimisation space with priors for each dimension.
seed: None, int or sequence of int
Seed for the random number generator used to sample new trials.
Default: ``None``
repetitions: int
Number of execution of Hyperband. Default is numpy.inf which means to
run Hyperband until no new trials can be suggested.
nums_population: int
Number of population for EvolutionES. Larger number of population often gets better
performance but causes more computation. So there is a trade-off according to the search
space and required budget of your problems.
Default: 20
mutate: str or None, optional
A customized mutate function can be defined together with its mutate factors,
such as a multiply factor (multiply/divide by that factor) and an add factor
(add/subtract that factor). The function must be given as
an importable string. If None, the default
mutate function is used: ``orion.algo.mutate_functions.default_mutate``.
"""<line_sep>requires_type=<none><line_sep>requires_dist=<none><line_sep>requires_shape="flattened"<def_stmt>__init__ self space seed=<none> repetitions=np.inf nums_population=20 mutate=<none> max_retries=1000 <block_start>super(EvolutionES self).__init__(space seed=seed repetitions=repetitions)<line_sep>pair=nums_population<floordiv>2<line_sep>mutate_ratio=0.3<line_sep>self.nums_population=nums_population<line_sep>self.nums_comp_pairs=pair<line_sep>self.max_retries=max_retries<line_sep>self.mutate_ratio=mutate_ratio<line_sep>self.mutate=mutate<line_sep>self.nums_mutate_gene=(int((len(self.space.values())-1)<times>mutate_ratio)<if>int((len(self.space.values())-1)<times>mutate_ratio)<g>0<else>1)<line_sep>self._param_names<augadd>["nums_population" "mutate" "max_retries"]<line_sep>self.hurdles=[]<line_sep>self.population={}<for_stmt>i,dim enumerate(self.space.values())<block_start><if_stmt>dim.type<ne>"fidelity"<block_start>self.population[i]=[-1]<times>nums_population<block_end><block_end>self.performance=np.inf<times>np.ones(nums_population)<line_sep>self.budgets=compute_budgets(self.min_resources self.max_resources self.reduction_factor nums_population pair )<line_sep>self.brackets=[BracketEVES(self bracket_budgets 1)<for>bracket_budgets self.budgets]<line_sep>self.seed_rng(seed)<block_end>@property<def_stmt>state_dict self<block_start>"""Return a state dict that can be used to reset the state of the algorithm."""<line_sep>state_dict=super(EvolutionES self).state_dict<line_sep>state_dict["population"]=copy.deepcopy(self.population)<line_sep>state_dict["performance"]=copy.deepcopy(self.performance)<line_sep>state_dict["hurdles"]=copy.deepcopy(self.hurdles)<line_sep><return>state_dict<block_end><def_stmt>set_state self state_dict<block_start>"""Reset the state of the algorithm based on the given state_dict"""<line_sep>super(EvolutionES self).set_state(state_dict)<line_sep>self.population=state_dict["population"]<line_sep>self.performance=state_dict["performance"]<line_sep>self.hurdles=state_dict["hurdles"]<block_end><def_stmt>_get_bracket self trial<block_start>"""Get the bracket of a trial during observe"""<line_sep><return>self.brackets[-1]<block_end><block_end><class_stmt>BracketEVES(HyperbandBracket)<block_start>"""Bracket of rungs for the algorithm Hyperband.
Parameters
----------
evolutiones: `evolutiones` algorithm
The evolutiones algorithm object which this bracket will be part of.
budgets: list of tuple
Each tuple gives the (n_trials, resource_budget) for the respective rung.
repetition_id: int
The id of hyperband execution this bracket belongs to
"""<def_stmt>__init__ self evolution_es budgets repetition_id<block_start>super(BracketEVES self).__init__(evolution_es budgets repetition_id)<line_sep>self.eves=self.hyperband<line_sep>self.search_space_without_fidelity=[]<line_sep>self._candidates={}<if_stmt>evolution_es.mutate<block_start>self.mutate_attr=copy.deepcopy(evolution_es.mutate)<block_end><else_stmt><block_start>self.mutate_attr={}<block_end>function_string=self.mutate_attr.pop("function" "orion.algo.mutate_functions.default_mutate")<line_sep>mod_name,func_name=function_string.rsplit("." 1)<line_sep>mod=importlib.import_module(mod_name)<line_sep>self.mutate_func=getattr(mod func_name)<for_stmt>i,dim enumerate(self.space.values())<block_start><if_stmt>dim.type<ne>"fidelity"<block_start>self.search_space_without_fidelity.append(i)<block_end><block_end><block_end>@property<def_stmt>space self<block_start><return>self.eves.space<block_end>@property<def_stmt>state_dict self<block_start>state_dict=super(BracketEVES self).state_dict<line_sep>state_dict["candidates"]=copy.deepcopy(self._candidates)<line_sep><return>state_dict<block_end><def_stmt>set_state self state_dict<block_start>super(BracketEVES self).set_state(state_dict)<line_sep>self._candidates=state_dict["candidates"]<block_end><def_stmt>_get_teams self rung_id<block_start>"""Get the red team and blue team"""<if_stmt>self.has_rung_filled(rung_id+1)<block_start><return>[]<block_end>rung=self.rungs[rung_id]["results"]<line_sep>population_range=(self.eves.nums_population<if>len(list(rung.values()))<g>self.eves.nums_population<else>len(list(rung.values())))<line_sep>rung_trials=list(rung.values())<for_stmt>trial_index range(population_range)<block_start>objective,trial=rung_trials[trial_index]<line_sep>self.eves.performance[trial_index]=objective<for_stmt>ith_dim self.search_space_without_fidelity<block_start>self.eves.population[ith_dim][trial_index]=trial.params[self.space[ith_dim].name]<block_end><block_end>population_index=list(range(self.eves.nums_population))<line_sep>red_team=self.eves.rng.choice(population_index self.eves.nums_comp_pairs replace=<false>)<line_sep>diff_list=list(set(population_index).difference(set(red_team)))<line_sep>blue_team=self.eves.rng.choice(diff_list self.eves.nums_comp_pairs replace=<false>)<line_sep><return>rung population_range red_team blue_team<block_end><def_stmt>_mutate_population self red_team blue_team rung population_range fidelity<block_start>"""Get the mutated population and hurdles"""<line_sep>winner_list=[]<line_sep>loser_list=[]<if_stmt>set(red_team)<ne>set(blue_team)<block_start>hurdles=0<for_stmt>i,_ enumerate(red_team)<block_start>winner,loser=((red_team blue_team)<if>self.eves.performance[red_team[i]]<l>self.eves.performance[blue_team[i]]<else>(blue_team red_team))<line_sep>winner_list.append(winner[i])<line_sep>loser_list.append(loser[i])<line_sep>hurdles<augadd>self.eves.performance[winner[i]]<line_sep>self._mutate(winner[i] loser[i])<block_end>hurdles<augdiv>len(red_team)<line_sep>self.eves.hurdles.append(hurdles)<line_sep>logger.debug("Evolution hurdles are: %s" str(self.eves.hurdles))<block_end>trials=[]<line_sep>trial_ids=set()<line_sep>nums_all_equal=[0]<times>population_range<for_stmt>i range(population_range)<block_start>point=[0]<times>len(self.space)<while_stmt><true><block_start>point=list(point)<line_sep>point[list(self.space.keys()).index(self.eves.fidelity_index)]=fidelity<for_stmt>j 
self.search_space_without_fidelity<block_start>point[j]=self.eves.population[j][i]<block_end>trial=format_trials.tuple_to_trial(point self.space)<line_sep>trial=self.eves.format_trial(trial)<line_sep>trial_id=self.eves.get_id(trial)<if_stmt>trial_id<in>trial_ids<block_start>nums_all_equal[i]<augadd>1<line_sep>logger.debug("find equal one, continue to mutate.")<line_sep>self._mutate(i i)<block_end><elif_stmt>self.eves.has_suggested(trial)<block_start>nums_all_equal[i]<augadd>1<line_sep>logger.debug("find one already suggested, continue to mutate.")<line_sep>self._mutate(i i)<block_end><else_stmt><block_start><break><block_end><if_stmt>nums_all_equal[i]<g>self.eves.max_retries<block_start>logger.warning("Can not Evolve any more. You can make an early stop.")<line_sep><break><block_end><block_end><if_stmt>nums_all_equal[i]<l>self.eves.max_retries<block_start>trials.append(trial)<line_sep>trial_ids.add(trial_id)<block_end><else_stmt><block_start>logger.debug("Dropping trial %s" trial)<block_end><block_end><return>trials np.array(nums_all_equal)<block_end><def_stmt>get_candidates self rung_id<block_start>"""Get a candidate for promotion"""<if_stmt>rung_id<not><in>self._candidates<block_start>rung,population_range,red_team,blue_team=self._get_teams(rung_id)<line_sep>fidelity=self.rungs[rung_id+1]["resources"]<line_sep>self._candidates[rung_id]=self._mutate_population(red_team blue_team rung population_range fidelity)[0]<block_end>candidates=[]<for_stmt>candidate self._candidates[rung_id]<block_start><if_stmt><not>self.eves.has_suggested(candidate)<block_start>candidates.append(candidate)<block_end><block_end><return>candidates<block_end><def_stmt>_mutate self winner_id loser_id<block_start>select_genes_key_list=self.eves.rng.choice(self.search_space_without_fidelity self.eves.nums_mutate_gene replace=<false> )<line_sep>self.copy_winner(winner_id loser_id)<line_sep>kwargs=copy.deepcopy(self.mutate_attr)<for_stmt>i,_ enumerate(select_genes_key_list)<block_start>space=self.space.values()[select_genes_key_list[i]]<line_sep>old=self.eves.population[select_genes_key_list[i]][loser_id]<line_sep>new=self.mutate_func(space self.eves.rng old **kwargs)<line_sep>self.eves.population[select_genes_key_list[i]][loser_id]=new<block_end>self.eves.performance[loser_id]=-1<block_end><def_stmt>copy_winner self winner_id loser_id<block_start>"""Copy winner to loser"""<for_stmt>key self.search_space_without_fidelity<block_start>self.eves.population[key][loser_id]=self.eves.population[key][winner_id]<block_end><block_end><block_end>
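# Example (hedged): what compute_budgets() above returns for a small fixed-resource
# setup. With reduction_factor=1 every fidelity from min to max is visited: the
# first rung holds the whole population, later rungs hold pairs * 2 mutated trials.
print(compute_budgets(min_resources=1, max_resources=3, reduction_factor=1,
                      nums_population=20, pairs=10))
# -> [[(20, 1), (20, 2), (20, 3)]]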
|
<import_from_stmt>.gripper Gripper<import_from_stmt>.gripper_factory gripper_factory<import_from_stmt>.two_finger_gripper TwoFingerGripper LeftTwoFingerGripper<import_from_stmt>.pr2_gripper PR2Gripper<import_from_stmt>.pushing_gripper PushingGripper<import_from_stmt>.robotiq_gripper RobotiqGripper<import_from_stmt>.robotiq_three_finger_gripper RobotiqThreeFingerGripper<import_from_stmt>.panda_gripper PandaGripper<import_from_stmt>.jaco_gripper JacoGripper<import_from_stmt>.fetch_gripper FetchGripper<line_sep>
|
#
# This file is part of snmpsim software.
#
# Copyright (c) 2010-2019, <NAME> <<EMAIL>>
# License: http://snmplabs.com/snmpsim/license.html
#
# SNMP Agent Simulator
#
<import_stmt>json<import_stmt>os<import_stmt>re<import_stmt>tempfile<import_stmt>time<import_stmt>uuid<import_from_stmt>functools wraps<import_from_stmt>pyasn1.type univ<import_from_stmt>pysnmp.carrier.asyncore.dgram udp<import_from_stmt>pysnmp.carrier.asyncore.dgram udp6<import_from_stmt>pysnmp.entity engine<import_from_stmt>snmpsim error<import_from_stmt>snmpsim log<import_from_stmt>snmpsim.reporting.formats base<def_stmt>camel2snake name<block_start>s1=re.sub(r'(.)([A-Z][a-z]+)' r'\1_\2' name)<line_sep><return>re.sub(r'([a-z0-9])([A-Z])' r'\1_\2' s1).lower()<block_end><def_stmt>ensure_base_types f<block_start>"""Convert decorated function's kwargs to Python types.
Also turn camel-cased keys into snake case.
"""<def_stmt>to_base_types item<block_start><if_stmt>isinstance(item engine.SnmpEngine)<block_start>item=item.snmpEngineID<block_end><if_stmt>isinstance(item (univ.Integer univ.OctetString univ.ObjectIdentifier))<block_start>item=item.prettyPrint()<if_stmt>item.startswith('0x')<block_start>item=item[2:]<block_end><return>item<block_end><if_stmt>isinstance(item (udp.UdpTransportAddress udp6.Udp6TransportAddress))<block_start><return>str(item[0])<block_end><return>item<block_end><def_stmt>to_dct dct<block_start>items={}<for_stmt>k,v dct.items()<block_start>k=to_base_types(k)<line_sep>k=camel2snake(k)<if_stmt>isinstance(v dict)<block_start>v=to_dct(v)<block_end><else_stmt><block_start>v=to_base_types(v)<block_end>items[k]=v<block_end><return>items<block_end>@wraps(f)<def_stmt>decorated_function *args **kwargs<block_start><return>f(*args **to_dct(kwargs))<block_end><return>decorated_function<block_end><class_stmt>NestingDict(dict)<block_start>"""Dict with sub-dict as a defaulted value"""<def_stmt>__getitem__ self item<block_start><try_stmt><block_start><return>dict.__getitem__(self item)<block_end><except_stmt>KeyError<block_start>value=self[item]=type(self)()<line_sep><return>value<block_end><block_end><block_end><class_stmt>BaseJsonReporter(base.BaseReporter)<block_start>"""Common base for JSON-backed family of reporters.
"""<line_sep>REPORTING_PERIOD=300<line_sep>REPORTING_FORMAT=''<line_sep>REPORTING_VERSION=1<line_sep>PRODUCER_UUID=str(uuid.uuid1())<def_stmt>__init__ self *args<block_start><if_stmt><not>args<block_start><raise>error.SnmpsimError('Missing %s parameter(s). Expected: '<concat>'<method>:<reports-dir>[:dumping-period]'%self.__class__.__name__)<block_end>self._reports_dir=os.path.join(args[0] self.REPORTING_FORMAT)<if_stmt>len(args)<g>1<block_start><try_stmt><block_start>self.REPORTING_PERIOD=int(args[1])<block_end><except_stmt>Exception<as>exc<block_start><raise>error.SnmpsimError('Malformed reports dumping period: %s'%args[1])<block_end><block_end><try_stmt><block_start><if_stmt><not>os.path.exists(self._reports_dir)<block_start>os.makedirs(self._reports_dir)<block_end><block_end><except_stmt>OSError<as>exc<block_start><raise>error.SnmpsimError('Failed to create reports directory %s: '<concat>'%s'%(self._reports_dir exc))<block_end>self._metrics=NestingDict()<line_sep>self._next_dump=time.time()+self.REPORTING_PERIOD<line_sep>log.debug('Initialized %s metrics reporter for instance %s, metrics '<concat>'directory %s, dumping period is %s seconds'%(self.__class__.__name__ self.PRODUCER_UUID self._reports_dir self.REPORTING_PERIOD))<block_end><def_stmt>flush self<block_start>"""Dump accumulated metrics into a JSON file.
Reset all counters upon success.
"""<if_stmt><not>self._metrics<block_start><return><block_end>now=int(time.time())<if_stmt>self._next_dump<g>now<block_start><return><block_end>self._next_dump=now+self.REPORTING_PERIOD<line_sep>self._metrics['format']=self.REPORTING_FORMAT<line_sep>self._metrics['version']=self.REPORTING_VERSION<line_sep>self._metrics['producer']=self.PRODUCER_UUID<line_sep>dump_path=os.path.join(self._reports_dir '%s.json'%now)<line_sep>log.debug('Dumping JSON metrics to %s'%dump_path)<try_stmt><block_start>json_doc=json.dumps(self._metrics indent=2)<with_stmt>tempfile.NamedTemporaryFile(delete=<false>)<as>fl<block_start>fl.write(json_doc.encode('utf-8'))<block_end>os.rename(fl.name dump_path)<block_end><except_stmt>Exception<as>exc<block_start>log.error('Failure while dumping metrics into '<concat>'%s: %s'%(dump_path exc))<block_end>self._metrics.clear()<block_end><block_end><class_stmt>MinimalJsonReporter(BaseJsonReporter)<block_start>"""Collect activity metrics and dump brief report.
Accumulates and periodically dumps activity metrics reflecting
SNMP command responder performance.
These counters are accumulated in memory for some time, then get
written down as a JSON file indexed by time. Consumers are expected
to process each of these files and are free to remove them.
`MinimalJsonReporter` works with both SNMPv1/v2c and SNMPv3
command responder.
Activity metrics are arranged as a data structure like this:
.. code-block:: python
{
'format': 'minimaljson',
'version': 1,
'producer': <UUID>,
'first_update': '{timestamp}',
'last_update': '{timestamp}',
'transports': {
'total': 0,
'failures': 0
},
'agents': {
'total': 0,
'failures': 0
},
'data_files': {
'total': 0,
'failures': 0
}
}
"""<line_sep>REPORTING_FORMAT='minimaljson'<def_stmt>update_metrics self **kwargs<block_start>"""Process activity update.
Update internal counters based on activity update information.
Parameters in `kwargs` serve two purposes: some are used to
build activity scopes, e.g. {transport_domain}->{snmp_engine},
while those suffixed `*_count` are used to update the corresponding
activity counters that eventually make their way to
consumers.
"""<line_sep>root_metrics=self._metrics<line_sep>metrics=root_metrics<line_sep>now=int(time.time())<if_stmt>'first_update'<not><in>metrics<block_start>metrics['first_update']=now<block_end>metrics['last_update']=now<line_sep>metrics=root_metrics<try_stmt><block_start>metrics=metrics['transports']<line_sep>metrics['total']=(metrics.get('total' 0)+kwargs.get('transport_call_count' 0))<line_sep>metrics['failures']=(metrics.get('failures' 0)+kwargs.get('transport_failure_count' 0))<block_end><except_stmt>KeyError<block_start><pass><block_end>metrics=root_metrics<try_stmt><block_start>metrics=metrics['data_files']<line_sep>metrics['total']=(metrics.get('total' 0)+kwargs.get('datafile_call_count' 0))<line_sep>metrics['failures']=(metrics.get('failures' 0)+kwargs.get('datafile_failure_count' 0))<line_sep># TODO: some data is still not coming from snmpsim v2carch core
<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><block_end><class_stmt>FullJsonReporter(BaseJsonReporter)<block_start>"""Collect activity metrics and dump detailed report.
Accumulates and periodically dumps activity counters reflecting
SNMP command responder performance.
These counters are accumulated in memory for some time, then get
written down as a JSON file indexed by time. Consumers are expected
to process each of these files and are free to remove them.
`FullJsonReporter` can only work within full SNMPv3 command responder.
Activity metrics are arranged as a data structure like this:
.. code-block:: python
{
'format': 'fulljson',
'version': 1,
'producer': <UUID>,
'first_update': '{timestamp}',
'last_update': '{timestamp}',
'{transport_protocol}': {
'{transport_endpoint}': { # local address
'transport_domain': '{transport_domain}', # endpoint ID
'{transport_address}', { # peer address
'packets': 0,
'parse_failures': 0, # n/a
'auth_failures': 0, # n/a
'context_failures': 0, # n/a
'{snmp_engine}': {
'{security_model}': {
'{security_level}': {
'{security_name}': {
'{context_engine_id}': {
'{context_name}': {
'{pdu_type}': {
'{data_file}': {
'pdus': 0,
'varbinds': 0,
'failures': 0,
'{variation_module}': {
'calls': 0,
'failures': 0
}
}
}
}
}
}
}
}
}
}
}
}
}
Where `{token}` is replaced with a concrete value taken from request.
"""<line_sep>REPORTING_FORMAT='fulljson'<line_sep>@ensure_base_types<def_stmt>update_metrics self **kwargs<block_start>"""Process activity update.
Update internal counters based on activity update information.
Parameters in `kwargs` serve two purposes: some are used to
build activity scopes, e.g. {transport_domain}->{snmp_engine},
while those suffixed `*_count` are used to update the corresponding
activity counters that eventually make their way to
consumers.
"""<line_sep>metrics=self._metrics<line_sep>now=int(time.time())<if_stmt>'first_update'<not><in>metrics<block_start>metrics['first_update']=now<block_end>metrics['last_update']=now<try_stmt><block_start>metrics=metrics[kwargs['transport_protocol']]<line_sep>metrics=metrics['%s:%s'%kwargs['transport_endpoint']]<line_sep>metrics['transport_domain']=kwargs['transport_domain']<line_sep>metrics=metrics[kwargs['transport_address']]<line_sep>metrics['packets']=(metrics.get('packets' 0)+kwargs.get('transport_call_count' 0))<line_sep># TODO: collect these counters
metrics['parse_failures']=0<line_sep>metrics['auth_failures']=0<line_sep>metrics['context_failures']=0<line_sep>metrics=metrics[kwargs['snmp_engine']]<line_sep>metrics=metrics[kwargs['security_model']]<line_sep>metrics=metrics[kwargs['security_level']]<line_sep>metrics=metrics[kwargs['security_name']]<line_sep>metrics=metrics[kwargs['context_engine_id']]<line_sep>metrics=metrics[kwargs['pdu_type']]<line_sep>metrics=metrics[kwargs['data_file']]<line_sep>metrics['pdus']=(metrics.get('pdus' 0)+kwargs.get('datafile_call_count' 0))<line_sep>metrics['failures']=(metrics.get('failures' 0)+kwargs.get('datafile_failure_count' 0))<line_sep>metrics['varbinds']=(metrics.get('varbinds' 0)+kwargs.get('varbind_count' 0))<line_sep>metrics=metrics['variations']<line_sep>metrics=metrics[kwargs['variation']]<line_sep>metrics['calls']=(metrics.get('calls' 0)+kwargs.get('variation_call_count' 0))<line_sep>metrics['failures']=(metrics.get('failures' 0)+kwargs.get('variation_failure_count' 0))<block_end><except_stmt>KeyError<block_start><return><block_end><block_end>
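# Example (hedged): how camel2snake() above normalizes the camel-cased keys coming
# from pysnmp before they are used to build the per-request activity scopes.
assert camel2snake('transportDomain') == 'transport_domain'
assert camel2snake('snmpEngineID') == 'snmp_engine_id'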
|
#**************************************************************************
#* Copyright(c) 1998-2014, ALICE Experiment at CERN, All rights reserved. *
#* *
#* Author: The ALICE Off-line Project. *
#* Contributors are mentioned in the code where appropriate. *
#* *
#* Permission to use, copy, modify and distribute this software and its *
#* documentation strictly for non-commercial purposes is hereby granted *
#* without fee, provided that the above copyright notice appears in all *
#* copies and that both the copyright notice and this permission notice *
#* appear in the supporting documentation. The authors make no claims *
#* about the suitability of this software for any purpose. It is *
#* provided "as is" without express or implied warranty. *
#**************************************************************************
<import_from_stmt>PWGJE.EMCALJetTasks.Tracks.analysis.base.Graphics GraphicsObject SinglePanelPlot<import_from_stmt>ROOT TFile<class_stmt>ComparisonObject(object)<block_start>"""
Base entry type for object inside comparison data
"""<def_stmt>__init__ self data style<block_start>self.__data=data<line_sep>self.__style=style<block_end><def_stmt>GetData self<block_start><return>self.__data<block_end><def_stmt>GetGraphicsObject self<block_start><return>GraphicsObject(self.__data self.__style)<block_end><def_stmt>GetRootPrimitive self<block_start>self.__data.SetName(self.GetObjectName())<line_sep><return>self.__data<block_end><def_stmt>Draw self pad addToLegend=<true><block_start>pad.DrawGraphicsObject(self.GetGraphicsObject() addToLegend self.GetLegendTitle())<block_end><def_stmt>GetLegendTitle self<block_start>"""
To be implemented in inheriting classes
"""<line_sep><return>""<block_end><def_stmt>GetObjectName self<block_start>"""
To be implemented in inheriting classes
"""<line_sep><return>""<block_end><block_end><class_stmt>ComparisonData(object)<block_start>"""
General comparison data collection
"""<def_stmt>__init__ self<block_start>"""
Constructor
"""<line_sep>self.__entries=[]<block_end><def_stmt>GetEntries self<block_start><return>self.__entries<block_end><def_stmt>AddEntry self entry<block_start>self.__entries.append(entry)<block_end><def_stmt>DrawObjects self pad addToLegend=<true><block_start><for_stmt>entry self.__entries<block_start>entry.Draw(pad addToLegend)<block_end><block_end><def_stmt>GetListOfRootObjects self<block_start>"""
Get a list of root-primitive trigger efficiencies
"""<line_sep>rootprimitives=[]<for_stmt>entry self.__entries<block_start>rootprimitives.append(entry.GetRootPrimitive())<block_end><return>rootprimitives<block_end><block_end><class_stmt>ComparisonPlot(SinglePanelPlot)<block_start>"""
General comparison plot type
"""<def_stmt>__init__ self<block_start>"""
Constructor
"""<line_sep>SinglePanelPlot.__init__(self)<line_sep>self.__frame=<none><line_sep>self._comparisonContainer=<none># be specified in inheriting classes
self.__legendAttributes=<none><line_sep>self.__padattributes={"logx":<false> "logy":<false> "gridx":<false> "gridy":<false>}<block_end><def_stmt>SetFrame self frame<block_start>self.__frame=frame<block_end><def_stmt>SetLegendAttributes self xmin ymin xmax ymax<block_start>self.__legendAttributes={"xmin":xmin "xmax":xmax "ymin":ymin "ymax":ymax}<block_end><def_stmt>SetPadAttributes self logx logy gridx gridy<block_start>self.__padattributes["logx"]=logx<line_sep>self.__padattributes["logy"]=logy<line_sep>self.__padattributes["gridx"]=gridx<line_sep>self.__padattributes["gridy"]=gridy<block_end><def_stmt>_Create self canvasname canvastitle<block_start>"""
Make the plot
"""<line_sep>self._OpenCanvas(canvasname canvastitle)<line_sep>pad=self._GetFramedPad()<if_stmt>self.__padattributes["logx"]<block_start>pad.GetPad().SetLogx()<block_end><if_stmt>self.__padattributes["logy"]<block_start>pad.GetPad().SetLogy()<block_end>pad.DrawFrame(self.__frame)<line_sep>doLegend=<false><if_stmt>self.__legendAttributes<block_start>doLegend=<true><block_end>self._comparisonContainer.DrawObjects(pad doLegend)<if_stmt>doLegend<block_start>pad.CreateLegend(self.__legendAttributes["xmin"] self.__legendAttributes["ymin"] self.__legendAttributes["xmax"] self.__legendAttributes["ymax"])<block_end><block_end><def_stmt>WriteData self rootfilename<block_start>"""
Write out trigger efficiency curves to a root file
"""<line_sep>outputfile=TFile(rootfilename "RECREATE")<for_stmt>rootprim self._comparisonContainer.GetListOfRootObjects()<block_start>rootprim.Write()<block_end>outputfile.Close()<block_end><block_end>
|
"""
Costs according to supply systems.
"""<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>geopandas GeoDataFrame<as>gpdf<import_stmt>itertools<import_stmt>cea.config<import_stmt>cea.inputlocator<import_from_stmt>cea.analysis.costs.equations calc_capex_annualized calc_opex_annualized<line_sep>__author__="<NAME>"<line_sep>__copyright__="Copyright 2020, Architecture and Building Systems - ETH Zurich"<line_sep>__credits__=["<NAME>" " <NAME>"]<line_sep>__license__="MIT"<line_sep>__version__="0.1"<line_sep>__maintainer__="<NAME>"<line_sep>__email__="<EMAIL>"<line_sep>__status__="Production"<def_stmt>costs_main locator config# get local variables
<block_start>capital=config.costs.capital<line_sep>operational=config.costs.operational<line_sep># get demand
demand=pd.read_csv(locator.get_total_demand())<line_sep># get the databases for each main system
cooling_db,hot_water_db,electricity_db,heating_db=get_databases(demand locator)<line_sep># COSTS DUE TO HEATING SERIVICES (EXCEPT HOTWATER)
heating_final_services=['OIL_hs' 'NG_hs' 'WOOD_hs' 'COAL_hs' 'GRID_hs' 'DH_hs']<line_sep>costs_heating_services_dict=calc_costs_per_energy_service(heating_db heating_final_services)<line_sep># COSTS DUE TO HOT WATER SERVICES
hot_water_final_services=['OIL_ww' 'NG_ww' 'WOOD_ww' 'COAL_ww' 'GRID_ww' 'DH_ww']<line_sep>costs_hot_water_services_dict=calc_costs_per_energy_service(hot_water_db hot_water_final_services)<line_sep># COSTS DUE TO COOLING SERVICES
cooling_final_services=['GRID_cs' 'GRID_cdata' 'GRID_cre' 'DC_cs']<line_sep>costs_cooling_services_dict=calc_costs_per_energy_service(cooling_db cooling_final_services)<line_sep># COSTS DUE TO ELECTRICITY SERVICES
electricity_final_services=['GRID_pro' 'GRID_l' 'GRID_aux' 'GRID_v' 'GRID_a' 'GRID_data' 'GRID_ve']<line_sep>costs_electricity_services_dict=calc_costs_per_energy_service(electricity_db electricity_final_services)<line_sep># COMBINE INTO ONE DICT
result=dict(itertools.chain(costs_heating_services_dict.items() costs_hot_water_services_dict.items() costs_cooling_services_dict.items() costs_electricity_services_dict.items()))<line_sep># sum up for all fields
# create a dict to map from the convention of fields to the final variables
mapping_dict={'_capex_total_USD':'Capex_total_sys_USD' '_opex_fixed_USD':'Opex_fixed_sys_USD' '_opex_var_USD':'Opex_var_sys_USD' '_opex_USD':'Opex_sys_USD' # all system annualized
'_capex_a_USD':'Capex_a_sys_USD' '_opex_a_var_USD':'Opex_a_var_sys_USD' '_opex_a_fixed_USD':'Opex_a_fixed_sys_USD' '_opex_a_USD':'Opex_a_sys_USD' '_TAC_USD':'TAC_sys_USD' # building_scale_systems
'_capex_total_building_scale_USD':'Capex_total_sys_building_scale_USD' '_opex_building_scale_USD':'Opex_sys_building_scale_USD' '_capex_a_building_scale_USD':'Capex_a_sys_building_scale_USD' '_opex_a_building_scale_USD':'Opex_a_sys_building_scale_USD' # district_scale_systems
'_capex_total_district_scale_USD':'Capex_total_sys_district_scale_USD' '_opex_district_scale_USD':'Opex_sys_district_scale_USD' '_capex_a_district_scale_USD':'Capex_a_sys_district_scale_USD' '_opex_a_district_scale_USD':'Opex_a_sys_district_scale_USD' # city_scale_systems
'_capex_total_city_scale_USD':'Capex_total_sys_city_scale_USD' '_opex_city_scale_USD':'Opex_sys_city_scale_USD' '_capex_a_city_scale_USD':'Capex_a_sys_city_scale_USD' '_opex_a_city_scale_USD':'Opex_a_sys_city_scale_USD' }<line_sep># initialize the names of the variables in the result to zero
n_buildings=demand.shape[0]<for_stmt>_,value mapping_dict.items()<block_start>result[value]=np.zeros(n_buildings)<block_end># loop inside the results and sum the results
<for_stmt>field result.keys()<block_start><for_stmt>key,value mapping_dict.items()<block_start><if_stmt>key<in>field<block_start>result[value]<augadd>result[field]<block_end><block_end><block_end># add name and create dataframe
result.update({'Name':demand.Name.values})<line_sep>result_out=pd.DataFrame(result)<line_sep># save dataframe
result_out.to_csv(locator.get_costs_operation_file() index=<false> float_format='%.2f' na_rep='nan')<block_end><def_stmt>calc_costs_per_energy_service database heating_services<block_start>result={}<for_stmt>service heating_services# TOTALS
<block_start>result[service+'_capex_total_USD']=(database[service+'0_kW'].values<times>database['efficiency'].values<times># because it is based on the end use
database['CAPEX_USD2015kW'].values)<line_sep>result[service+'_opex_fixed_USD']=(result[service+'_capex_total_USD']<times>database['O&M_%'].values/100)<line_sep>result[service+'_opex_var_USD']=database[service+'_MWhyr'].values<times>database['Opex_var_buy_USD2015kWh'].values<times>1000<line_sep>result[service+'_opex_USD']=result[service+'_opex_fixed_USD']+result[service+'_opex_var_USD']<line_sep># ANNUALIZED
result[service+'_capex_a_USD']=np.vectorize(calc_capex_annualized)(result[service+'_capex_total_USD'] database['IR_%'] database['LT_yr'])<line_sep>result[service+'_opex_a_fixed_USD']=np.vectorize(calc_opex_annualized)(result[service+'_opex_fixed_USD'] database['IR_%'] database['LT_yr'])<line_sep>result[service+'_opex_a_var_USD']=np.vectorize(calc_opex_annualized)(result[service+'_opex_var_USD'] database['IR_%'] database['LT_yr'])<line_sep>result[service+'_opex_a_USD']=np.vectorize(calc_opex_annualized)(result[service+'_opex_USD'] database['IR_%'] database['LT_yr'])<line_sep>result[service+'_TAC_USD']=result[service+'_opex_a_USD']+result[service+'_capex_a_USD']<line_sep># GET CONNECTED AND DISCONNECTED
<for_stmt>field ['_capex_total_USD' '_capex_a_USD' '_opex_USD' '_opex_a_USD']<block_start>field_district=field.split("_USD")[0]+"_district_scale_USD"<line_sep>field_building_scale=field.split("_USD")[0]+"_building_scale_USD"<line_sep>field_city_scale=field.split("_USD")[0]+"_city_scale_USD"<line_sep>result[service+field_district],result[service+field_building_scale],result[service+field_city_scale]=np.vectorize(calc_scale_costs)(result[service+field] database['scale'])<block_end><block_end><return>result<block_end><def_stmt>calc_scale_costs value flag_scale<block_start><if_stmt>flag_scale<eq>"BUILDING"<block_start>district=0.0<line_sep>building=value<line_sep>city=0.0<block_end><elif_stmt>flag_scale<eq>"DISTRICT"<block_start>district=value<line_sep>building=0.0<line_sep>city=0.0<block_end><elif_stmt>flag_scale<eq>"CITY"<block_start>district=0.0<line_sep>building=0.0<line_sep>city=value<block_end><elif_stmt>flag_scale<eq>"NONE"<block_start><if_stmt>value<eq>0.0<or>np.isnan(value)<block_start>district=0.0<line_sep>building=0.0<line_sep>city=0.0<block_end><else_stmt><block_start><raise>ValueError("the scale is NONE but somehow there is a cost here?"<concat>" the inputs of SUPPLY database may be wrong")<block_end><block_end><else_stmt><block_start><raise>ValueError("the scale in the system is {}, this is not a valid argument"<concat>"valid arguments are CITY, DISTRICT, BUILDING, NONE".format(flag_scale))<block_end><return>district building city<block_end><def_stmt>get_databases demand locator<block_start>supply_systems=gpdf.from_file(locator.get_building_supply()).drop('geometry' axis=1)<line_sep>data_all_in_one_systems=pd.read_excel(locator.get_database_supply_assemblies() sheet_name=<none>)<line_sep>factors_heating=data_all_in_one_systems['HEATING']<line_sep>factors_dhw=data_all_in_one_systems['HOT_WATER']<line_sep>factors_cooling=data_all_in_one_systems['COOLING']<line_sep>factors_electricity=data_all_in_one_systems['ELECTRICITY']<line_sep>factors_resources=pd.read_excel(locator.get_database_feedstocks() sheet_name=<none>)<line_sep># get the mean of all values for this
factors_resources_simple=[(name values['Opex_var_buy_USD2015kWh'].mean())<for>name,values factors_resources.items()]<line_sep>factors_resources_simple=pd.DataFrame(factors_resources_simple columns=['code' 'Opex_var_buy_USD2015kWh']).append(# append NONE choice with zero values
{'code':'NONE'} ignore_index=<true>).fillna(0)<line_sep># local variables
# calculate the total operational non-renewable primary energy demand and CO2 emissions
## create data frame for each type of end use energy containing the type of supply system use, the final energy
## demand and the primary energy and emissions factors for each corresponding type of supply system
heating_costs=factors_heating.merge(factors_resources_simple left_on='feedstock' right_on='code')[['code_x' 'feedstock' 'scale' 'efficiency' 'Opex_var_buy_USD2015kWh' 'CAPEX_USD2015kW' 'LT_yr' 'O&M_%' 'IR_%']]<line_sep>cooling_costs=factors_cooling.merge(factors_resources_simple left_on='feedstock' right_on='code')[['code_x' 'feedstock' 'scale' 'efficiency' 'Opex_var_buy_USD2015kWh' 'CAPEX_USD2015kW' 'LT_yr' 'O&M_%' 'IR_%']]<line_sep>dhw_costs=factors_dhw.merge(factors_resources_simple left_on='feedstock' right_on='code')[['code_x' 'feedstock' 'scale' 'efficiency' 'Opex_var_buy_USD2015kWh' 'CAPEX_USD2015kW' 'LT_yr' 'O&M_%' 'IR_%']]<line_sep>electricity_costs=factors_electricity.merge(factors_resources_simple left_on='feedstock' right_on='code')[['code_x' 'feedstock' 'scale' 'efficiency' 'Opex_var_buy_USD2015kWh' 'CAPEX_USD2015kW' 'LT_yr' 'O&M_%' 'IR_%']]<line_sep>heating=supply_systems.merge(demand on='Name').merge(heating_costs left_on='type_hs' right_on='code_x')<line_sep>dhw=supply_systems.merge(demand on='Name').merge(dhw_costs left_on='type_dhw' right_on='code_x')<line_sep>cooling=supply_systems.merge(demand on='Name').merge(cooling_costs left_on='type_cs' right_on='code_x')<line_sep>electricity=supply_systems.merge(demand on='Name').merge(electricity_costs left_on='type_el' right_on='code_x')<line_sep><return>cooling dhw electricity heating<block_end><def_stmt>main config<block_start>locator=cea.inputlocator.InputLocator(scenario=config.scenario)<line_sep>print('Running system-costs with scenario = %s'%config.scenario)<line_sep>costs_main(locator=locator config=config)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main(cea.config.Configuration())<block_end>
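# Example (hedged): calc_scale_costs() above just routes a cost to the district,
# building or city bucket; np.vectorize applies it element-wise per building.
assert calc_scale_costs(1000.0, "BUILDING") == (0.0, 1000.0, 0.0)
assert calc_scale_costs(1000.0, "DISTRICT") == (1000.0, 0.0, 0.0)
assert calc_scale_costs(0.0, "NONE") == (0.0, 0.0, 0.0)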
|
"""Microsoft Azure VM extension event.
This module defines the :class:`AzVMExtensionEvent` class that
evaluates Azure VM extensions. This plugin works on the virtual
machine properties found in the ``ext`` bucket of ``vm_instance_view``
records.
"""<import_stmt>logging<import_from_stmt>cloudmarker util<line_sep>_log=logging.getLogger(__name__)<class_stmt>AzVMExtensionEvent<block_start>"""Az VM Data extension event plugin."""<def_stmt>__init__ self whitelisted=<none> blacklisted=<none> required=<none><block_start>"""Create an instance of :class:`AzVMExtensionEvent`.
Arguments:
whitelisted (list): List of whitelisted extensions.
blacklisted (list): List of blacklisted extensions.
required (list): List of required extensions.
"""<if_stmt>whitelisted<is><none><block_start>whitelisted=[]<block_end><if_stmt>blacklisted<is><none><block_start>blacklisted=[]<block_end><if_stmt>required<is><none><block_start>required=[]<block_end>self._whitelisted=whitelisted<line_sep>self._blacklisted=blacklisted<line_sep>self._required=required<block_end><def_stmt>eval self record<block_start>"""Evaluate Azure virtual machine for extensions.
Arguments:
record (dict): A virtual machine record.
Yields:
dict: An event record representing an Azure VM with
misconfigured extensions
"""<line_sep>com=record.get('com' {})<if_stmt>com<is><none><block_start><return><block_end><if_stmt>com.get('cloud_type')<ne>'azure'<block_start><return><block_end>ext=record.get('ext' {})<if_stmt>ext<is><none><block_start><return><block_end><if_stmt>ext.get('record_type')<ne>'vm_instance_view'<block_start><return><block_end>extensions=ext.get('extensions')<line_sep>added_extensions=set(extensions)<if_stmt>self._blacklisted<block_start>added_blacklisted_ext=list(set(self._blacklisted)&added_extensions)<line_sep><yield><from>_get_azure_vm_blacklisted_extension_event(com ext added_blacklisted_ext)<block_end><if_stmt>self._whitelisted<block_start>added_unapproved_ext=list(added_extensions-(set(self._whitelisted)-set(self._blacklisted)))<line_sep><yield><from>_get_azure_vm_unapproved_extension_event(com ext added_unapproved_ext)<block_end><if_stmt>self._required<block_start>missing_required_ext=list((set(self._required)-set(self._blacklisted))-added_extensions)<line_sep><yield><from>_get_azure_vm_required_extension_event(com ext missing_required_ext)<block_end><block_end><def_stmt>done self<block_start>"""Perform cleanup work.
Currently, this method does nothing. This may change in future.
"""<block_end><block_end><def_stmt>_get_azure_vm_blacklisted_extension_event com ext blacklisted<block_start>"""Evaluate Azure VM for blacklisted extensions.
Arguments:
com (dict): Virtual machine record `com` bucket
ext (dict): Virtual machine record `ext` bucket
blacklisted (list): Added blacklisted extension list
Returns:
dict: An event record representing a VM with blacklisted extensions
"""<if_stmt><not>blacklisted<block_start><return><block_end>friendly_cloud_type=util.friendly_string(com.get('cloud_type'))<line_sep>reference=com.get('reference')<line_sep>description=('{} virtual machine {} has blacklisted extensions {}'.format(friendly_cloud_type reference util.friendly_list(blacklisted)))<line_sep>recommendation=('Check {} virtual machine {} and remove blacklisted extensions {}'.format(friendly_cloud_type reference util.friendly_list(blacklisted)))<line_sep>event_record={# Preserve the extended properties from the virtual
# machine record because they provide useful context to
# locate the virtual machine that led to the event.
'ext':util.merge_dicts(ext {'record_type':'vm_blacklisted_extension_event'}) 'com':{'cloud_type':com.get('cloud_type') 'record_type':'vm_blacklisted_extension_event' 'reference':reference 'description':description 'recommendation':recommendation }}<line_sep>_log.info('Generating vm_blacklisted_extension_event; %r' event_record)<line_sep><yield>event_record<block_end><def_stmt>_get_azure_vm_unapproved_extension_event com ext not_whitelisted<block_start>"""Evaluate Azure VM for unapproved extensions.
Arguments:
com (dict): Virtual machine record `com` bucket
ext (dict): Virtual machine record `ext` bucket
not_whitelisted (list): Not whitelisted extension list
Returns:
dict: An event record representing a VM with unapproved extensions
"""<if_stmt><not>not_whitelisted<block_start><return><block_end>friendly_cloud_type=util.friendly_string(com.get('cloud_type'))<line_sep>reference=com.get('reference')<line_sep>description=('{} virtual machine {} has unapproved extensions {}'.format(friendly_cloud_type reference util.friendly_list(not_whitelisted)))<line_sep>recommendation=('Check {} virtual machine {} and remove unapproved extensions {}'.format(friendly_cloud_type reference util.friendly_list(not_whitelisted)))<line_sep>event_record={# Preserve the extended properties from the virtual
# machine record because they provide useful context to
# locate the virtual machine that led to the event.
'ext':util.merge_dicts(ext {'record_type':'vm_unapproved_extension_event'}) 'com':{'cloud_type':com.get('cloud_type') 'record_type':'vm_unapproved_extension_event' 'reference':reference 'description':description 'recommendation':recommendation }}<line_sep>_log.info('Generating vm_unapproved_extension_event; %r' event_record)<line_sep><yield>event_record<block_end><def_stmt>_get_azure_vm_required_extension_event com ext missing_required<block_start>"""Evaluate Azure VM for missing required extensions.
Arguments:
com (dict): Virtual machine record `com` bucket
ext (dict): Virtual machine record `ext` bucket
missing_required (list): Missing required extension list
Returns:
dict: An event record representing a VM with missing required extensions
"""<if_stmt><not>missing_required<block_start><return><block_end>friendly_cloud_type=util.friendly_string(com.get('cloud_type'))<line_sep>reference=com.get('reference')<line_sep>description=('{} virtual machine {} is missing required extensions {}'.format(friendly_cloud_type reference util.friendly_list(missing_required)))<line_sep>recommendation=('Check {} virtual machine {} and add required extensions {}'.format(friendly_cloud_type reference util.friendly_list(missing_required)))<line_sep>event_record={# Preserve the extended properties from the virtual
# machine record because they provide useful context to
# locate the virtual machine that led to the event.
'ext':util.merge_dicts(ext {'record_type':'vm_required_extension_event'}) 'com':{'cloud_type':com.get('cloud_type') 'record_type':'vm_required_extension_event' 'reference':reference 'description':description 'recommendation':recommendation }}<line_sep>_log.info('Generating vm_required_extension_event; %r' event_record)<line_sep><yield>event_record<block_end>
|
<import_from_stmt>HTMLParser HTMLParser<class_stmt>HTMLValidator(HTMLParser)<block_start>"""
Super-simple HTML validator: checks that each opening tag is closed,
respecting the tag hierarchy.
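Illustrative usage (the HTML snippets below are assumptions, not from the original module):
validator = HTMLValidator()
validator.feed('<div><p>text</p></div>')  # well nested, passes silently
validator.feed('<div><p>text</div>')      # raises Exception on the tag mismatch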
"""<def_stmt>__init__ self<block_start>HTMLParser.__init__(self)<block_end><def_stmt>handle_starttag self tag attrs<block_start>self.tag_stack.append(tag)<block_end><def_stmt>handle_endtag self tag<block_start><try_stmt><block_start>open_tag=self.tag_stack.pop()<assert_stmt>open_tag<eq>tag<block_end><except_stmt>IndexError<block_start><raise>Exception("found an end tag but there was no more opened ones")<block_end><except_stmt>AssertionError<block_start><raise>Exception("mismatch between opened tag {} and closing tag {}".format(open_tag tag))<block_end><block_end><def_stmt>feed self data<block_start>self.tag_stack=[]<line_sep>HTMLParser.feed(self data)<block_end><block_end>
|
<import_from_stmt>malaya_boilerplate.utils available_device available_gpu close_session describe_availability <import_from_stmt>malaya_boilerplate.frozen_graph nodes_session generate_session get_device <import_from_stmt>malaya_boilerplate backblaze<import_from_stmt>malaya_boilerplate frozen_graph<import_from_stmt>malaya_boilerplate utils<import_from_stmt>malaya_speech package url<def_stmt>print_cache location=<none><block_start><return>utils.print_cache(package=package location=location)<block_end><def_stmt>delete_cache location<block_start><return>utils.delete_cache(package=package location=location)<block_end><def_stmt>delete_all_cache <block_start><return>utils.delete_all_cache(package=package)<block_end><def_stmt>check_file file s3_file=<none> **kwargs<block_start><return>backblaze.check_file(file package url s3_file=s3_file **kwargs)<block_end><def_stmt>load_graph frozen_graph_filename **kwargs<block_start><return>frozen_graph.load_graph(package frozen_graph_filename **kwargs)<block_end><import_from_stmt>. arange<import_from_stmt>. aligner<import_from_stmt>. astype<import_from_stmt>. char<import_from_stmt>. combine<import_from_stmt>. constant<import_from_stmt>. dist<import_from_stmt>. featurization<import_from_stmt>. generator<import_from_stmt>. griffin_lim<import_from_stmt>. group<import_from_stmt>. metrics<import_from_stmt>. outlier<import_from_stmt>. padding<import_from_stmt>. read<import_from_stmt>. speechsplit<import_from_stmt>. split<import_from_stmt>. text<import_from_stmt>. subword<import_from_stmt>. text<import_from_stmt>. tf_featurization<import_from_stmt>. validator<line_sep>
|
# Copyright 2021 The Deluca Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""deluca.agents._gpc"""<import_from_stmt>numbers Real<import_from_stmt>typing Callable<import_stmt>jax<import_stmt>jax.numpy<as>jnp<import_stmt>numpy<as>np<import_from_stmt>jax grad<import_from_stmt>jax jit<import_from_stmt>deluca.agents._lqr LQR<import_from_stmt>deluca.agents.core Agent<def_stmt>quad_loss x:jnp.ndarray u:jnp.ndarray<arrow>Real<block_start>"""
Quadratic loss.
Args:
x (jnp.ndarray): state
u (jnp.ndarray): control (action)
Returns:
Real: the scalar cost jnp.sum(x.T @ x + u.T @ u)
"""<line_sep><return>jnp.sum(x.T@x+u.T@u)<block_end><class_stmt>GPC(Agent)<block_start><def_stmt>__init__ self A:jnp.ndarray B:jnp.ndarray Q:jnp.ndarray=<none> R:jnp.ndarray=<none> K:jnp.ndarray=<none> start_time:int=0 cost_fn:Callable[[jnp.ndarray jnp.ndarray] Real]=<none> H:int=3 HH:int=2 lr_scale:Real=0.005 decay:bool=<true> <arrow><none><block_start>"""
Description: Initialize the dynamics of the model.
Args:
A (jnp.ndarray): system dynamics
B (jnp.ndarray): system dynamics
Q (jnp.ndarray): cost matrices (i.e. cost = x^TQx + u^TRu)
R (jnp.ndarray): cost matrices (i.e. cost = x^TQx + u^TRu)
K (jnp.ndarray): Starting policy (optional). Defaults to LQR gain.
start_time (int): starting time step
cost_fn (Callable[[jnp.ndarray, jnp.ndarray], Real]): loss function mapping (state, action) to a scalar cost
H (positive int): history of the controller
HH (positive int): history of the system
lr_scale (Real): learning-rate scale
decay (bool): whether to decay the learning rate over time
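Illustrative usage (the concrete A, B, K values below are assumptions for this sketch):
A = jnp.array([[1.0, 1.0], [0.0, 1.0]])
B = jnp.array([[0.0], [1.0]])
agent = GPC(A, B, K=jnp.zeros((1, 2)))  # passing K avoids solving an LQR here
u = agent(jnp.zeros((2, 1)))            # (d_action, 1) control for the current state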
"""<line_sep>cost_fn=cost_fn<or>quad_loss<line_sep>d_state,d_action=B.shape# State & Action Dimensions
self.A,self.B=A B# System Dynamics
self.t=0# Time Counter (for decaying learning rate)
self.H,self.HH=H HH<line_sep>self.lr_scale,self.decay=lr_scale decay<line_sep>self.bias=0<line_sep># Model Parameters
# initial linear policy / perturbation contributions / bias
# TODO: need to address problem of LQR with jax.lax.scan
self.K=K<if>K<is><not><none><else>LQR(self.A self.B Q R).K<line_sep>self.M=jnp.zeros((H d_action d_state))<line_sep># Past H + HH noises ordered increasing in time
self.noise_history=jnp.zeros((H+HH d_state 1))<line_sep># past state and past action
self.state,self.action=jnp.zeros((d_state 1)) jnp.zeros((d_action 1))<def_stmt>last_h_noises <block_start>"""Get noise history"""<line_sep><return>jax.lax.dynamic_slice_in_dim(self.noise_history -H H)<block_end>self.last_h_noises=last_h_noises<def_stmt>policy_loss M w<block_start>"""Surrogate cost function"""<def_stmt>action state h<block_start>"""Action function"""<line_sep><return>-self.K@state+jnp.tensordot(M jax.lax.dynamic_slice_in_dim(w h H) axes=([0 2] [0 1]))<block_end><def_stmt>evolve state h<block_start>"""Evolve function"""<line_sep><return>self.A@state+self.B@action(state h)+w[h+H] <none><block_end>final_state,_=jax.lax.scan(evolve np.zeros((d_state 1)) np.arange(H-1))<line_sep><return>cost_fn(final_state action(final_state HH-1))<block_end>self.policy_loss=policy_loss<line_sep>self.grad=jit(grad(policy_loss (0 1)))<block_end><def_stmt>__call__ self state:jnp.ndarray<arrow>jnp.ndarray<block_start>"""
Description: Return the action based on current state and internal parameters.
Args:
state (jnp.ndarray): current state
Returns:
jnp.ndarray: action to take
"""<line_sep>action=self.get_action(state)<line_sep>self.update(state action)<line_sep><return>action<block_end><def_stmt>update self state:jnp.ndarray u:jnp.ndarray<arrow><none><block_start>"""
Description: update agent internal state.
Args:
state (jnp.ndarray): observed state
u (jnp.ndarray): action taken
Returns:
None
"""<line_sep>[email protected]@u<line_sep>self.noise_history=self.noise_history.at[0].set(noise)<line_sep>self.noise_history=jnp.roll(self.noise_history -1 axis=0)<line_sep>delta_M,delta_bias=self.grad(self.M self.noise_history)<line_sep>lr=self.lr_scale<line_sep>lr<augmul>(1/(self.t+1))<if>self.decay<else>1<line_sep>self.M<augsub>lr<times>delta_M<line_sep>self.bias<augsub>lr<times>delta_bias<line_sep># update state
self.state=state<line_sep>self.t<augadd>1<block_end><def_stmt>get_action self state:jnp.ndarray<arrow>jnp.ndarray<block_start>"""
Description: get action from state.
Args:
state (jnp.ndarray): current state
Returns:
jnp.ndarray
"""<line_sep><return>-self.K@state+jnp.tensordot(self.M self.last_h_noises() axes=([0 2] [0 1]))<block_end><block_end>
|
# Copyright 2017 Adobe. All rights reserved.
"""
Builds a CFF2 variable font from a designspace file and its UFO masters.
"""<import_stmt>argparse<import_from_stmt>ast literal_eval<import_from_stmt>copy deepcopy<import_stmt>logging<import_stmt>os<import_stmt>re<import_stmt>sys<import_from_stmt>fontTools varLib<import_from_stmt>fontTools.cffLib.specializer commandsToProgram<import_from_stmt>fontTools.designspaceLib DesignSpaceDocument<import_from_stmt>fontTools.misc.fixedTools otRound<import_from_stmt>fontTools.misc.psCharStrings T2OutlineExtractor T2CharString<import_from_stmt>fontTools.ttLib TTFont<import_from_stmt>fontTools.varLib.cff CFF2CharStringMergePen VarLibCFFPointTypeMergeError <import_from_stmt>afdko.fdkutils validate_path<line_sep>__version__='2.0.2'<line_sep>STAT_FILENAME='override.STAT.ttx'<class_stmt>CFF2VFError(Exception)<block_start>"""Base exception for buildcff2vf"""<block_end># set up for printing progress notes
<def_stmt>progress self message *args **kws# Note: message must contain the format specifiers for any strings in args.
<block_start>level=self.getEffectiveLevel()<line_sep>self._log(level message args **kws)<block_end>PROGRESS_LEVEL=logging.INFO+5<line_sep>PROGESS_NAME="progress"<line_sep>logging.addLevelName(PROGRESS_LEVEL PROGESS_NAME)<line_sep>logger=logging.getLogger(__name__)<line_sep>logging.Logger.progress=progress<def_stmt>getSubset subset_Path<block_start><with_stmt>open(subset_Path "rt")<as>fp<block_start>text_lines=fp.readlines()<block_end>locationDict={}<line_sep>cur_key_list=<none><for_stmt>li,line enumerate(text_lines)<block_start>idx=line.find('#')<if_stmt>idx<ge>0<block_start>line=line[:idx]<block_end>line=line.strip()<if_stmt><not>line<block_start><continue><block_end><if_stmt>line[0]<eq>"("<block_start>cur_key_list=[]<line_sep>location_list=literal_eval(line)<for_stmt>location_entry location_list<block_start>cur_key_list.append(location_entry)<if_stmt>location_entry<not><in>locationDict<block_start>locationDict[location_entry]=[]<block_end><block_end><block_end><else_stmt><block_start>m=re.match(r"(\S+)" line)<if_stmt>m<block_start><if_stmt>cur_key_list<is><none><block_start>logger.error("Error parsing subset file. "<concat>"Seeing a glyph name record before "<concat>"seeing a location record.")<line_sep>logger.error(f'Line number: {li}.')<line_sep>logger.error(f'Line text: {line}.')<block_end><for_stmt>key cur_key_list<block_start>locationDict[key].append(m.group(1))<block_end><block_end><block_end><block_end><return>locationDict<block_end><def_stmt>subset_masters designspace subsetDict<block_start><import_from_stmt>fontTools subset<line_sep>subset_options=subset.Options(notdef_outline=<true> layout_features='*')<for_stmt>ds_source designspace.sources<block_start>key=tuple(ds_source.location.items())<line_sep>included=set(subsetDict[key])<line_sep>ttf_font=ds_source.font<line_sep>subsetter=subset.Subsetter(options=subset_options)<line_sep>subsetter.populate(glyphs=included)<line_sep>subsetter.subset(ttf_font)<line_sep>subset_path=f'{os.path.splitext(ds_source.path)[0]}.subset.otf'<line_sep>logger.progress(f'Saving subset font {subset_path}')<line_sep>ttf_font.save(subset_path)<line_sep>ds_source.font=TTFont(subset_path)<block_end><block_end><class_stmt>CompatibilityPen(CFF2CharStringMergePen)<block_start><def_stmt>__init__ self default_commands glyphName num_masters master_idx roundTolerance=0.5<block_start>super(CompatibilityPen self).__init__(default_commands glyphName num_masters master_idx roundTolerance=0.5)<line_sep>self.fixed=<false><block_end><def_stmt>add_point self point_type pt_coords<block_start><if_stmt>self.m_index<eq>0<block_start>self._commands.append([point_type [pt_coords]])<block_end><else_stmt><block_start>cmd=self._commands[self.pt_index]<if_stmt>cmd[0]<ne>point_type# Fix some issues that show up in some
# CFF workflows, even when fonts are
# topologically merge compatible.
<block_start>success,new_pt_coords=self.check_and_fix_flat_curve(cmd point_type pt_coords)<if_stmt>success<block_start>logger.progress(f"Converted between line and curve in "<concat>f"source font index '{self.m_index}' "<concat>f"glyph '{self.glyphName}', point index "<concat>f"'{self.pt_index}' at '{pt_coords}'. "<concat>f"Please check correction.")<line_sep>pt_coords=new_pt_coords<block_end><else_stmt><block_start>success=self.check_and_fix_closepath(cmd point_type pt_coords)<if_stmt>success# We may have incremented self.pt_index
<block_start>cmd=self._commands[self.pt_index]<if_stmt>cmd[0]<ne>point_type<block_start>success=<false><block_end><block_end><block_end><if_stmt><not>success<block_start><raise>VarLibCFFPointTypeMergeError(point_type self.pt_index self.m_index cmd[0] self.glyphName)<block_end>self.fixed=<true><block_end>cmd[1].append(pt_coords)<block_end>self.pt_index<augadd>1<block_end><def_stmt>make_flat_curve self cur_coords# Convert line coords to curve coords.
<block_start>dx=self.round(cur_coords[0]/3.0)<line_sep>dy=self.round(cur_coords[1]/3.0)<line_sep>new_coords=[dx dy dx dy cur_coords[0]-2<times>dx cur_coords[1]-2<times>dy]<line_sep><return>new_coords<block_end><def_stmt>make_curve_coords self coords is_default# Convert line coords to curve coords.
<block_start><if_stmt>is_default<block_start>new_coords=[]<for_stmt>cur_coords coords<block_start>master_coords=self.make_flat_curve(cur_coords)<line_sep>new_coords.append(master_coords)<block_end><block_end><else_stmt><block_start>cur_coords=coords<line_sep>new_coords=self.make_flat_curve(cur_coords)<block_end><return>new_coords<block_end><def_stmt>check_and_fix_flat_curve self cmd point_type pt_coords<block_start>success=<false><if_stmt>(point_type<eq>'rlineto')<and>(cmd[0]<eq>'rrcurveto')<block_start>is_default=<false># the line is in the master font we are adding
pt_coords=self.make_curve_coords(pt_coords is_default)<line_sep>success=<true><block_end><elif_stmt>(point_type<eq>'rrcurveto')<and>(cmd[0]<eq>'rlineto')<block_start>is_default=<true># the line is in the default font commands
expanded_coords=self.make_curve_coords(cmd[1] is_default)<line_sep>cmd[1]=expanded_coords<line_sep>cmd[0]=point_type<line_sep>success=<true><block_end><return>success pt_coords<block_end><def_stmt>check_and_fix_closepath self cmd point_type pt_coords<block_start>""" Some workflows drop a lineto which closes a path.
Also, if the last segment is a curve in one master,
and a flat curve in another, the flat curve can get
converted to a closing lineto, and then dropped.
Test if:
1) one master op is a moveto,
2) the previous op for this master does not close the path
3) in the other master the current op is not a moveto
4) the current op in the other master closes the current path
If the default font is missing the closing lineto, insert it,
then proceed with merging the current op and pt_coords.
If the current region is missing the closing lineto
and therefore the current op is a moveto,
then add closing coordinates to self._commands,
and increment self.pt_index.
Note that this may insert a point in the default font list,
so after using it, 'cmd' needs to be reset.
return True if we can fix this issue.
"""<if_stmt>point_type<eq>'rmoveto'# If this is the case, we know that cmd[0] != 'rmoveto'
# The previous op must not close the path for this region font.
<block_start>prev_moveto_coords=self._commands[self.prev_move_idx][1][-1]<line_sep>prv_coords=self._commands[self.pt_index-1][1][-1]<if_stmt>prev_moveto_coords<eq>prv_coords[-2:]<block_start><return><false><block_end># The current op must close the path for the default font.
prev_moveto_coords2=self._commands[self.prev_move_idx][1][0]<line_sep>prv_coords=self._commands[self.pt_index][1][0]<if_stmt>prev_moveto_coords2<ne>prv_coords[-2:]<block_start><return><false><block_end># Add the closing line coords for this region
# to self._commands, then increment self.pt_index
# so that the current region op will get merged
# with the next default font moveto.
<if_stmt>cmd[0]<eq>'rrcurveto'<block_start>new_coords=self.make_curve_coords(prev_moveto_coords <false>)<line_sep>cmd[1].append(new_coords)<block_end>self.pt_index<augadd>1<line_sep><return><true><block_end><if_stmt>cmd[0]<eq>'rmoveto'# The previous op must not close the path for the default font.
<block_start>prev_moveto_coords=self._commands[self.prev_move_idx][1][0]<line_sep>prv_coords=self._commands[self.pt_index-1][1][0]<if_stmt>prev_moveto_coords<eq>prv_coords[-2:]<block_start><return><false><block_end># The current op must close the path for this region font.
prev_moveto_coords2=self._commands[self.prev_move_idx][1][-1]<if_stmt>prev_moveto_coords2<ne>pt_coords[-2:]<block_start><return><false><block_end># Insert the close path segment in the default font.
# We omit the last coords from the previous moveto
# as it will be supplied by the current region point.
# after this function returns.
new_cmd=[point_type <none>]<line_sep>prev_move_coords=self._commands[self.prev_move_idx][1][:-1]<line_sep># Note that we omit the last region's coord from prev_move_coords,
# as that is from the current region, and we will add the
# current pts' coords from the current region in its place.
<if_stmt>point_type<eq>'rlineto'<block_start>new_cmd[1]=prev_move_coords<block_end><else_stmt># We omit the last set of coords from the
# previous moveto, as it will be supplied by the coords
# for the current region pt.
<block_start>new_cmd[1]=self.make_curve_coords(prev_move_coords <true>)<block_end>self._commands.insert(self.pt_index new_cmd)<line_sep><return><true><block_end><return><false><block_end><def_stmt>getCharStrings self num_masters private=<none> globalSubrs=<none> default_idx=0<block_start>""" A command looks like:
[op_name, [
[source 0 arglist for op],
[source 1 arglist for op],
...
[source n arglist for op],
]]
I am not optimizing this here, as that will be done when
the CFF2 Charstring is created in fontTools.varLib.build().
If I did, I would have to rearrange the arguments to:
[
[arg 0 for source 0 ... arg 0 for source n]
[arg 1 for source 0 ... arg 1 for source n]
...
[arg M for source 0 ... arg M for source n]
]
before calling specialize.
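Illustrative merged command list for two sources (coordinate values are assumed):
[['rmoveto', [[10, 20], [12, 22]]],
['rlineto', [[5, 0], [6, 0]]]]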
"""<line_sep>t2List=[]<line_sep>merged_commands=self._commands<for_stmt>i range(num_masters)<block_start>commands=[]<for_stmt>op merged_commands<block_start>source_op=[op[0] op[1][i]]<line_sep>commands.append(source_op)<block_end>program=commandsToProgram(commands)<if_stmt>self._width<is><not><none><block_start><assert_stmt><not>self._CFF2 ("CFF2 does not allow encoding glyph width in CharString.")<line_sep>program.insert(0 otRound(self._width))<block_end><if_stmt><not>self._CFF2<block_start>program.append('endchar')<block_end>charString=T2CharString(program=program private=private globalSubrs=globalSubrs)<line_sep>t2List.append(charString)<block_end># if default_idx is not 0, we need to move it to the right index.
<if_stmt>default_idx<block_start>default_font_cs=t2List.pop(0)<line_sep>t2List.insert(default_idx default_font_cs)<block_end><return>t2List<block_end><block_end><def_stmt>_get_cs charstrings glyphName<block_start><if_stmt>glyphName<not><in>charstrings<block_start><return><none><block_end><return>charstrings[glyphName]<block_end><def_stmt>do_compatibility vf master_fonts default_idx<block_start>default_font=vf<line_sep>default_charStrings=default_font['CFF '].cff.topDictIndex[0].CharStrings<line_sep>glyphOrder=default_font.getGlyphOrder()<line_sep>charStrings=[font['CFF '].cff.topDictIndex[0].CharStrings<for>font master_fonts]<for_stmt>gname glyphOrder<block_start>all_cs=[_get_cs(cs gname)<for>cs charStrings]<if_stmt>len([gs<for>gs all_cs<if>gs<is><not><none>])<l>2<block_start><continue><block_end># remove the None's from the list.
cs_list=[cs<for>cs all_cs<if>cs]<line_sep>num_masters=len(cs_list)<line_sep>default_charstring=default_charStrings[gname]<line_sep>compat_pen=CompatibilityPen([] gname num_masters 0)<line_sep>default_charstring.outlineExtractor=T2OutlineExtractor<line_sep>default_charstring.draw(compat_pen)<line_sep># Add the coordinates from all the other regions to the
# blend lists in the CFF2 charstring.
region_cs=cs_list[:]<del_stmt>region_cs[default_idx]<for_stmt>region_idx,region_charstring enumerate(region_cs start=1)<block_start>compat_pen.restart(region_idx)<line_sep>region_charstring.draw(compat_pen)<block_end><if_stmt>compat_pen.fixed<block_start>fixed_cs_list=compat_pen.getCharStrings(num_masters private=default_charstring.private globalSubrs=default_charstring.globalSubrs default_idx=default_idx)<line_sep>cs_list=list(cs_list)<for_stmt>i,cs enumerate(cs_list)<block_start>mi=all_cs.index(cs)<line_sep>charStrings[mi][gname]=fixed_cs_list[i]<block_end><block_end><block_end><block_end><def_stmt>otfFinder s<block_start><return>s.replace('.ufo' '.otf')<block_end><def_stmt>suppress_glyph_names tt_font<block_start>postTable=tt_font['post']<line_sep>postTable.formatType=3.0<line_sep>postTable.compile(tt_font)<block_end><def_stmt>remove_mac_names tt_font<block_start>name_tb=tt_font['name']<line_sep>name_tb.names=[nr<for>nr name_tb.names<if>nr.platformID<ne>1]<block_end><def_stmt>update_stat_name_ids tt_font<block_start>"""
The STAT spec says that axes must point to the same name ID used
in the fvar, so check here and update if they are different.
"""<line_sep>fvar=tt_font['fvar']<line_sep>stat=tt_font['STAT']<line_sep>fvar_axis_names={}<for_stmt>axis fvar.axes<block_start>fvar_axis_names[axis.axisTag]=axis.axisNameID<block_end><for_stmt>axis stat.table.DesignAxisRecord.Axis<block_start>fvar_id=fvar_axis_names.get(axis.AxisTag)<if_stmt>fvar_id<is><none># Not required for all STAT axes to be in fvar
<block_start><continue><block_end><if_stmt>axis.AxisNameID<ne>fvar_id<block_start>axis.AxisNameID=fvar_id<block_end><block_end><block_end><def_stmt>validate_stat_axes tt_font<block_start>"""
Ensure all axes defined in fvar also exist in the STAT table
"""<line_sep>fvar=tt_font['fvar']<line_sep>stat=tt_font['STAT']<line_sep>fvar_axis_tags=[axis.axisTag<for>axis fvar.axes]<line_sep>stat_axis_tags=[axis.AxisTag<for>axis stat.table.DesignAxisRecord.Axis]<line_sep>diff=set(fvar_axis_tags)-set(stat_axis_tags)<if_stmt>diff<block_start><raise>CFF2VFError(f'All fvar axes must also be defined in the STAT table. '<concat>f'Axes for {str(list(diff))} are missing.')<block_end><block_end><def_stmt>validate_stat_values ttFont<block_start>"""
Check axis values in the STAT table to ensure they are within the ranges
defined in the fvar
"""<line_sep>fvar=ttFont['fvar']<line_sep>stat=ttFont['STAT']<line_sep>logger.progress('Validating STAT axis values...')<line_sep>errors=[]<line_sep>stat_range_vals={}<if_stmt>hasattr(stat.table "AxisValueArray.AxisValue")<block_start><for_stmt>av stat.table.AxisValueArray.AxisValue<block_start>axis_tag=stat.table.DesignAxisRecord.Axis[av.AxisIndex].AxisTag<if_stmt>axis_tag<not><in>stat_range_vals<block_start>stat_range_vals[axis_tag]=[]<block_end><if_stmt>hasattr(av 'NominalValue')<block_start>stat_range_vals[axis_tag].append(av.NominalValue)<if_stmt><not>av.RangeMinValue<le>av.NominalValue<le>av.RangeMaxValue<block_start>errors.append(f'Invalid default value {av.NominalValue} for range '<concat>f'{av.RangeMinValue} - {av.RangeMaxValue}')<block_end><block_end><if_stmt>hasattr(av 'RangeMaxValue')<block_start>stat_range_vals[axis_tag].append(av.RangeMaxValue)<block_end><if_stmt>hasattr(av 'RangeMinValue')<block_start>stat_range_vals[axis_tag].append(av.RangeMinValue)<block_end><if_stmt>hasattr(av 'Value')<block_start>stat_range_vals[axis_tag].append(av.Value)<block_end><block_end><block_end><for_stmt>axis fvar.axes<block_start>stat_ref=stat_range_vals.get(axis.axisTag)<if_stmt>stat_ref<is><none><block_start><continue><block_end>out_of_range=[]<for_stmt>val stat_ref<block_start><if_stmt>(val<g>axis.maxValue<and>int(val)<ne>32767)<or>(val<l>axis.minValue<and>int(val)<ne>-32767)<block_start>out_of_range.append(val)<block_end><block_end><if_stmt>out_of_range<block_start>expected_range=f'{axis.minValue} - {axis.maxValue}'<line_sep>errors.append(f'{axis.axisTag} values {str(sorted(set(out_of_range)))} are '<concat>f'outside of range {expected_range} specified in fvar')<block_end><block_end><if_stmt>errors<block_start>msg='\n'.join(errors)<line_sep><raise>CFF2VFError(f'Invalid STAT table. {msg}')<block_end><block_end><def_stmt>import_stat_override tt_font stat_file_path<block_start><if_stmt>'STAT'<in>tt_font<block_start>logger.warning(f'Overwriting existing STAT table with {stat_file_path}.')<block_end>tt_font.importXML(stat_file_path)<block_end><def_stmt>get_options args<block_start>parser=argparse.ArgumentParser(formatter_class=argparse.RawTextHelpFormatter description=__doc__)<line_sep>parser.add_argument('--version' action='version' version=__version__)<line_sep>parser.add_argument('-v' '--verbose' action='count' default=0 help='verbose mode\n'<concat>'Use -vv for debug mode')<line_sep>parser.add_argument('-d' '--designspace' metavar='PATH' dest='design_space_path' type=validate_path help='path to design space file' required=<true>)<line_sep>parser.add_argument('-o' '--output' dest='var_font_path' metavar='PATH' help='path to output variable font file. Default is base name\n'<concat>'of the design space file.' )<line_sep>parser.add_argument('-k' '--keep-glyph-names' action='store_true' help='Preserve glyph names in output variable font\n'<concat>"(using 'post' table format 2)." )<line_sep>parser.add_argument('--omit-mac-names' action='store_true' help="Omit Macintosh strings from 'name' table." )<line_sep>parser.add_argument('-c' '--check-compat' dest='check_compatibility' action='store_true' help='Check outline compatibility in source fonts, and fix flat\n'<concat>'curves.' 
)<line_sep>parser.add_argument('-i' '--include-glyphs' dest='include_glyphs_path' metavar='PATH' type=validate_path help='Path to file containing a python dict specifying which\n'<concat>'glyph names should be included from which source fonts.')<line_sep>options=parser.parse_args(args)<if_stmt><not>options.var_font_path<block_start>var_font_path=f'{os.path.splitext(options.design_space_path)[0]}.otf'<line_sep>options.var_font_path=var_font_path<block_end><if_stmt><not>options.verbose<block_start>level=PROGRESS_LEVEL<line_sep>logging.basicConfig(level=level format="%(message)s")<block_end><else_stmt><block_start>level=logging.INFO<line_sep>logging.basicConfig(level=level)<block_end>logger.setLevel(level)<line_sep><return>options<block_end><def_stmt>main args=<none><block_start>options=get_options(args)<if_stmt>os.path.exists(options.var_font_path)<block_start>os.remove(options.var_font_path)<block_end>designspace=DesignSpaceDocument.fromfile(options.design_space_path)<line_sep>ds_data=varLib.load_designspace(designspace)<line_sep>master_fonts=varLib.load_masters(designspace otfFinder)<line_sep>logger.progress("Reading source fonts...")<for_stmt>i,master_font enumerate(master_fonts)<block_start>designspace.sources[i].font=master_font<block_end># Subset source fonts
<if_stmt>options.include_glyphs_path<block_start>logger.progress("Subsetting source fonts...")<line_sep>subsetDict=getSubset(options.include_glyphs_path)<line_sep>subset_masters(designspace subsetDict)<block_end><if_stmt>options.check_compatibility<block_start>logger.progress("Checking outline compatibility in source fonts...")<line_sep>font_list=[src.font<for>src designspace.sources]<line_sep>default_font=designspace.sources[ds_data.base_idx].font<line_sep>vf=deepcopy(default_font)<line_sep># We copy vf from default_font, because we use VF to hold
# merged arguments from each source font charstring - this alters
# the font, which we don't want to do to the default font.
do_compatibility(vf font_list ds_data.base_idx)<block_end>logger.progress("Building variable OTF (CFF2) font...")<line_sep># Note that we now pass in the design space object, rather than a path to
# the design space file, in order to pass in the modified source fonts
# fonts without having to recompile and save them.
<try_stmt><block_start>varFont,_,_=varLib.build(designspace otfFinder)<block_end><except_stmt>VarLibCFFPointTypeMergeError<block_start>logger.error("The input set requires compatibilization. Please try "<concat>"again with the -c (--check-compat) option.")<line_sep><return>0<block_end><if_stmt><not>options.keep_glyph_names<block_start>suppress_glyph_names(varFont)<block_end><if_stmt>options.omit_mac_names<block_start>remove_mac_names(varFont)<block_end>stat_file_path=os.path.join(os.path.dirname(options.var_font_path) STAT_FILENAME)<if_stmt>os.path.exists(stat_file_path)<block_start>logger.progress("Importing STAT table override...")<line_sep>import_stat_override(varFont stat_file_path)<block_end>validate_stat_axes(varFont)<line_sep>validate_stat_values(varFont)<line_sep>update_stat_name_ids(varFont)<line_sep>varFont.save(options.var_font_path)<line_sep>logger.progress(f"Built variable font '{options.var_font_path}'")<block_end><if_stmt>__name__<eq>'__main__'<block_start>sys.exit(main())<block_end>
|
<import_from_stmt>selenium.webdriver Firefox<import_from_stmt>time sleep<line_sep>url='https://curso-python-selenium.netlify.app/aula_03.html'<line_sep>navegador=Firefox()<line_sep>navegador.get(url)<line_sep>sleep(1)<line_sep>a=navegador.find_element_by_tag_name('a')<for_stmt>click range(10)<block_start>ps=navegador.find_elements_by_tag_name('p')<line_sep>a.click()<line_sep>print(f'Valor do ultimo p: {ps[-1].text} valor do click: {click}')<line_sep>print(f'Os valors são iguais {ps[-1].text<eq>str(click)}')<block_end>navegador.quit()<line_sep>
|
<import_stmt>GRT<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>argparse<def_stmt>main # Parse the data filename from the argument list
<block_start>parser=argparse.ArgumentParser(description='Process some data.')<line_sep>parser.add_argument('filename' help='A data file')<line_sep>args=parser.parse_args()<line_sep>filename=args.filename<line_sep># Load some training data to train the ClusterTree model
trainingData=np.loadtxt(filename delimiter=',')<line_sep># Create a new ClusterTree instance
ctree=GRT.ClusterTree()<line_sep># Set the number of steps that will be used to choose the best splitting values
# More steps will give you a better model, but will take longer to train
ctree.setNumSplittingSteps(100)<line_sep># Set the maximum depth of the tree
ctree.setMaxDepth(10)<line_sep># Set the minimum number of samples allowed per node
ctree.setMinNumSamplesPerNode(10)<line_sep># Set the minimum RMS error allowed per node
ctree.setMinRMSErrorPerNode(0.1)<line_sep># Train a cluster tree model
<if_stmt><not>ctree.train(trainingData)<block_start>print("Failed to train model!")<line_sep>sys.exit(1)<block_end># if not ctree.save("CTreeModel.grt"): # this fails for some reason
# print("Failed to save model!")
# sys.exit(1)
# if not ctree.load("CTreeModel.grt"):
# print("Failed to train model!")
# sys.exit(1)
# Print the tree
ctree._print()<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<line_sep>sys.exit(0)<block_end>
|
<import_stmt>pytest<import_from_stmt>awx.api.serializers OAuth2TokenSerializer<line_sep>@pytest.mark.parametrize('scope, expect' [('' <false>) ('read' <true>) ('read read' <false>) ('write read' <true>) ('read rainbow' <false>)])<def_stmt>test_invalid_scopes scope expect<block_start><assert_stmt>OAuth2TokenSerializer()._is_valid_scope(scope)<is>expect<block_end>
|
<import_stmt>io<import_from_stmt>django.http HttpResponse HttpResponseRedirect<import_from_stmt>django.core.files.base ContentFile<import_from_stmt>django.shortcuts render<import_stmt>segno<import_from_stmt>.models Ticket<import_from_stmt>.forms TicketForm<def_stmt>index request<block_start>"""Renders the form to create a ticket"""<if_stmt>request.method<eq>'POST'<block_start>form=TicketForm(request.POST)<if_stmt>form.is_valid()<block_start>name=form.cleaned_data['name']<line_sep>qr=segno.make_qr(name)<line_sep>buff=io.BytesIO()<line_sep>qr.save(buff kind='png' scale=3 dark='darkblue')<line_sep>ticket=Ticket(name=name)<line_sep>ticket.qrcode.save(name+'.png' ContentFile(buff.getvalue()) save=<true>)<line_sep><return>HttpResponseRedirect('/thanks/')<block_end><block_end><else_stmt><block_start>form=TicketForm()<block_end><return>render(request 'qrcode/example.html' {'form':form})<block_end><def_stmt>thanks request<block_start><return>HttpResponse('Thanks, a new ticket was created')<block_end>
|
<import_stmt>re<import_from_stmt>datetime date<import_from_stmt>html.parser HTMLParser<import_stmt>requests<import_from_stmt>waste_collection_schedule Collection# type: ignore[attr-defined]
TITLE="RH Entsorgung"<line_sep>DESCRIPTION="Source for RHE (Rhein Hunsrück Entsorgung)."<line_sep>URL="https://www.rh-entsorgung.de"<line_sep>TEST_CASES={"Horn":{"city":"Rheinböllen" "street":"Erbacher Straße" "house_number":13 "address_suffix":"A" } "Bärenbach":{"city":"Bärenbach" "street":"Schwarzener Straße" "house_number":10 } }<line_sep># Parser for HTML input (hidden) text
<class_stmt>HiddenInputParser(HTMLParser)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self._args={}<block_end>@property<def_stmt>args self<block_start><return>self._args<block_end><def_stmt>handle_starttag self tag attrs<block_start><if_stmt>tag<eq>"input"<block_start>d=dict(attrs)<if_stmt>str(d["type"]).lower()<eq>"hidden"<block_start>self._args[d["name"]]=d["value"]<if>"value"<in>d<else>""<block_end><block_end><block_end><block_end><class_stmt>CollectionParser(HTMLParser)<block_start><def_stmt>__init__ self<arrow><none><block_start>super().__init__()<line_sep>self._entries:list[Collection]=[]<line_sep>self._current_type:str=<none><line_sep>self._capture_type:bool=<false><line_sep>self._capture_date:bool=<false><line_sep>self._date_pattern=re.compile(r"(?P<day>\d{2})\.(?P<month>\d{2})\.(?P<year>\d{4})")<block_end>@property<def_stmt>entries self<block_start><return>self._entries<block_end><def_stmt>handle_starttag self tag:str attrs<arrow><none><block_start><if_stmt>tag<eq>"p"<block_start>d=dict(attrs)<if_stmt>str(d["class"]).lower()<eq>"work"<block_start>self._capture_type=<true><block_end><block_end><if_stmt>self._current_type<is><not><none><and>tag<eq>"td"<block_start>d=dict(attrs)<if_stmt>("class"<in>d)<and>("dia_c_abfuhrdatum"<in>str(d["class"]))<block_start>self._capture_date=<true><block_end><block_end><block_end><def_stmt>handle_data self data:str<arrow><none><block_start><if_stmt>self._capture_type<block_start>self._current_type=data<block_end><if_stmt>self._capture_date<block_start>match=self._date_pattern.match(data)<line_sep>self._entries.append(Collection(date(int(match.group(3)) int(match.group(2)) int(match.group(1))) self._current_type ))<block_end><block_end><def_stmt>handle_endtag self tag:str<arrow><none><block_start><if_stmt>tag<eq>"p"<and>self._capture_type<block_start>self._capture_type=<false><block_end><if_stmt>tag<eq>"td"<and>self._capture_date<block_start>self._capture_date=<false><block_end><block_end><block_end><class_stmt>Source<block_start><def_stmt>__init__ self city:str street:str house_number:int address_suffix:str="" garbage_types:list[int]=[1 2 3 4 5] <block_start>self._city=city<line_sep>self._street=street<line_sep>self._hnr=house_number<line_sep>self._suffix=address_suffix<line_sep>self._garbage_types=garbage_types<block_end><def_stmt>fetch self<block_start>r=requests.get("https://aao.rh-entsorgung.de/WasteManagementRheinhunsrueck/WasteManagementServlet" params={"SubmitAction":"wasteDisposalServices" "InFrameMode":"TRUE"} )<line_sep>r.encoding="utf-8"<line_sep>parser=HiddenInputParser()<line_sep>parser.feed(r.text)<line_sep>args=parser.args<line_sep>args["Ort"]=self._city<line_sep>args["Strasse"]=self._street<line_sep>args["Hausnummer"]=str(self._hnr)<line_sep>args["Hausnummerzusatz"]=self._suffix<line_sep>args["Zeitraum"]="Die Leerungen der nächsten 3 Monate"<line_sep>args["SubmitAction"]="forward"<for_stmt>type range(1 6)<block_start>args[f"ContainerGewaehlt_{type}"]=("on"<if>type<in>self._garbage_types<else>"off")<block_end># First request returns wrong city. has to be called twice!
r=requests.post("https://aao.rh-entsorgung.de/WasteManagementRheinhunsrueck/WasteManagementServlet" data=args )<line_sep>r=requests.post("https://aao.rh-entsorgung.de/WasteManagementRheinhunsrueck/WasteManagementServlet" data=args )<line_sep>r.encoding="utf-8"<line_sep>date_parser=CollectionParser()<line_sep>date_parser.feed(r.text)<line_sep><return>date_parser.entries<block_end><block_end>
|
# -*- coding: utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>csv<import_stmt>numpy<as>np<import_stmt>os<import_stmt>sys<import_from_stmt>observations.util maybe_download_and_extract<def_stmt>mathpnl path<block_start>"""mathpnl
Data loads lazily. Type data(mathpnl) into the console.
A data.frame with 3850 rows and 52 variables:
- distid. district identifier
- intid. intermediate school district
- lunch. percent eligible for free lunch
- enrol. school enrollment
- ptr. pupil/teacher: 1995-98
- found. foundation grant, $: 1995-98
- expp. expenditure per pupil
- revpp. revenue per pupil
- avgsal. average teacher salary
- drop. high school dropout rate, percent
- grad. high school grad. rate, percent
- math4. percent satisfactory, 4th grade math
- math7. percent satisfactory, 7th grade math
- choice. number choice students
- psa. # public school academy studs.
- year. 1992-1998
- staff. staff per 1000 students
- avgben. avg teacher fringe benefits
- y92. =1 if year == 1992
- y93. =1 if year == 1993
- y94. =1 if year == 1994
- y95. =1 if year == 1995
- y96. =1 if year == 1996
- y97. =1 if year == 1997
- y98. =1 if year == 1998
- lexpp. log(expp)
- lfound. log(found)
- lexpp\_1. lexpp[\_n-1]
- lfnd\_1. lfnd[\_n-1]
- lenrol. log(enrol)
- lenrolsq. lenrol^2
- lunchsq. lunch^2
- lfndsq. lfnd^2
- math4\_1. math4[\_n-1]
- cmath4. math4 - math4\_1
- gexpp. lexpp - lexpp\_1
- gexpp\_1. gexpp[\_n-1
- gfound. lfound - lfnd\_1
- gfnd\_1. gfound[\_n-1]
- clunch. lunch - lunch[\_n-1]
- clnchsq. lunchsq - lunchsq[\_n-1]
- genrol. lenrol - lenrol[\_n-1]
- genrolsq. genrol^2
- expp92. expp in 1992
- lexpp92. log(expp92)
- math4\_92. math4 in 1992
- cpi. consumer price index
- rexpp. real spending per pupil, 1997$
- lrexpp. log(rexpp)
- lrexpp\_1. lrexpp[\_n-1]
- grexpp. lrexpp - lrexpp\_1
- grexpp\_1. grexpp[\_n-1]
https://www.cengage.com/cgi-wadsworth/course_products_wp.pl?fid=M20b&product_
isbn_issn=9781111531041
Args:
path: str.
Path to a directory that either already stores the file or to which
the file will be downloaded and extracted.
Filename is `mathpnl.csv`.
Returns:
Tuple of np.ndarray `x_train` with 3850 rows and 52 columns and
dictionary `metadata` of column headers (feature names).
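Illustrative call (the path argument below is an assumption):
x_train, metadata = mathpnl('~/data')
Here x_train has shape (3850, 52) and metadata['columns'] holds the column names.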
"""<import_stmt>pandas<as>pd<line_sep>path=os.path.expanduser(path)<line_sep>filename='mathpnl.csv'<if_stmt><not>os.path.exists(os.path.join(path filename))<block_start>url='http://dustintran.com/data/r/wooldridge/mathpnl.csv'<line_sep>maybe_download_and_extract(path url save_file_name='mathpnl.csv' resume=<false>)<block_end>data=pd.read_csv(os.path.join(path filename) index_col=0 parse_dates=<true>)<line_sep>x_train=data.values<line_sep>metadata={'columns':data.columns}<line_sep><return>x_train metadata<block_end>
|
# Copyright 2014, Sandia Corporation. Under the terms of Contract
# DE-AC04-94AL85000 with Sandia Corporation, the U.S. Government retains certain
# rights in this software.
"""Support functions for rendering using ReportLab."""<import_stmt>base64<import_stmt>io<import_stmt>re<import_stmt>numpy<import_stmt>reportlab.lib.colors<import_stmt>reportlab.lib.utils<import_stmt>toyplot.color<import_stmt>toyplot.units<def_stmt>render svg canvas<block_start>"""Render the SVG representation of a toyplot canvas to a ReportLab canvas.
Parameters
----------
svg: xml.etree.ElementTree.Element
SVG representation of a :class:`toyplot.canvas.Canvas` returned by
:func:`toyplot.svg.render()`.
canvas: reportlab.pdfgen.canvas.Canvas
ReportLab canvas that will be used to render the plot.
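A minimal usage sketch (the calling context below is assumed, not part of this module):
import toyplot.svg
import reportlab.pdfgen.canvas
svg = toyplot.svg.render(toyplot_canvas)          # SVG DOM for an existing toyplot canvas
rl_canvas = reportlab.pdfgen.canvas.Canvas("figure.pdf")
render(svg, rl_canvas)
rl_canvas.save()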
"""<def_stmt>get_fill root style<block_start><if_stmt>"fill"<not><in>style<block_start><return><none> <none><block_end># pragma: no cover
gradient_id=re.match("^url[(]#(.*)[)]$" style["fill"])<if_stmt>gradient_id<block_start>gradient_id=gradient_id.group(1)<line_sep>gradient_xml=root.find(".//*[@id='%s']"%gradient_id)<if_stmt>gradient_xml.tag<ne>"linearGradient"<block_start><raise>NotImplementedError("Only linear gradients are implemented.")# pragma: no cover
<block_end><if_stmt>gradient_xml.get("gradientUnits")<ne>"userSpaceOnUse"<block_start><raise>NotImplementedError("Only userSpaceOnUse gradients are implemented.")# pragma: no cover
<block_end><return><none> gradient_xml<block_end>color=toyplot.color.css(style["fill"])<if_stmt>color<is><none><block_start><return><none> <none><block_end>fill_opacity=float(style.get("fill-opacity" 1.0))<line_sep>opacity=float(style.get("opacity" 1.0))<line_sep>fill=toyplot.color.rgba(color["r"] color["g"] color["b"] color["a"]<times>fill_opacity<times>opacity )<line_sep><return>fill <none><block_end><def_stmt>get_stroke style<block_start><if_stmt>"stroke"<not><in>style<block_start><return><none><block_end># pragma: no cover
color=toyplot.color.css(style["stroke"])<if_stmt>color<is><none><block_start><return><none><block_end>stroke_opacity=float(style.get("stroke-opacity" 1.0))<line_sep>opacity=float(style.get("opacity" 1.0))<line_sep><return>toyplot.color.rgba(color["r"] color["g"] color["b"] color["a"]<times>stroke_opacity<times>opacity )<block_end><def_stmt>get_line_cap style<block_start><if_stmt>"stroke-linecap"<not><in>style<block_start><return>0<block_end><elif_stmt>style["stroke-linecap"]<eq>"butt"<block_start><return>0<block_end><elif_stmt>style["stroke-linecap"]<eq>"round"<block_start><return>1<block_end><elif_stmt>style["stroke-linecap"]<eq>"square"<block_start><return>2<block_end><block_end><def_stmt>get_font_family style<block_start><if_stmt>"font-family"<not><in>style<block_start><return><none><block_end># pragma: no cover
bold=<true><if>style.get("font-weight" "")<eq>"bold"<else><false><line_sep>italic=<true><if>style.get("font-style" "")<eq>"italic"<else><false><for_stmt>font_family style["font-family"].split(",")<block_start>font_family=font_family.lower()<if_stmt>font_family<in>get_font_family.substitutions<block_start>font_family=get_font_family.substitutions[font_family]<line_sep><return>get_font_family.font_table[(font_family bold italic)]<block_end><block_end><raise>ValueError("Unknown font family: %s"%style["font-family"])<block_end># pragma: no cover
get_font_family.font_table={("courier" <false> <false>):"Courier" ("courier" <true> <false>):"Courier-Bold" ("courier" <false> <true>):"Courier-Oblique" ("courier" <true> <true>):"Courier-BoldOblique" ("helvetica" <false> <false>):"Helvetica" ("helvetica" <true> <false>):"Helvetica-Bold" ("helvetica" <false> <true>):"Helvetica-Oblique" ("helvetica" <true> <true>):"Helvetica-BoldOblique" ("times" <false> <false>):"Times-Roman" ("times" <true> <false>):"Times-Bold" ("times" <false> <true>):"Times-Italic" ("times" <true> <true>):"Times-BoldItalic" }<line_sep>get_font_family.substitutions={"courier":"courier" "helvetica":"helvetica" "monospace":"courier" "sans-serif":"helvetica" "serif":"times" "times":"times" }<def_stmt>set_fill_color canvas color<block_start>canvas.setFillColorRGB(color["r"] color["g"] color["b"])<line_sep>canvas.setFillAlpha(numpy.asscalar(color["a"]))<block_end><def_stmt>set_stroke_color canvas color<block_start>canvas.setStrokeColorRGB(color["r"] color["g"] color["b"])<line_sep>canvas.setStrokeAlpha(numpy.asscalar(color["a"]))<block_end><def_stmt>render_element root element canvas styles<block_start>canvas.saveState()<line_sep>current_style={}<if_stmt>styles<block_start>current_style.update(styles[-1])<block_end><for_stmt>declaration element.get("style" "").split(";")<block_start><if_stmt>declaration<eq>""<block_start><continue><block_end>key,value=declaration.split(":")<line_sep>current_style[key]=value<block_end>styles.append(current_style)<if_stmt>"stroke-width"<in>current_style<block_start>canvas.setLineWidth(float(current_style["stroke-width"]))<block_end><if_stmt>"stroke-dasharray"<in>current_style<block_start>canvas.setDash([float(length)<for>length current_style["stroke-dasharray"].split(",")])<block_end><if_stmt>current_style.get("visibility")<ne>"hidden"<block_start><if_stmt>"transform"<in>element.attrib<block_start><for_stmt>transformation element.get("transform").split(")")[::1]<block_start><if_stmt>transformation<block_start>transform,arguments=transformation.split("(")<line_sep>arguments=arguments.split(",")<if_stmt>transform.strip()<eq>"translate"<block_start><if_stmt>len(arguments)<eq>2<block_start>canvas.translate(float(arguments[0]) float(arguments[1]))<block_end><block_end><elif_stmt>transform.strip()<eq>"rotate"<block_start><if_stmt>len(arguments)<eq>1<block_start>canvas.rotate(float(arguments[0]))<block_end><if_stmt>len(arguments)<eq>3<block_start>canvas.translate(float(arguments[1]) float(arguments[2]))<line_sep>canvas.rotate(float(arguments[0]))<line_sep>canvas.translate(-float(arguments[1]) -float(arguments[2]))<block_end><block_end><block_end><block_end><block_end><if_stmt>element.tag<eq>"svg"<block_start><if_stmt>"background-color"<in>current_style<block_start>set_fill_color(canvas toyplot.color.css(current_style["background-color"]))<line_sep>canvas.rect(0 0 float(element.get("width")[:-2]) float(element.get("height")[:-2]) stroke=0 fill=1 )<block_end><if_stmt>current_style["border-style"]<ne>"none"<block_start>set_stroke_color(canvas toyplot.color.css(current_style["border-color"]))<line_sep>canvas.setLineWidth(float(current_style["border-width"]))<line_sep>canvas.rect(0 0 float(element.get("width")[:-2]) float(element.get("height")[:-2]) stroke=1 fill=0 )<block_end><for_stmt>child element<block_start>render_element(root child canvas styles)<block_end><block_end><elif_stmt>element.tag<eq>"a"# At the moment, it doesn't look like reportlab supports external hyperlinks.
<block_start><for_stmt>child element<block_start>render_element(root child canvas styles)<block_end><block_end><elif_stmt>element.tag<eq>"g"<block_start><if_stmt>element.get("clip-path" <none>)<is><not><none><block_start>clip_id=element.get("clip-path")[5:-1]<line_sep>clip_path=root.find(".//*[@id='%s']"%clip_id)<for_stmt>child clip_path<block_start><if_stmt>child.tag<eq>"rect"<block_start>x=float(child.get("x"))<line_sep>y=float(child.get("y"))<line_sep>width=float(child.get("width"))<line_sep>height=float(child.get("height"))<line_sep>path=canvas.beginPath()<line_sep>path.moveTo(x y)<line_sep>path.lineTo(x+width y)<line_sep>path.lineTo(x+width y+height)<line_sep>path.lineTo(x y+height)<line_sep>path.close()<line_sep>canvas.clipPath(path stroke=0 fill=1)<block_end><else_stmt><block_start>toyplot.log.error("Unhandled clip tag: %s" child.tag)<block_end><block_end><block_end># pragma: no cover
<for_stmt>child element<block_start>render_element(root child canvas styles)<block_end><block_end><elif_stmt>element.tag<eq>"clipPath"<block_start><pass><block_end><elif_stmt>element.tag<eq>"line"<block_start>stroke=get_stroke(current_style)<if_stmt>stroke<is><not><none><block_start>set_stroke_color(canvas stroke)<line_sep>canvas.setLineCap(get_line_cap(current_style))<line_sep>canvas.line(float(element.get("x1" 0)) float(element.get("y1" 0)) float(element.get("x2" 0)) float(element.get("y2" 0)) )<block_end><block_end><elif_stmt>element.tag<eq>"path"<block_start>stroke=get_stroke(current_style)<if_stmt>stroke<is><not><none><block_start>set_stroke_color(canvas stroke)<line_sep>canvas.setLineCap(get_line_cap(current_style))<line_sep>path=canvas.beginPath()<line_sep>commands=element.get("d").split()<while_stmt>commands<block_start>command=commands.pop(0)<if_stmt>command<eq>"L"<block_start>path.lineTo(float(commands.pop(0)) float(commands.pop(0)))<block_end><elif_stmt>command<eq>"M"<block_start>path.moveTo(float(commands.pop(0)) float(commands.pop(0)))<block_end><block_end>canvas.drawPath(path)<block_end><block_end><elif_stmt>element.tag<eq>"polygon"<block_start>fill,fill_gradient=get_fill(root current_style)<if_stmt>fill_gradient<is><not><none><block_start><raise>NotImplementedError("Gradient <polygon> not implemented.")# pragma: no cover
<block_end><if_stmt>fill<is><not><none><block_start>set_fill_color(canvas fill)<block_end>stroke=get_stroke(current_style)<if_stmt>stroke<is><not><none><block_start>set_stroke_color(canvas stroke)<block_end>points=[point.split(",")<for>point element.get("points").split()]<line_sep>path=canvas.beginPath()<for_stmt>point points[:1]<block_start>path.moveTo(float(point[0]) float(point[1]))<block_end><for_stmt>point points[1:]<block_start>path.lineTo(float(point[0]) float(point[1]))<block_end>path.close()<line_sep>canvas.drawPath(path stroke=stroke<is><not><none> fill=fill<is><not><none>)<block_end><elif_stmt>element.tag<eq>"rect"<block_start>fill,fill_gradient=get_fill(root current_style)<if_stmt>fill<is><not><none><block_start>set_fill_color(canvas fill)<block_end>stroke=get_stroke(current_style)<if_stmt>stroke<is><not><none><block_start>set_stroke_color(canvas stroke)<block_end>x=float(element.get("x" 0))<line_sep>y=float(element.get("y" 0))<line_sep>width=float(element.get("width"))<line_sep>height=float(element.get("height"))<line_sep>path=canvas.beginPath()<line_sep>path.moveTo(x y)<line_sep>path.lineTo(x+width y)<line_sep>path.lineTo(x+width y+height)<line_sep>path.lineTo(x y+height)<line_sep>path.close()<if_stmt>fill_gradient<is><not><none><block_start>pdf_colors=[]<line_sep>pdf_offsets=[]<for_stmt>stop fill_gradient<block_start>offset=float(stop.get("offset"))<line_sep>color=toyplot.color.css(stop.get("stop-color"))<line_sep>opacity=float(stop.get("stop-opacity"))<line_sep>pdf_colors.append(reportlab.lib.colors.Color(color["r"] color["g"] color["b"] color["a"]<times>opacity))<line_sep>pdf_offsets.append(offset)<block_end>canvas.saveState()<line_sep>canvas.clipPath(path stroke=0 fill=1)<line_sep>canvas.setFillAlpha(1)<line_sep>canvas.linearGradient(float(fill_gradient.get("x1")) float(fill_gradient.get("y1")) float(fill_gradient.get("x2")) float(fill_gradient.get("y2")) pdf_colors pdf_offsets )<line_sep>canvas.restoreState()<block_end>canvas.drawPath(path stroke=stroke<is><not><none> fill=fill<is><not><none>)<block_end><elif_stmt>element.tag<eq>"circle"<block_start>fill,fill_gradient=get_fill(root current_style)<if_stmt>fill_gradient<is><not><none><block_start><raise>NotImplementedError("Gradient <circle> not implemented.")# pragma: no cover
<block_end><if_stmt>fill<is><not><none><block_start>set_fill_color(canvas fill)<block_end>stroke=get_stroke(current_style)<if_stmt>stroke<is><not><none><block_start>set_stroke_color(canvas stroke)<block_end>cx=float(element.get("cx" 0))<line_sep>cy=float(element.get("cy" 0))<line_sep>r=float(element.get("r"))<line_sep>canvas.circle(cx cy r stroke=stroke<is><not><none> fill=fill<is><not><none>)<block_end><elif_stmt>element.tag<eq>"text"<block_start>x=float(element.get("x" 0))<line_sep>y=float(element.get("y" 0))<line_sep>fill,fill_gradient=get_fill(element current_style)<line_sep>stroke=get_stroke(current_style)<line_sep>font_family=get_font_family(current_style)<line_sep>font_size=toyplot.units.convert(current_style["font-size"] target="px")<line_sep>text=element.text<line_sep>canvas.saveState()<line_sep>canvas.setFont(font_family font_size)<if_stmt>fill<is><not><none><block_start>set_fill_color(canvas fill)<block_end><if_stmt>stroke<is><not><none><block_start>set_stroke_color(canvas stroke)<block_end>canvas.translate(x y)<line_sep>canvas.scale(1 -1)<line_sep>canvas.drawString(0 0 text)<line_sep>canvas.restoreState()<block_end><elif_stmt>element.tag<eq>"image"<block_start><import_stmt>PIL.Image<line_sep>image=element.get("xlink:href")<if_stmt><not>image.startswith("data:image/png;base64,")<block_start><raise>ValueError("Unsupported image type.")# pragma: no cover
<block_end>image=base64.standard_b64decode(image[22:])<line_sep>image=io.BytesIO(image)<line_sep>image=PIL.Image.open(image)<line_sep>image=reportlab.lib.utils.ImageReader(image)<line_sep>x=float(element.get("x" 0))<line_sep>y=float(element.get("y" 0))<line_sep>width=float(element.get("width"))<line_sep>height=float(element.get("height"))<line_sep>canvas.saveState()<line_sep>path=canvas.beginPath()<line_sep>set_fill_color(canvas toyplot.color.rgb(1 1 1))<line_sep>canvas.rect(x y width height stroke=0 fill=1)<line_sep>canvas.translate(x y+height)<line_sep>canvas.scale(1 -1)<line_sep>canvas.drawImage(image=image x=0 y=0 width=width height=height mask=<none>)<line_sep>canvas.restoreState()<block_end><elif_stmt>element.tag<in>["defs" "title"]<block_start><pass><block_end><else_stmt><block_start><raise>Exception("unhandled tag: %s"%element.tag)<block_end><block_end># pragma: no cover
styles.pop()<line_sep>canvas.restoreState()<block_end>render_element(svg svg canvas [])<block_end>
|
<import_from_stmt>.builder build_model<import_from_stmt>.registry MODELS<line_sep>
|
<import_stmt>gym<import_stmt>numpy<as>np<import_from_stmt>gym Wrapper<def_stmt>make_non_absorbing observation<block_start><return>np.concatenate([observation [0.0]] -1)<block_end><class_stmt>AbsorbingStatesWrapper(Wrapper)<block_start><def_stmt>__init__ self env<block_start>super().__init__(env)<line_sep>low=env.observation_space.low<line_sep>high=env.observation_space.high<line_sep>self._absorbing_state=np.concatenate([np.zeros_like(low) [1.0]] 0)<line_sep>low=np.concatenate([low [0]] 0)<line_sep>high=np.concatenate([high [1]] 0)<line_sep>self.observation_space=gym.spaces.Box(low=low high=high dtype=env.observation_space.dtype)<block_end><def_stmt>reset self **kwargs<block_start>self._done=<false><line_sep>self._absorbing=<false><line_sep>self._info={}<line_sep><return>make_non_absorbing(self.env.reset(**kwargs))<block_end><def_stmt>step self action<block_start><if_stmt><not>self._done<block_start>observation,reward,done,info=self.env.step(action)<line_sep>observation=make_non_absorbing(observation)<line_sep>self._done=done<line_sep>self._info=info<line_sep>truncated_done='TimeLimit.truncated'<in>info<line_sep><return>observation reward truncated_done info<block_end><else_stmt><block_start><if_stmt><not>self._absorbing<block_start>self._absorbing=<true><line_sep><return>self._absorbing_state 0.0 <false> self._info<block_end><else_stmt><block_start><return>self._absorbing_state 0.0 <true> self._info<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>env=gym.make('Hopper-v2')<line_sep>env=AbsorbingStatesWrapper(env)<line_sep>env.reset()<line_sep>done=<false><while_stmt><not>done<block_start>action=env.action_space.sample()<line_sep>obs,reward,done,info=env.step(action)<line_sep>print(obs done)<block_end><block_end>
|
'''Data channels for calls.
'''<import_from_stmt>utils *<import_from_stmt>enums *<import_from_stmt>errors ISkypeError<import_stmt>time<class_stmt>ICallChannel(object)<block_start>'''Represents a call channel.
'''<def_stmt>__init__ self Manager Call Stream Type<block_start>'''__init__.
@param Manager: Manager
@type Manager: L{ICallChannelManager}
@param Call: Call
@type Call: L{ICall}
@param Stream: Stream
@type Stream: L{IApplicationStream}
@param Type: Type
@type Type: L{Call channel type<enums.cctUnknown>}
'''<line_sep>self._Manager=Manager<line_sep>self._Call=Call<line_sep>self._Stream=Stream<line_sep>self._Type=Type<block_end><def_stmt>__repr__ self<block_start><return>'<%s with Manager=%s, Call=%s, Stream=%s>'%(object.__repr__(self)[1:-1] repr(self.Manager) repr(self.Call) repr(self.Stream))<block_end><def_stmt>SendTextMessage self Text<block_start>'''Sends text message over channel.
@param Text: Text
@type Text: unicode
'''<if_stmt>self._Type<eq>cctReliable<block_start>self._Stream.Write(Text)<block_end><elif_stmt>self._Type<eq>cctDatagram<block_start>self._Stream.SendDatagram(Text)<block_end><else_stmt><block_start><raise>ISkypeError(0 'Cannot send using %s channel type'%repr(self._Type))<block_end><block_end><def_stmt>_GetCall self<block_start><return>self._Call<block_end>Call=property(_GetCall doc='''Call.
@type: L{ICall}
''')<def_stmt>_GetManager self<block_start><return>self._Manager<block_end>Manager=property(_GetManager doc='''Manager.
@type: L{ICallChannelManager}
''')<def_stmt>_GetStream self<block_start><return>self._Stream<block_end>Stream=property(_GetStream doc='''Stream.
@type: L{IApplicationStream}
''')<def_stmt>_GetType self<block_start><return>self._Type<block_end>Type=property(_GetType doc='''Type.
@type: L{Call channel type<enums.cctUnknown>}
''')<block_end><class_stmt>ICallChannelManager(EventHandlingBase)<block_start>'''Instantiate this class to create a call channel manager. A call channel manager will
automatically create a data channel for voice calls based on the APP2APP protocol.
1. Usage.
You should access this class using the alias at the package level::
import Skype4Py
skype = Skype4Py.Skype()
ccm = Skype4Py.CallChannelManager()
ccm.Connect(skype)
For possible constructor arguments, read the L{ICallChannelManager.__init__} description.
2. Events.
This class provides events.
The events names and their arguments lists can be found in L{ICallChannelManagerEvents} class.
The usage of events is described in L{EventHandlingBase} class which is a superclass of
this class. Follow the link for more information.
@ivar OnChannels: Event handler for L{ICallChannelManagerEvents.Channels} event. See L{EventHandlingBase} for more information on events.
@type OnChannels: callable
@ivar OnMessage: Event handler for L{ICallChannelManagerEvents.Message} event. See L{EventHandlingBase} for more information on events.
@type OnMessage: callable
@ivar OnCreated: Event handler for L{ICallChannelManagerEvents.Created} event. See L{EventHandlingBase} for more information on events.
@type OnCreated: callable
'''<def_stmt>__del__ self<block_start><if_stmt>self._Application<block_start>self._Application.Delete()<line_sep>self._Application=<none><line_sep>self._Skype.UnregisterEventHandler('ApplicationStreams' self._OnApplicationStreams)<line_sep>self._Skype.UnregisterEventHandler('ApplicationReceiving' self._OnApplicationReceiving)<line_sep>self._Skype.UnregisterEventHandler('ApplicationDatagram' self._OnApplicationDatagram)<block_end><block_end><def_stmt>__init__ self Events=<none><block_start>'''__init__.
@param Events: Events
@type Events: An optional object with event handlers. See L{EventHandlingBase} for more information on events.
'''<line_sep>EventHandlingBase.__init__(self)<if_stmt>Events<block_start>self._SetEventHandlerObj(Events)<block_end>self._Skype=<none><line_sep>self._CallStatusEventHandler=<none><line_sep>self._ApplicationStreamsEventHandler=<none><line_sep>self._ApplicationReceivingEventHandler=<none><line_sep>self._ApplicationDatagramEventHandler=<none><line_sep>self._Application=<none><line_sep>self._Name=u'CallChannelManager'<line_sep>self._ChannelType=cctReliable<line_sep>self._Channels=[]<block_end><def_stmt>_OnApplicationDatagram self pApp pStream Text<block_start><if_stmt>pApp<eq>self._Application<block_start><for_stmt>ch self._Channels<block_start>msg=ICallChannelMessage(Text)<line_sep>self._CallEventHandler('Message' self ch msg)<line_sep><break><block_end><block_end><block_end><block_end><def_stmt>_OnApplicationReceiving self pApp pStreams<block_start><if_stmt>pApp<eq>self._Application<block_start><for_stmt>ch self._Channels<block_start><if_stmt>ch.Stream<in>pStreams<block_start>msg=ICallChannelMessage(ch.Stream.Read())<line_sep>self._CallEventHandler('Message' self ch msg)<block_end><block_end><block_end><block_end><def_stmt>_OnApplicationStreams self pApp pStreams<block_start><if_stmt>pApp<eq>self._Application<block_start><for_stmt>ch self._Channels<block_start><if_stmt>ch.Stream<not><in>pStreams<block_start>self._Channels.remove(ch)<line_sep>self._CallEventHandler('Channels' self tuple(self._Channels))<block_end><block_end><block_end><block_end><def_stmt>_OnCallStatus self pCall Status<block_start><if_stmt>Status<eq>clsRinging<block_start><if_stmt>self._Application<is><none><block_start>self.CreateApplication()<block_end>self._Application.Connect(pCall.PartnerHandle <true>)<for_stmt>stream self._Application.Streams<block_start><if_stmt>stream.PartnerHandle<eq>pCall.PartnerHandle<block_start>self._Channels.append(ICallChannel(self pCall stream self._ChannelType))<line_sep>self._CallEventHandler('Channels' self tuple(self._Channels))<line_sep><break><block_end><block_end><block_end><elif_stmt>Status<in>(clsCancelled clsFailed clsFinished clsRefused clsMissed)<block_start><for_stmt>ch self._Channels<block_start><if_stmt>ch.Call<eq>pCall<block_start>self._Channels.remove(ch)<line_sep>self._CallEventHandler('Channels' self tuple(self._Channels))<try_stmt><block_start>ch.Stream.Disconnect()<block_end><except_stmt>ISkypeError<block_start><pass><block_end><break><block_end><block_end><block_end><block_end><def_stmt>Connect self Skype<block_start>'''Connects this call channel manager instance to Skype. This is the first thing you should
do after creating this object.
@param Skype: Skype object
@type Skype: L{ISkype}
@see: L{Disconnect}
'''<line_sep>self._Skype=Skype<line_sep>self._Skype.RegisterEventHandler('CallStatus' self._OnCallStatus)<block_end><def_stmt>CreateApplication self ApplicationName=<none><block_start>'''Creates an APP2APP application context. The application is automatically created using
L{IApplication.Create<application.IApplication.Create>}.
@param ApplicationName: Application name
@type ApplicationName: unicode
'''<if_stmt>ApplicationName<is><not><none><block_start>self.Name=ApplicationName<block_end>self._Application=self._Skype.Application(self.Name)<line_sep>self._Skype.RegisterEventHandler('ApplicationStreams' self._OnApplicationStreams)<line_sep>self._Skype.RegisterEventHandler('ApplicationReceiving' self._OnApplicationReceiving)<line_sep>self._Skype.RegisterEventHandler('ApplicationDatagram' self._OnApplicationDatagram)<line_sep>self._Application.Create()<line_sep>self._CallEventHandler('Created' self)<block_end><def_stmt>Disconnect self<block_start>'''Disconnects from Skype.
@see: L{Connect}
'''<line_sep>self._Skype.UnregisterEventHandler('CallStatus' self._OnCallStatus)<line_sep>self._Skype=<none><block_end><def_stmt>_GetChannels self<block_start><return>tuple(self._Channels)<block_end>Channels=property(_GetChannels doc='''All call data channels.
@type: tuple of L{ICallChannel}
''')<def_stmt>_GetChannelType self<block_start><return>self._ChannelType<block_end><def_stmt>_SetChannelType self ChannelType<block_start>self._ChannelType=ChannelType<block_end>ChannelType=property(_GetChannelType _SetChannelType doc='''Queries/sets the default channel type.
@type: L{Call channel type<enums.cctUnknown>}
''')<def_stmt>_GetCreated self<block_start><return>bool(self._Application)<block_end>Created=property(_GetCreated doc='''Returns True if the application context has been created.
@type: bool
''')<def_stmt>_GetName self<block_start><return>self._Name<block_end><def_stmt>_SetName self Name<block_start>self._Name=unicode(Name)<block_end>Name=property(_GetName _SetName doc='''Queries/sets the application context name.
@type: unicode
''')<block_end><class_stmt>ICallChannelManagerEvents(object)<block_start>'''Events defined in L{ICallChannelManager}.
See L{EventHandlingBase} for more information on events.
'''<def_stmt>Channels self Manager Channels<block_start>'''This event is triggered when list of call channels changes.
@param Manager: Manager
@type Manager: L{ICallChannelManager}
@param Channels: Channels
@type Channels: tuple of L{ICallChannel}
'''<block_end><def_stmt>Created self Manager<block_start>'''This event is triggered when the application context has successfully been created.
@param Manager: Manager
@type Manager: L{ICallChannelManager}
'''<block_end><def_stmt>Message self Manager Channel Message<block_start>'''This event is triggered when a call channel message has been received.
@param Manager: Manager
@type Manager: L{ICallChannelManager}
@param Channel: Channel
@type Channel: L{ICallChannel}
@param Message: Message
@type Message: L{ICallChannelMessage}
'''<block_end><block_end>ICallChannelManager._AddEvents(ICallChannelManagerEvents)<class_stmt>ICallChannelMessage(object)<block_start>'''Represents a call channel message.
'''<def_stmt>__init__ self Text<block_start>'''__init__.
@param Text: Text
@type Text: unicode
'''<line_sep>self._Text=Text<block_end><def_stmt>_GetText self<block_start><return>self._Text<block_end><def_stmt>_SetText self Text<block_start>self._Text=Text<block_end>Text=property(_GetText _SetText doc='''Queries/sets message text.
@type: unicode
''')<block_end>
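# A minimal usage sketch based on the ICallChannelManager docstring above; it
# assumes a running Skype client and the package-level Skype4Py aliases that the
# docstring mentions. The echo handler below is purely illustrative.
if __name__ == '__main__':
    import Skype4Py

    def on_channel_message(manager, channel, message):
        # Echo every received channel message back over the same channel.
        print('received: %s' % message.Text)
        channel.SendTextMessage(message.Text)

    skype = Skype4Py.Skype()
    ccm = Skype4Py.CallChannelManager()
    ccm.OnMessage = on_channel_message
    ccm.Connect(skype)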
|
<import_stmt>os<import_from_stmt>..tempdir NamedFileInTemporaryDirectory<import_from_stmt>..tempdir TemporaryWorkingDirectory<def_stmt>test_named_file_in_temporary_directory <block_start><with_stmt>NamedFileInTemporaryDirectory('filename')<as>file<block_start>name=file.name<assert_stmt><not>file.closed<assert_stmt>os.path.exists(name)<line_sep>file.write(b'test')<block_end><assert_stmt>file.closed<assert_stmt><not>os.path.exists(name)<block_end><def_stmt>test_temporary_working_directory <block_start><with_stmt>TemporaryWorkingDirectory()<as>dir<block_start><assert_stmt>os.path.exists(dir)<assert_stmt>os.path.realpath(os.curdir)<eq>os.path.realpath(dir)<block_end><assert_stmt><not>os.path.exists(dir)<assert_stmt>os.path.abspath(os.curdir)<ne>dir<block_end>
|
# Lint as: python3
"""
Texture synth experiments.
"""<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>absl logging<line_sep>FLAGS=flags.FLAGS<import_from_stmt>self_organising_systems.texture_ca.config cfg<line_sep>tcfg=cfg.texture_ca<import_from_stmt>self_organising_systems.texture_ca.losses StyleModel Inception<import_from_stmt>self_organising_systems.shared.video VideoWriter<import_from_stmt>self_organising_systems.shared.util tile2d Bunch<import_from_stmt>self_organising_systems.texture_ca.ca CAModel to_rgb<import_stmt>tensorflow<as>tf<line_sep># TF voodoo during migration period...
tf.compat.v1.enable_v2_behavior()<import_stmt>numpy<as>np<def_stmt>main _<block_start>texture_synth_trainer=TextureSynthTrainer()<line_sep>texture_synth_trainer.train()<block_end><class_stmt>SamplePool<block_start><def_stmt>__init__ self * _parent=<none> _parent_idx=<none> **slots<block_start>self._parent=_parent<line_sep>self._parent_idx=_parent_idx<line_sep>self._slot_names=slots.keys()<line_sep>self._size=<none><for_stmt>k,v slots.items()<block_start><if_stmt>self._size<is><none><block_start>self._size=len(v)<block_end><assert_stmt>self._size<eq>len(v)<line_sep>setattr(self k np.asarray(v))<block_end><block_end><def_stmt>sample self n<block_start>idx=np.random.choice(self._size n <false>)<line_sep>batch={k:getattr(self k)[idx]<for>k self._slot_names}<line_sep>batch=SamplePool(**batch _parent=self _parent_idx=idx)<line_sep><return>batch<block_end><def_stmt>commit self<block_start><for_stmt>k self._slot_names<block_start>getattr(self._parent k)[self._parent_idx]=getattr(self k)<block_end><block_end><block_end><def_stmt>create_loss_model <block_start>loss_type,loss_params=tcfg.objective.split(':' 1)<if_stmt>loss_type<eq>"style"<block_start>texture_fn=loss_params<line_sep>input_texture_path="%s/%s"%(tcfg.texture_dir texture_fn)<line_sep>loss_model=StyleModel(input_texture_path)<block_end><elif_stmt>loss_type<eq>"inception"<block_start>layer_name,ch=loss_params.split(':')<line_sep>loss_model=Inception(layer_name int(ch))<block_end><return>loss_model<block_end><class_stmt>TextureSynthTrainer<block_start><def_stmt>__init__ self loss_model=<none><block_start>self.experiment_log_dir="%s/%s"%(cfg.logdir cfg.experiment_name)<line_sep>self.writer=tf.summary.create_file_writer(self.experiment_log_dir)<if_stmt>loss_model<is><none><block_start>loss_model=create_loss_model()<block_end>self.loss_model=loss_model<line_sep>self.ca=CAModel()<if_stmt>tcfg.ancestor_npy<block_start>self.ancestor_ca=CAModel()<line_sep>ancestor_fn="%s/%s"%(tcfg.ancestor_dir tcfg.ancestor_npy)<line_sep>self.ancestor_ca.load_params(ancestor_fn)<line_sep>self.ca.load_params(ancestor_fn)<line_sep>logging.info("loaded pre-trained model %s"%tcfg.ancestor_npy)<block_end>self.loss_log=[]<line_sep>self.pool=SamplePool(x=self.seed_fn(tcfg.pool_size))<line_sep>lr_sched=tf.keras.optimizers.schedules.PiecewiseConstantDecay([1000] [tcfg.lr tcfg.lr<times>0.1])<line_sep>self.trainer=tf.keras.optimizers.Adam(lr_sched)<block_end><def_stmt>visualize_batch_tf self x0 x step_num<block_start>vis0=np.hstack(to_rgb(x0))<line_sep>vis1=np.hstack(to_rgb(x))<line_sep>vis=np.vstack([vis0 vis1])<line_sep>tf.summary.image("batch_vis" vis[<none> <ellipsis>])<block_end><def_stmt>train self<block_start><with_stmt>self.writer.as_default()<block_start><for_stmt>_ range(tcfg.train_steps+1)<block_start>step_num=len(self.loss_log)<line_sep>step=self.train_step()<if_stmt>step_num%50<eq>0<or>step_num<eq>tcfg.train_steps<block_start>self.visualize_batch_tf(step.x0 step.batch.x step_num)<line_sep>self.ca.save_params("%s/%s.npy"%(cfg.logdir cfg.experiment_name))<block_end>logging.info('step: %d, log10(loss): %s, loss: %s'%(len(self.loss_log) np.log10(step.loss) step.loss.numpy()))<block_end>self.save_video("%s/%s.mp4"%(cfg.logdir cfg.experiment_name) self.ca.embody)<block_end><block_end><def_stmt>train_step 
self<block_start>step_num=len(self.loss_log)<line_sep>tf.summary.experimental.set_step(step_num)<line_sep>batch=self.pool.sample(tcfg.batch_size)<line_sep>x0=batch.x.copy()<if_stmt>step_num%2<eq>0<block_start>x0[:1]=self.seed_fn(1)<block_end>batch.x[:],loss=self._train_step(x0)<line_sep>batch.commit()<line_sep>tf.summary.scalar("loss" loss)<line_sep>self.loss_log.append(loss.numpy())<line_sep><return>Bunch(batch=batch x0=x0 loss=loss step_num=step_num)<block_end>@tf.function<def_stmt>_train_step self x<block_start>iter_n=tf.random.uniform([] tcfg.rollout_len_min tcfg.rollout_len_max tf.int32)<with_stmt>tf.GradientTape(persistent=<false>)<as>g<block_start>f=self.ca.embody()<for_stmt>i tf.range(iter_n)<block_start>x=f(x)<block_end>loss=self.loss_model(to_rgb(x))<block_end>grads=g.gradient(loss self.ca.params)<line_sep>grads=[g/(tf.norm(g)+1e-8)<for>g grads]<line_sep>self.trainer.apply_gradients(zip(grads self.ca.params))<line_sep><return>x loss<block_end><def_stmt>seed_fn self n<block_start>states=np.zeros([n tcfg.img_size tcfg.img_size tcfg.channel_n] np.float32)<line_sep><return>states<block_end><def_stmt>save_video self path f<block_start>state=self.seed_fn(1)<line_sep>f=self.ca.embody()<if_stmt>tcfg.ancestor_npy<block_start>state_ancestor=self.seed_fn(1)<line_sep>f_ancestor=self.ancestor_ca.embody()<block_end><with_stmt>VideoWriter(path 60.0)<as>vid<block_start><for_stmt>i range(tcfg.viz_rollout_len)# visualize the RGB + hidden states.
<block_start><if_stmt>tcfg.hidden_viz_group<block_start>padding_channel_len=(3-state[0].shape[2]%3)%3<line_sep>splitframe=np.split(np.pad(state[0] ((0 0) (0 0) (0 padding_channel_len)) mode='constant') (state[0].shape[2]+padding_channel_len)/3 2)<block_end><else_stmt><block_start>hidden=np.transpose(np.repeat(state[0][<ellipsis> 3: <none>] 3 -1) (2 0 1 3))<line_sep>splitframe=np.concatenate([state[0][<none> <ellipsis> :3] hidden] 0)<block_end>frame=to_rgb(tile2d(splitframe))<line_sep>vid.add(frame)<if_stmt>tcfg.ancestor_npy<block_start>c_state=f(state fire_rate=0.5)<line_sep>a_state=f_ancestor(state fire_rate=0.5)<line_sep>progress=max(1.25<times>(i/tcfg.viz_rollout_len)-0.25 0.0)<line_sep>state=(1-progress)<times>c_state+progress<times>a_state<block_end><else_stmt><block_start>state=f(state fire_rate=0.5)<block_end><block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
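# A small, self-contained sketch of the SamplePool sample/commit pattern defined
# above; the pool size and state shape are arbitrary illustrative values.
def _sample_pool_demo():
    pool = SamplePool(x=np.zeros([8, 4], np.float32))
    batch = pool.sample(2)   # draw 2 states without replacement
    batch.x[:] += 1.0        # modify the sampled states in place
    batch.commit()           # write the modified states back into the parent pool
    return pool.x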
|
# encoding=utf8
<import_stmt>numpy<as>np<import_from_stmt>datasets load_metric<line_sep># the code below refers to the https://github.com/Yale-LILY/FeTaQA/blob/main/end2end/train.py
<def_stmt>postprocess_text preds references_s metric_name<block_start>preds=[pred.strip()<for>pred preds]<line_sep>references_s=[[reference.strip()<for>reference references]<for>references references_s]<line_sep># rougeLSum expects newline after each sentence
<if_stmt>metric_name<in>["sacrebleu"]# since hf sacrebleu only support references with same length, we have to pad them into the same length
<block_start>ref_max_len=max([len(ref)<for>ref references_s])<for_stmt>ref references_s<block_start><for_stmt>_ range(ref_max_len-len(ref))<block_start>ref.append(<none>)# see https://github.com/mjpost/sacrebleu/pull/132
print(ref)<block_end><block_end><block_end><elif_stmt>metric_name<eq>"bleu"<block_start>preds=[pred.split(' ')<for>pred preds]<line_sep>references_s=[[reference.split(' ')<for>reference references]<for>references references_s]<block_end><else_stmt><block_start><pass><block_end><return>preds references_s<block_end><class_stmt>EvaluateTool(object)<block_start><def_stmt>__init__ self args<block_start>self.args=args<block_end><def_stmt>evaluate self preds golds section<block_start>summary={}<line_sep>references_s=[item["final_sentences"]<for>item golds]<assert_stmt>len(preds)<eq>len(references_s)<line_sep>metric_list=[]<if_stmt>section<in>['train' 'dev']<block_start>metric_list=['sacrebleu']<block_end><elif_stmt>section<eq>'test'<block_start>metric_list=["sacrebleu" "bleurt"]<block_end># TODO: add PARENT
<for_stmt>metric_name metric_list<block_start>metric=load_metric(metric_name)<line_sep>processed_preds,processed_golds=postprocess_text(preds references_s metric_name)<if_stmt>metric_name<eq>"sacrebleu"<block_start>res=metric.compute(predictions=processed_preds references=processed_golds)<line_sep>summary[metric_name]=res["score"]<times>0.01<block_end><elif_stmt>metric_name<eq>"bleurt"# We refer to the realization in https://github.com/google-research/language/blob/13fd14e1b285002412252097586f8fe405ba8a24/language/totto/totto_bleurt_eval.py#L94-L131
<block_start>multi_references=[[] [] []]<for_stmt>references processed_golds# here "references" mean references for one prediction string.
<block_start><if_stmt>len(references)<eq>2<block_start>multi_references[2].append('')<block_end><elif_stmt>len(references)<eq>3<block_start>multi_references[2].append(references[2])<block_end><else_stmt><block_start><raise>ValueError("The number of references for each candidate should be 2 or 3 in the ToTTo dataset.")<block_end>multi_references[0].append(references[0])<line_sep>multi_references[1].append(references[1])<block_end>multi_bleurt_scores=[]<for_stmt>references multi_references<block_start>multi_bleurt_scores.append(metric.compute(predictions=processed_preds references=references))<block_end><assert_stmt>len(multi_references)<eq>3<line_sep>avg_bleurt_scores=[]<for_stmt>i range(len(processed_preds))# All examples have at least two references but some do not have three.
<block_start><assert_stmt>multi_references[0][i]<and>multi_references[1][i]<line_sep>r2=multi_references[2][i]<if_stmt>r2# Take average over 3 references.
<block_start>score_i=(multi_bleurt_scores[0][i]+multi_bleurt_scores[1][i]+multi_bleurt_scores[2][i])/3<block_end><else_stmt># print("only two refs")
# Take average over two references.
<block_start>score_i=(multi_bleurt_scores[0][i]+multi_bleurt_scores[1][i])/2<block_end>avg_bleurt_scores.append(score_i)<block_end>summary["bleurt"]=np.mean(avg_bleurt_scores)<block_end><else_stmt><block_start>res=metric.compute(predictions=processed_preds references=processed_golds)<line_sep>summary[metric_name]=res[metric_name]<block_end><block_end><return>summary<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>json<with_stmt>open("predictions_eval_3.179650238473768.json")<as>f<block_start>test_data=json.load(f)<block_end><with_stmt>open("dev_result.txt")<as>f<block_start>preds=[line.strip()<for>line f.readlines()]<block_end>evaluator=EvaluateTool(args=<none>)<line_sep>score=evaluator.evaluate(preds test_data section="test")<line_sep>print(score)<block_end>
|
<import_from_stmt>logging getLogger<import_from_stmt>src.configurations PlatformConfigurations<import_from_stmt>src.db cruds models schemas<import_from_stmt>src.db.database get_context_db<line_sep>logger=getLogger(__name__)<def_stmt>initialize_database engine checkfirst:bool=<true><block_start>models.create_tables(engine=engine checkfirst=checkfirst)<with_stmt>get_context_db()<as>db<block_start>sample_data=PlatformConfigurations.sample_data<line_sep>items=[schemas.ItemBase(values=values)<for>values sample_data]<line_sep>cruds.register_items(db=db items=items commit=<true>)<block_end><block_end>
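# A hedged wiring sketch: the SQLite URL below is an illustrative assumption; in
# the real service the engine and session come from the project's database
# configuration (src.db.database), not from this ad-hoc engine.
if __name__ == "__main__":
    from sqlalchemy import create_engine

    engine = create_engine("sqlite:///sample.db")
    initialize_database(engine=engine, checkfirst=True)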
|
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Type checking functionality"""<import_stmt>functools<import_stmt>inspect<import_from_stmt>typing Any Callable Dict List Optional Tuple Union<import_stmt>typing<def_stmt>_is_none_type type_:Any<arrow>bool<block_start><return>type_<is><none><or>type_<is>type(<none>)<block_end><if_stmt>hasattr(typing "_GenericAlias")<block_start><class_stmt>_Subtype<block_start>@staticmethod<def_stmt>_origin type_:Any<arrow>Any<block_start><if_stmt>isinstance(type_ typing._GenericAlias)# type: ignore # pylint: disable=protected-access
<block_start><return>type_.__origin__<block_end><return><none><block_end>@staticmethod<def_stmt>list_ type_:Any<arrow>Any<block_start><if_stmt>_Subtype._origin(type_)<is>list<block_start>(subtype )=type_.__args__<line_sep><return>[subtype]<block_end><return><none><block_end>@staticmethod<def_stmt>tuple_ type_:Any<arrow>Optional[List[type]]<block_start><if_stmt>_Subtype._origin(type_)<is>tuple<block_start>subtypes=type_.__args__<line_sep><return>subtypes<block_end><return><none><block_end>@staticmethod<def_stmt>optional type_:Any<arrow>Optional[List[type]]<block_start><if_stmt>_Subtype._origin(type_)<is>Union<block_start>subtypes=type_.__args__<if_stmt>len(subtypes)<eq>2<and>_is_none_type(subtypes[1])<block_start><return>[subtypes[0]]<block_end><block_end><return><none><block_end>@staticmethod<def_stmt>union type_:Any<arrow>Optional[List[type]]<block_start><if_stmt>_Subtype._origin(type_)<is>Union<block_start>subtypes=type_.__args__<if_stmt>len(subtypes)<ne>2<or><not>_is_none_type(subtypes[1])<block_start><return>list(subtypes)<block_end><block_end><return><none><block_end><block_end><block_end><elif_stmt>hasattr(typing "_Union")<block_start><class_stmt>_Subtype# type: ignore
<block_start>@staticmethod<def_stmt>list_ type_:Any<arrow>Optional[List[type]]<block_start><if_stmt>isinstance(type_ typing.GenericMeta)# type: ignore # pylint: disable=no-member
<block_start><if_stmt>type_.__name__<eq>"List"<block_start>(subtype )=type_.__args__# type: ignore # pylint: disable=no-member
<return>[subtype]<block_end><block_end><return><none><block_end>@staticmethod<def_stmt>tuple_ type_:Any<arrow>Optional[List[type]]<block_start><if_stmt>isinstance(type_ typing.GenericMeta)# type: ignore # pylint: disable=no-member
<block_start><if_stmt>type_.__name__<eq>"Tuple"<block_start>subtypes=type_.__args__# type: ignore # pylint: disable=no-member
<return>subtypes<block_end><block_end><return><none><block_end>@staticmethod<def_stmt>optional type_:Any<arrow>Optional[List[type]]<block_start><if_stmt>isinstance(type_ typing._Union)# type: ignore # pylint: disable=no-member,protected-access
<block_start>subtypes=type_.__args__<if_stmt>len(subtypes)<eq>2<and>_is_none_type(subtypes[1])<block_start><return>[subtypes[0]]<block_end><block_end><return><none><block_end>@staticmethod<def_stmt>union type_:Any<arrow>Optional[List[type]]<block_start><if_stmt>isinstance(type_ typing._Union)# type: ignore # pylint: disable=no-member,protected-access
<block_start>subtypes=type_.__args__<if_stmt>len(subtypes)<ne>2<or><not>_is_none_type(subtypes[1])<block_start><return>list(subtypes)<block_end><block_end><return><none><block_end><block_end><block_end><def_stmt>_dispatcher type_:Any<arrow>Tuple[str List[type]]<block_start><if_stmt>_is_none_type(type_)<block_start><return>"none" []<block_end>subtype=_Subtype.list_(type_)<if_stmt>subtype<is><not><none><block_start><return>"list" subtype<block_end>subtype=_Subtype.tuple_(type_)<if_stmt>subtype<is><not><none><block_start><return>"tuple" subtype<block_end>subtype=_Subtype.optional(type_)<if_stmt>subtype<is><not><none><block_start><return>"optional" subtype<block_end>subtype=_Subtype.union(type_)<if_stmt>subtype<is><not><none><block_start><return>"union" subtype<block_end><return>"atomic" [type_]<block_end>_TYPE2STR:Dict[Any Callable]={"none":<lambda>:"None" "atomic":<lambda>t:str(t.__name__) "list":<lambda>t:f"List[{_type2str(t)}]" "tuple":<lambda>*t:f"Tuple[{', '.join([_type2str(x)<for>x t])}]" "optional":<lambda>t:f"Optional[{_type2str(t)}]" "union":<lambda>*t:f"Union[{', '.join([_type2str(x)<for>x t])}]" }<def_stmt>_type2str type_:Any<arrow>str<block_start>key,subtypes=_dispatcher(type_)<line_sep><return>_TYPE2STR[key](*subtypes)<block_end><def_stmt>_val2type value:Any<block_start><if_stmt>isinstance(value list)<block_start>types=set(_val2type(x)<for>x value)<if_stmt>len(types)<eq>1<block_start><return>List[types.pop()]<block_end># type: ignore
<return>List[Union[tuple(types)]]<block_end># type: ignore
<if_stmt>isinstance(value tuple)<block_start>types=tuple(_val2type(x)<for>x value)# type: ignore
<return>Tuple[types]<block_end><return>type(value)<block_end><def_stmt>_type_check_err x:Any name:str expected:Any<arrow>str<block_start><return>(f'"{name}" has wrong type. '<concat>f'Expected "{_type2str(expected)}", '<concat>f'but gets: "{_type2str(_val2type(x))}"')<block_end><def_stmt>_type_check_vtable <arrow>Dict[str Callable]<block_start><def_stmt>_type_check_none v:Any name:str<arrow>Optional[str]<block_start><return><none><if>v<is><none><else>_type_check_err(v name <none>)<block_end><def_stmt>_type_check_atomic v:Any name:str type_:Any<arrow>Optional[str]<block_start><return><none><if>isinstance(v type_)<else>_type_check_err(v name type_)<block_end><def_stmt>_type_check_list v:List[Any] name:str type_:Any<arrow>Optional[str]<block_start><if_stmt><not>isinstance(v (list tuple))<block_start><return>_type_check_err(v name list)<block_end><for_stmt>i,x enumerate(v)<block_start>error_msg=_type_check(x f"{name}[{i}]" type_)<if_stmt>error_msg<is><not><none><block_start><return>error_msg<block_end><block_end><return><none><block_end><def_stmt>_type_check_tuple v:Any name:str *types:Any<arrow>Optional[str]<block_start><if_stmt><not>isinstance(v tuple)<block_start><return>_type_check_err(v name Tuple[types])<block_end><if_stmt>len(types)<ne>len(v)<block_start><return>_type_check_err(v name Tuple[types])<block_end><for_stmt>i,(x type_) enumerate(zip(v types))<block_start>error_msg=_type_check(x f"{name}[{i}]" type_)<if_stmt>error_msg<is><not><none><block_start><return>error_msg<block_end><block_end><return><none><block_end><def_stmt>_type_check_optional v:Any name:str type_:Any<arrow>Optional[str]<block_start><return><none><if>v<is><none><else>_type_check(v name type_)<block_end><def_stmt>_type_check_union v:Any name:str *types:Any<arrow>Optional[str]<block_start><for_stmt>type_ types<block_start>error_msg=_type_check(v name type_)<if_stmt>error_msg<is><none><block_start><return><none><block_end><block_end><return>_type_check_err(v name Union[types])<block_end><return>{"none":_type_check_none "atomic":_type_check_atomic "list":_type_check_list "tuple":_type_check_tuple "optional":_type_check_optional "union":_type_check_union }<block_end>_TYPE_CHECK:Dict[Any Callable]=_type_check_vtable()<def_stmt>_type_check v:Any name:str type_:Any<arrow>Optional[str]<block_start>key,subtypes=_dispatcher(type_)<line_sep><return>_TYPE_CHECK[key](v name *subtypes)<block_end><def_stmt>type_checked func:Callable<arrow>Callable<block_start>"""Type check the input arguments of a function."""<line_sep>sig=inspect.signature(func)<line_sep>@functools.wraps(func)<def_stmt>wrap *args **kwargs<block_start>bound_args=sig.bind(*args **kwargs)<line_sep>bound_args.apply_defaults()<for_stmt>param sig.parameters.values()<block_start><if_stmt>param.annotation<ne>inspect.Signature.empty<block_start>error_msg=_type_check(bound_args.arguments[param.name] param.name param.annotation )<if_stmt>error_msg<is><not><none><block_start>error_msg=f'In "{func.__qualname__}", {error_msg}'<line_sep><raise>TypeError(error_msg)<block_end><block_end><block_end><return>func(*args **kwargs)<block_end><return>wrap<block_end>
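# A short usage sketch of the type_checked decorator defined above; "scale" is a
# made-up example function, not part of this module.
if __name__ == "__main__":

    @type_checked
    def scale(values: List[float], factor: float) -> List[float]:
        return [v * factor for v in values]

    print(scale([1.0, 2.0], 2.0))   # passes the runtime check
    try:
        scale([1.0, 2.0], "2")      # "factor" has the wrong type
    except TypeError as err:
        print(err)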
|
<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>itertools<line_sep>logger=logging.getLogger(__name__)<line_sep># #######################################
# ############ set_action ###############
# #######################################
<def_stmt>ctrl_set_action sim action<block_start>"""
For torque actuators it copies the action into mujoco ctrl field.
For position actuators it sets the target relative to the current qpos.
"""<if_stmt>sim.model.nmocap<g>0<block_start>_,action=np.split(action (sim.model.nmocap<times>7 ))<block_end><if_stmt>sim.data.ctrl<is><not><none><block_start><for_stmt>i range(action.shape[0])<block_start><if_stmt>sim.model.actuator_biastype[i]<eq>0<block_start>sim.data.ctrl[i]=action[i]<block_end><else_stmt><block_start>idx=sim.model.jnt_qposadr[sim.model.actuator_trnid[i 0]]<line_sep>sim.data.ctrl[i]=sim.data.qpos[idx]+action[i]<block_end><block_end><block_end><block_end># #######################################
# ############ get_reward ###############
# #######################################
<def_stmt>zero_get_reward sim<block_start><return>0.0<block_end><def_stmt>gps_dist sim obj0 obj1<block_start>obj0=sim.data.get_site_xpos(obj0)<line_sep>obj1=sim.data.get_site_xpos(obj1)<line_sep>diff=np.sum(np.square(obj0-obj1))<line_sep><return>diff+0.3<times>np.log(diff+1e-4)<block_end><def_stmt>l2_dist sim obj0 obj1<block_start>obj0=sim.data.get_site_xpos(obj0)<line_sep>obj1=sim.data.get_site_xpos(obj1)<line_sep><return>np.sqrt(np.mean(np.square(obj0-obj1)))<block_end># #######################################
# ########### get_diverged ##############
# #######################################
<def_stmt>false_get_diverged sim<block_start><return><false> 0.0<block_end><def_stmt>simple_get_diverged sim<block_start><if_stmt>sim.data.qpos<is><not><none><and>(np.max(np.abs(sim.data.qpos))<g>1000.0<or>np.max(np.abs(sim.data.qvel))<g>100.0)<block_start><return><true> -20.0<block_end><return><false> 0.0<block_end># #######################################
# ########### get_info ##############
# #######################################
<def_stmt>empty_get_info sim<block_start><return>{}<block_end># #######################################
# ############## get_obs ################
# #######################################
<def_stmt>flatten_get_obs sim<block_start><if_stmt>sim.data.qpos<is><none><block_start><return>np.zeros(0)<block_end><return>np.concatenate([sim.data.qpos sim.data.qvel])<block_end><def_stmt>image_get_obs sim<block_start><return>sim.render(100 100 camera_name="rgb")<block_end># Helpers
<def_stmt>get_body_geom_ids model body_name<block_start>""" Returns geom_ids in the body. """<line_sep>body_id=model.body_name2id(body_name)<line_sep>geom_ids=[]<for_stmt>geom_id range(model.ngeom)<block_start><if_stmt>model.geom_bodyid[geom_id]<eq>body_id<block_start>geom_ids.append(geom_id)<block_end><block_end><return>geom_ids<block_end><def_stmt>change_geom_alpha model body_name_prefix new_alpha<block_start>''' Changes the visual transparency (alpha) of an object'''<for_stmt>body_name model.body_names<block_start><if_stmt>body_name.startswith(body_name_prefix)<block_start><for_stmt>geom_id get_body_geom_ids(model body_name)<block_start>model.geom_rgba[geom_id 3]=new_alpha<block_end><block_end><block_end><block_end><def_stmt>joint_qpos_idxs sim joint_name<block_start>''' Gets indexes for the specified joint's qpos values'''<line_sep>addr=sim.model.get_joint_qpos_addr(joint_name)<if_stmt>isinstance(addr tuple)<block_start><return>list(range(addr[0] addr[1]))<block_end><else_stmt><block_start><return>[addr]<block_end><block_end><def_stmt>qpos_idxs_from_joint_prefix sim prefix<block_start>''' Gets indexes for the qpos values of all joints matching the prefix'''<line_sep>qpos_idxs_list=[joint_qpos_idxs(sim name)<for>name sim.model.joint_names<if>name.startswith(prefix)]<line_sep><return>list(itertools.chain.from_iterable(qpos_idxs_list))<block_end><def_stmt>joint_qvel_idxs sim joint_name<block_start>''' Gets indexes for the specified joint's qvel values'''<line_sep>addr=sim.model.get_joint_qvel_addr(joint_name)<if_stmt>isinstance(addr tuple)<block_start><return>list(range(addr[0] addr[1]))<block_end><else_stmt><block_start><return>[addr]<block_end><block_end><def_stmt>qvel_idxs_from_joint_prefix sim prefix<block_start>''' Gets indexes for the qvel values of all joints matching the prefix'''<line_sep>qvel_idxs_list=[joint_qvel_idxs(sim name)<for>name sim.model.joint_names<if>name.startswith(prefix)]<line_sep><return>list(itertools.chain.from_iterable(qvel_idxs_list))<block_end><def_stmt>body_names_from_joint_prefix sim prefix<block_start>''' Returns a list of body names that contain joints matching the given prefix'''<line_sep><return>[sim.model.body_id2name(sim.model.jnt_bodyid[sim.model.joint_name2id(name)])<for>name sim.model.joint_names<if>name.startswith(prefix)]<block_end>
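# A hedged sketch of how the joint-index helpers above can be combined into a
# flat robot observation; "robot0:" is an assumed joint-name prefix, not a name
# taken from any particular MuJoCo model.
def robot_joint_obs(sim, prefix="robot0:"):
    qpos = sim.data.qpos[qpos_idxs_from_joint_prefix(sim, prefix)]
    qvel = sim.data.qvel[qvel_idxs_from_joint_prefix(sim, prefix)]
    return np.concatenate([qpos, qvel])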
|
<import_stmt>pytest<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy_utils.functions mock_engine render_expression render_statement <class_stmt>TestRender(object)<block_start>@pytest.fixture<def_stmt>User self Base<block_start><class_stmt>User(Base)<block_start>__tablename__='user'<line_sep>id=sa.Column(sa.Integer autoincrement=<true> primary_key=<true>)<line_sep>name=sa.Column(sa.Unicode(255))<block_end><return>User<block_end>@pytest.fixture<def_stmt>init_models self User<block_start><pass><block_end><def_stmt>test_render_orm_query self session User<block_start>query=session.query(User).filter_by(id=3)<line_sep>text=render_statement(query)<assert_stmt>'SELECT user.id, user.name'<in>text<assert_stmt>'FROM user'<in>text<assert_stmt>'WHERE user.id = 3'<in>text<block_end><def_stmt>test_render_statement self session User<block_start>statement=User.__table__.select().where(User.id<eq>3)<line_sep>text=render_statement(statement bind=session.bind)<assert_stmt>'SELECT user.id, user.name'<in>text<assert_stmt>'FROM user'<in>text<assert_stmt>'WHERE user.id = 3'<in>text<block_end><def_stmt>test_render_statement_without_mapper self session<block_start>statement=sa.select([sa.text('1')])<line_sep>text=render_statement(statement bind=session.bind)<assert_stmt>'SELECT 1'<in>text<block_end><def_stmt>test_render_ddl self engine User<block_start>expression='User.__table__.create(engine)'<line_sep>stream=render_expression(expression engine)<line_sep>text=stream.getvalue()<assert_stmt>'CREATE TABLE user'<in>text<assert_stmt>'PRIMARY KEY'<in>text<block_end><def_stmt>test_render_mock_ddl self engine User# TODO: mock_engine doesn't seem to work with locally scoped variables.
<block_start>self.engine=engine<with_stmt>mock_engine('self.engine')<as>stream<block_start>User.__table__.create(self.engine)<block_end>text=stream.getvalue()<assert_stmt>'CREATE TABLE user'<in>text<assert_stmt>'PRIMARY KEY'<in>text<block_end><block_end>
|
<import_stmt>torch<import_from_stmt>argparse ArgumentParser<line_sep>parser=ArgumentParser()<line_sep>parser.add_argument('--checkpoint_backbone' required=<true> type=str)<line_sep>parser.add_argument('--checkpoint_linear' required=<true> type=str)<line_sep>parser.add_argument('--output_file' required=<true> type=str)<if_stmt>__name__<eq>"__main__"<block_start>args=parser.parse_args()<line_sep>backbone=torch.load(args.checkpoint_backbone)['state_dict']<line_sep>model=torch.load(args.checkpoint_linear)<line_sep>linear=model['state_dict']<line_sep>head_index=model['best_acc_hidx']<line_sep>new_linear={}<for_stmt>key,val linear.items()<block_start>splits=key.split('.')<if_stmt>splits[0]<eq>str(head_index)<block_start>new_linear['.'.join(splits[2:])]=val<block_end><block_end>backbone.update(new_linear)<line_sep>model['state_dict']=backbone<line_sep>print(f"save {head_index}th head with acc {model['best_acc']}")<line_sep>torch.save(model args.output_file)<block_end>
|