# -*- coding:utf-8 -*-
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>logging<import_stmt>os<import_stmt>paddle.fluid<as>fluid<import_stmt>paddlehub<as>hub<import_from_stmt>paddlehub.common.logger logger<import_from_stmt>paddlehub.module.module moduleinfo serving<line_sep>@moduleinfo(name="jieba_paddle" version="1.0.0" summary="jieba_paddle is a Chinese tokenizer using BiGRU based on the PaddlePaddle deep learning framework. For more information, please refer to https://github.com/fxsjy/jieba." author="baidu-paddle" author_email="<EMAIL>" type="nlp/lexical_analysis")<class_stmt>JiebaPaddle(hub.Module)<block_start><def_stmt>_initialize self<block_start><pass><block_end>@serving<def_stmt>cut self sentence use_paddle=<true> cut_all=<false> HMM=<true><block_start>"""
The main function that segments an entire sentence that contains
Chinese characters into separated words.
Args:
sentence(str): The str(unicode) to be segmented.
use_paddle(bool): Whether to use the jieba paddle model. Defaults to True.
cut_all(bool): Model type. True for full pattern, False for accurate pattern.
HMM(bool): Whether to use the Hidden Markov Model.
Returns:
results(list): The word segmentation result of the input sentence.
"""<line_sep>self.check_dependency()<import_stmt>jieba<line_sep>jieba.setLogLevel(logging.ERROR)<line_sep>jieba._compat.setLogLevel(logging.ERROR)<if_stmt>use_paddle<block_start>jieba.enable_paddle()<line_sep>res=" ".join(jieba.cut(sentence use_paddle=<true>))<line_sep>seg_list=res.strip(" ").split(" ")<block_end><else_stmt><block_start>res=" ".join(jieba.cut(sentence cut_all=cut_all HMM=HMM))<line_sep>seg_list=res.strip(" ").split(" ")<block_end><return>seg_list<block_end><def_stmt>check_dependency self<block_start>"""
Check jieba tool dependency.
"""<try_stmt><block_start><import_stmt>jieba<block_end><except_stmt>ImportError<block_start>print('This module requires jieba tools. The running enviroment does not meet the requirments. Please install jieba packages.')<line_sep>exit()<block_end><block_end><def_stmt>cut_for_search self sentence HMM=<true><block_start>"""
Finer segmentation for search engines.
Args:
sentence(str): The str(unicode) to be segmented.
HMM(bool): Whether to use the Hidden Markov Model.
Returns:
results(list): The word segmentation result of the input sentence.
"""<line_sep>self.check_dependency()<import_stmt>jieba<line_sep>jieba.setLogLevel(logging.ERROR)<line_sep>res=" ".join(jieba.cut_for_search(sentence HMM=HMM))<line_sep>seg_list=res.strip(" ").split(" ")<line_sep><return>seg_list<block_end><def_stmt>load_userdict self user_dict<block_start>'''
Load a personalized dictionary to improve the detection rate.
Args:
user_dict(str): A plain text file path. It contains words and their occurrences. Can be a file-like object, or the path of the dictionary file,
whose encoding must be utf-8.
Structure of dict file:
word1 freq1 word_type1
word2 freq2 word_type2
...
Word type may be ignored
'''<line_sep>self.check_dependency()<import_stmt>jieba<line_sep>jieba.setLogLevel(logging.ERROR)<line_sep>jieba.load_userdict(user_dict)<block_end><def_stmt>extract_tags self sentence topK=20 withWeight=<false> allowPOS=() withFlag=<false><block_start>"""
Extract keywords from sentence using TF-IDF algorithm.
Args:
topK(int): how many top keywords to return. `None` for all possible words.
withWeight(bool): if True, return a list of (word, weight);
if False, return a list of words.
allowPOS(tuple): the allowed POS list, e.g. ['ns', 'n', 'vn', 'v', 'nr'].
if the POS of a word is not in this list, it will be filtered.
withFlag(bool): only works when allowPOS is not empty.
if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
Returns:
result(list): The keywords.
"""<line_sep>self.check_dependency()<import_stmt>jieba<import_stmt>jieba.analyse<line_sep>jieba.setLogLevel(logging.ERROR)<line_sep>res=jieba.analyse.extract_tags(sentence topK=topK withWeight=withWeight allowPOS=allowPOS withFlag=withFlag)<line_sep><return>res<block_end><def_stmt>textrank self sentence topK=20 withWeight=<false> allowPOS=('ns' 'n' 'vn' 'v') withFlag=<false><block_start>"""
Extract keywords from sentence using TextRank algorithm.
Args:
topK(int): how many top keywords to return. `None` for all possible words.
withWeight(bool): if True, return a list of (word, weight);
if False, return a list of words.
allowPOS(tuple): the allowed POS list, e.g. ['ns', 'n', 'vn', 'v', 'nr'].
if the POS of a word is not in this list, it will be filtered.
withFlag(bool): only works when allowPOS is not empty.
if True, return a list of pair(word, weight) like posseg.cut
if False, return a list of words
Returns:
result(list): The keywords.
"""<line_sep>self.check_dependency()<import_stmt>jieba<line_sep>jieba.setLogLevel(logging.ERROR)<line_sep>res=jieba.analyse.textrank(sentence topK=topK withWeight=withWeight allowPOS=allowPOS withFlag=withFlag)<line_sep><return>res<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>jb_pd=JiebaPaddle()<line_sep>res=jb_pd.cut(sentence="我来到北京清华大学" use_paddle=<true> )<line_sep>print(res)<line_sep>res=jb_pd.cut(sentence="我来到北京清华大学" use_paddle=<false> cut_all=<true>)<line_sep>print(res)<line_sep>res=jb_pd.cut(sentence="我来到北京清华大学" use_paddle=<false> cut_all=<false>)<line_sep>print(res)<line_sep>res=jb_pd.cut_for_search(sentence="我来到北京清华大学")<line_sep>print(res)<line_sep>res=jb_pd.extract_tags(sentence="我来到北京清华大学")<line_sep>print(res)<line_sep>res=jb_pd.extract_tags(sentence="我来到北京清华大学" withWeight=<true>)<line_sep>print(res)<line_sep>res=jb_pd.textrank(sentence="我来到北京清华大学" withWeight=<true>)<line_sep>print(res)<block_end> |
__author__='royrusso'<import_stmt>json<import_stmt>logging<import_stmt>jmespath<import_stmt>pytest<line_sep>LOGGER=logging.getLogger(__name__)<line_sep>pytest_plugins=["docker_compose"]<line_sep>@pytest.mark.es_versions<def_stmt>test_get_cluster_summary session_scoped_container_getter fixture<block_start>fixture.add_all_clusters(session_scoped_container_getter clear_first=<true>)<line_sep>response=fixture.app.get('/api/clusters/%s/_summary'%fixture.cluster_name)<assert_stmt>200<eq>response.status_code<line_sep>res=fixture.get_response_data(response)<assert_stmt>fixture.has_all_keys(fixture.config.KEYS_CLUSTER_SUMMARY res['data'][0].keys())<is><true><block_end>@pytest.mark.es_versions<def_stmt>test_get_cluster_health fixture<block_start>response=fixture.app.get('/api/clusters/%s/_health'%fixture.cluster_name)<assert_stmt>200<eq>response.status_code<line_sep>res=fixture.get_response_data(response)<assert_stmt>fixture.has_all_keys(fixture.config.KEYS_CLUSTER_HEALTH res['data'][0].keys())<is><true><block_end>@pytest.mark.es_versions<def_stmt>test_get_cluster_state fixture<block_start>response=fixture.app.get('/api/clusters/%s/_state'%fixture.cluster_name)<assert_stmt>200<eq>response.status_code<line_sep>res=fixture.get_response_data(response)<assert_stmt>fixture.has_all_keys(fixture.config.KEYS_CLUSTER_STATE res['data'][0].keys())<is><true><block_end>@pytest.mark.es_versions<def_stmt>test_get_cluster_stats fixture<block_start>response=fixture.app.get('/api/clusters/%s/_stats'%fixture.cluster_name)<assert_stmt>200<eq>response.status_code<line_sep>res=fixture.get_response_data(response)<assert_stmt>fixture.has_all_keys(fixture.config.KEYS_CLUSTER_STATS res['data'][0].keys())<is><true><block_end>@pytest.mark.es_versions<def_stmt>test_get_cluster_pending_tasks fixture<block_start>response=fixture.app.get('/api/clusters/%s/_pending_tasks'%fixture.cluster_name)<assert_stmt>200<eq>response.status_code<line_sep>res=fixture.get_response_data(response)<assert_stmt>fixture.has_all_keys(fixture.config.KEYS_CLUSTER_PENDING_TASKS res['data'][0].keys())<is><true><block_end>@pytest.mark.es_versions<def_stmt>test_get_cluster_settings fixture<block_start>response=fixture.app.get('/api/clusters/%s/_settings'%fixture.cluster_name)<assert_stmt>200<eq>response.status_code<line_sep>res=fixture.get_response_data(response)<assert_stmt>fixture.has_all_keys(fixture.config.KEYS_CLUSTER_SETTINGS res['data'][0].keys())<is><true><block_end>@pytest.mark.es_versions<def_stmt>test_put_cluster_settings fixture<block_start>body={"transient":{"discovery.zen.minimum_master_nodes":1}}<line_sep>response=fixture.app.put('/api/clusters/%s/_settings'%fixture.cluster_name data=json.dumps(body))<assert_stmt>200<eq>response.status_code<line_sep>response=fixture.app.get('/api/clusters/%s/_settings'%fixture.cluster_name)<assert_stmt>200<eq>response.status_code<line_sep>res=fixture.get_response_data(response)<assert_stmt>jmespath.search("transient.discovery.zen.minimum_master_nodes" res['data'][0])<eq>"1"<block_end> |
<import_stmt>numpy<as>np<import_stmt>pymc<as>pm<line_sep>challenger_data=np.genfromtxt("../../Chapter2_MorePyMC/data/challenger_data.csv" skip_header=1 usecols=[1 2] missing_values="NA" delimiter=",")<line_sep># drop the NA values
challenger_data=challenger_data[~np.isnan(challenger_data[: 1])]<line_sep>temperature=challenger_data[: 0]<line_sep>D=challenger_data[: 1]# defect or not?
beta=pm.Normal("beta" 0 0.001 value=0)<line_sep>alpha=pm.Normal("alpha" 0 0.001 value=0)<line_sep>@pm.deterministic<def_stmt>p temp=temperature alpha=alpha beta=beta<block_start><return>1.0/(1.+np.exp(beta<times>temp+alpha))<block_end>observed=pm.Bernoulli("bernoulli_obs" p value=D observed=<true>)<line_sep>model=pm.Model([observed beta alpha])<line_sep># mysterious code to be explained in Chapter 3
map_=pm.MAP(model)<line_sep>map_.fit()<line_sep>mcmc=pm.MCMC(model)<line_sep>mcmc.sample(260000 220000 2)<line_sep> |
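After sampling, the posterior draws can be pulled from the trace for inspection. A minimal sketch, assuming the PyMC2 trace API and the variable names defined above:

# Posterior samples for alpha and beta (burn-in and thinning were applied by sample()).
alpha_samples = mcmc.trace("alpha")[:]
beta_samples = mcmc.trace("beta")[:]
# Posterior mean defect probability at a hypothetical temperature of 65 F.
t = 65.0
p_t = 1.0 / (1.0 + np.exp(beta_samples * t + alpha_samples))
print("mean P(defect | t = 65) =", p_t.mean())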
<import_stmt>unittest<import_stmt>uuid<import_stmt>datetime<import_from_stmt>boto.mturk.question ExternalQuestion<import_from_stmt>_init_environment SetHostMTurkConnection external_url config_environment<class_stmt>Test(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>config_environment()<block_end><def_stmt>test_create_hit_external self<block_start>q=ExternalQuestion(external_url=external_url frame_height=800)<line_sep>conn=SetHostMTurkConnection()<line_sep>keywords=['boto' 'test' 'doctest']<line_sep>create_hit_rs=conn.create_hit(question=q lifetime=60<times>65 max_assignments=2 title="Boto External Question Test" keywords=keywords reward=0.05 duration=60<times>6 approval_delay=60<times>60 annotation='An annotation from boto external question test' response_groups=['Minimal' 'HITDetail' 'HITQuestion' 'HITAssignmentSummary' ])<assert_stmt>(create_hit_rs.status<eq><true>)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end> |
<import_from_stmt>cryptography.fernet Fernet<import_stmt>os<def_stmt>write_key <block_start>"""
Generates a key and saves it into a file.
"""<line_sep>key=Fernet.generate_key()<with_stmt>open("key.key" "wb")<as>key_file<block_start>key_file.write(key)<block_end><block_end><def_stmt>load_key <block_start>"""
Loads the key named `key.key` from the current directory.
"""<line_sep><return>open("key.key" "rb").read()<block_end><def_stmt>encrypt filename key<block_start>"""
Given a filename (str) and key (bytes), it encrypts the file and writes it.
"""<line_sep>f=Fernet(key)<with_stmt>open(filename "rb")<as>file# read all file data
<block_start>file_data=file.read()<block_end># encrypt data
encrypted_data=f.encrypt(file_data)<line_sep># write the encrypted file
<with_stmt>open(filename "wb")<as>file<block_start>file.write(encrypted_data)<block_end><block_end><def_stmt>decrypt filename key<block_start>"""
Given a filename (str) and key (bytes), it decrypts the file and writes it.
"""<line_sep>f=Fernet(key)<with_stmt>open(filename "rb")<as>file# read the encrypted data
<block_start>encrypted_data=file.read()<block_end># decrypt data
decrypted_data=f.decrypt(encrypted_data)<line_sep># write the original file
<with_stmt>open(filename "wb")<as>file<block_start>file.write(decrypted_data)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>argparse<line_sep>parser=argparse.ArgumentParser(description="Simple File Encryptor Script")<line_sep>parser.add_argument("file" help="File to encrypt/decrypt")<line_sep>parser.add_argument("-g" "--generate-key" dest="generate_key" action="store_true" help="Whether to generate a new key or use existing")<line_sep>parser.add_argument("-e" "--encrypt" action="store_true" help="Whether to encrypt the file, only -e or -d can be specified.")<line_sep>parser.add_argument("-d" "--decrypt" action="store_true" help="Whether to decrypt the file, only -e or -d can be specified.")<line_sep>args=parser.parse_args()<line_sep>file=args.file<line_sep>generate_key=args.generate_key<if_stmt>generate_key<block_start>write_key()<block_end># load the key
key=load_key()<line_sep>encrypt_=args.encrypt<line_sep>decrypt_=args.decrypt<if_stmt>encrypt_<and>decrypt_<block_start><raise>TypeError("Please specify whether you want to encrypt the file or decrypt it.")<block_end><elif_stmt>encrypt_<block_start>encrypt(file key)<block_end><elif_stmt>decrypt_<block_start>decrypt(file key)<block_end><else_stmt><block_start><raise>TypeError("Please specify whether you want to encrypt the file or decrypt it.")<block_end><block_end> |
<import_stmt>unittest<import_from_stmt>os.path join<import_from_stmt>unittest mock<import_from_stmt>pythonforandroid.recipes.python3 NDK_API_LOWER_THAN_SUPPORTED_MESSAGE <import_from_stmt>pythonforandroid.util BuildInterruptingException<import_from_stmt>tests.recipes.recipe_lib_test RecipeCtx<class_stmt>TestPython3Recipe(RecipeCtx unittest.TestCase)<block_start>"""
TestCase for recipe :mod:`~pythonforandroid.recipes.python3`
"""<line_sep>recipe_name="python3"<def_stmt>test_property__libpython self<block_start>self.assertEqual(self.recipe._libpython f'libpython{self.recipe.link_version}.so')<block_end>@mock.patch('pythonforandroid.recipes.python3.Path.is_file')<def_stmt>test_should_build self mock_is_file# in case that python lib exists, we shouldn't trigger the build
<block_start>self.assertFalse(self.recipe.should_build(self.arch))<line_sep># in case that python lib doesn't exist, we should trigger the build
mock_is_file.return_value=<false><line_sep>self.assertTrue(self.recipe.should_build(self.arch))<block_end><def_stmt>test_include_root self<block_start>expected_include_dir=join(self.recipe.get_build_dir(self.arch.arch) 'Include' )<line_sep>self.assertEqual(expected_include_dir self.recipe.include_root(self.arch.arch))<block_end><def_stmt>test_link_root self<block_start>expected_link_root=join(self.recipe.get_build_dir(self.arch.arch) 'android-build' )<line_sep>self.assertEqual(expected_link_root self.recipe.link_root(self.arch.arch))<block_end>@mock.patch("pythonforandroid.recipes.python3.subprocess.call")<def_stmt>test_compile_python_files self mock_subprocess<block_start>fake_compile_dir='/fake/compile/dir'<line_sep>hostpy=self.recipe.ctx.hostpython='/fake/hostpython3'<line_sep>self.recipe.compile_python_files(fake_compile_dir)<line_sep>mock_subprocess.assert_called_once_with([hostpy '-OO' '-m' 'compileall' '-b' '-f' fake_compile_dir] )<block_end>@mock.patch("pythonforandroid.recipe.Recipe.check_recipe_choices")@mock.patch("pythonforandroid.archs.glob")<def_stmt>test_get_recipe_env self mock_glob mock_check_recipe_choices <block_start>"""
Test that method
:meth:`~pythonforandroid.recipes.python3.Python3Recipe.get_recipe_env`
returns the expected flags
"""<line_sep>mock_glob.return_value=["llvm"]<line_sep>mock_check_recipe_choices.return_value=sorted(self.ctx.recipe_build_order)<line_sep>env=self.recipe.get_recipe_env(self.arch)<line_sep>self.assertIn(f'-fPIC -DANDROID -D__ANDROID_API__={self.ctx.ndk_api}' env["CFLAGS"])<line_sep>self.assertEqual(env["CC"] self.arch.get_clang_exe(with_target=<true>))<line_sep># make sure that the mocked methods are actually called
mock_glob.assert_called()<line_sep>mock_check_recipe_choices.assert_called()<block_end><def_stmt>test_set_libs_flags self# todo: properly check `Python3Recipe.set_lib_flags`
<block_start><pass><block_end># These decorators are to mock calls to `get_recipe_env`
# and `set_libs_flags`, since these calls are tested separately
@mock.patch("pythonforandroid.util.chdir")@mock.patch("pythonforandroid.util.makedirs")@mock.patch("pythonforandroid.archs.glob")<def_stmt>test_build_arch self mock_glob mock_makedirs mock_chdir <block_start>mock_glob.return_value=["llvm"]<line_sep># specific `build_arch` mocks
<with_stmt>mock.patch("builtins.open" mock.mock_open(read_data="#define ZLIB_VERSION 1.1\nfoo"))<as>mock_open_zlib mock.patch("pythonforandroid.recipes.python3.sh.Command")<as>mock_sh_command mock.patch("pythonforandroid.recipes.python3.sh.make")<as>mock_make mock.patch("pythonforandroid.recipes.python3.sh.cp")<as>mock_cp<block_start>self.recipe.build_arch(self.arch)<block_end># make sure that the mocked methods are actually called
recipe_build_dir=self.recipe.get_build_dir(self.arch.arch)<line_sep>sh_command_calls={f"{recipe_build_dir}/config.guess" f"{recipe_build_dir}/configure" }<for_stmt>command sh_command_calls<block_start>self.assertIn(mock.call(command) mock_sh_command.mock_calls )<block_end>mock_open_zlib.assert_called()<line_sep>self.assertEqual(mock_make.call_count 1)<for_stmt>make_call,kw mock_make.call_args_list<block_start>self.assertIn(f'INSTSONAME={self.recipe._libpython}' make_call)<block_end>mock_cp.assert_called_with("pyconfig.h" join(recipe_build_dir 'Include') )<line_sep>mock_makedirs.assert_called()<line_sep>mock_chdir.assert_called()<block_end><def_stmt>test_build_arch_wrong_ndk_api self# we check ndk_api using recipe's ctx
<block_start>self.recipe.ctx.ndk_api=20<with_stmt>self.assertRaises(BuildInterruptingException)<as>e<block_start>self.recipe.build_arch(self.arch)<block_end>self.assertEqual(e.exception.args[0] NDK_API_LOWER_THAN_SUPPORTED_MESSAGE.format(ndk_api=self.recipe.ctx.ndk_api min_ndk_api=self.recipe.MIN_NDK_API ) )<line_sep># restore recipe's ctx or we could get failures with other tests,
# since we share `self.recipe` with all the tests of the class
self.recipe.ctx.ndk_api=self.ctx.ndk_api<block_end>@mock.patch('shutil.copystat')@mock.patch('shutil.copyfile')@mock.patch("pythonforandroid.util.chdir")@mock.patch("pythonforandroid.util.makedirs")@mock.patch("pythonforandroid.util.walk")@mock.patch("pythonforandroid.recipes.python3.sh.find")@mock.patch("pythonforandroid.recipes.python3.sh.cp")@mock.patch("pythonforandroid.recipes.python3.sh.zip")@mock.patch("pythonforandroid.recipes.python3.subprocess.call")<def_stmt>test_create_python_bundle self mock_subprocess mock_sh_zip mock_sh_cp mock_sh_find mock_walk mock_makedirs mock_chdir mock_copyfile mock_copystat <block_start>fake_compile_dir='/fake/compile/dir'<line_sep>simulated_walk_result=[["/fake_dir" ["__pycache__" "Lib"] ["README" "setup.py"]] ["/fake_dir/Lib" ["ctypes"] ["abc.pyc" "abc.py"]] ["/fake_dir/Lib/ctypes" [] ["util.pyc" "util.py"]] ]<line_sep>mock_walk.return_value=simulated_walk_result<line_sep>self.recipe.create_python_bundle(fake_compile_dir self.arch)<line_sep>recipe_build_dir=self.recipe.get_build_dir(self.arch.arch)<line_sep>modules_build_dir=join(recipe_build_dir 'android-build' 'build' 'lib.linux{}-{}-{}'.format('2'<if>self.recipe.version[0]<eq>'2'<else>'' self.arch.command_prefix.split('-')[0] self.recipe.major_minor_version_string))<line_sep>expected_sp_paths=[modules_build_dir join(recipe_build_dir 'Lib') self.ctx.get_python_install_dir(self.arch.arch) ]<for_stmt>n,(sp_call kw) enumerate(mock_subprocess.call_args_list)<block_start>self.assertEqual(sp_call[0][-1] expected_sp_paths[n])<block_end># we expect two calls to `walk_valid_filens`
self.assertEqual(len(mock_walk.call_args_list) 2)<line_sep>mock_sh_zip.assert_called()<line_sep>mock_sh_cp.assert_called()<line_sep>mock_sh_find.assert_called()<line_sep>mock_makedirs.assert_called()<line_sep>mock_chdir.assert_called()<line_sep>mock_copyfile.assert_called()<line_sep>mock_copystat.assert_called()<block_end><block_end> |
"""
============================
Data set of recurrence plots
============================
A recurrence plot is an image obtained from a time series, representing the
pairwise Euclidean distances for each value (and more generally for each
trajectory) in the time series.
The image can be binarized using a threshold.
It is implemented as :class:`pyts.image.RecurrencePlot`.
In this example, we consider the training samples of the
`GunPoint dataset <http://timeseriesclassification.com/description.php?Dataset=GunPoint>`_,
consisting of 50 univariate time series of length 150.
The recurrence plot of each time series is independently computed and the
50 recurrence plots are plotted.
"""<line_sep># noqa:E501
# Author: <NAME> <<EMAIL>>
# License: BSD-3-Clause
<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>mpl_toolkits.axes_grid1 ImageGrid<import_from_stmt>pyts.image RecurrencePlot<import_from_stmt>pyts.datasets load_gunpoint<line_sep># Load the GunPoint dataset
X,_,_,_=load_gunpoint(return_X_y=<true>)<line_sep># Get the recurrence plots for all the time series
rp=RecurrencePlot(threshold='point' percentage=20)<line_sep>X_rp=rp.fit_transform(X)<line_sep># Plot the 50 recurrence plots
fig=plt.figure(figsize=(10 5))<line_sep>grid=ImageGrid(fig 111 nrows_ncols=(5 10) axes_pad=0.1 share_all=<true>)<for_stmt>i,ax enumerate(grid)<block_start>ax.imshow(X_rp[i] cmap='binary' origin='lower')<block_end>grid[0].get_yaxis().set_ticks([])<line_sep>grid[0].get_xaxis().set_ticks([])<line_sep>fig.suptitle("Recurrence plots for the 50 time series in the 'GunPoint' dataset" y=0.92)<line_sep>plt.show()<line_sep> |
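The RecurrencePlot transformer hides the underlying construction. A rough sketch (not the pyts implementation) of how the recurrence matrix of the first series could be built by hand from pairwise distances, approximately mirroring the threshold='point', percentage=20 setting used above:

import numpy as np

# Pairwise distances for trajectories of length 1 reduce to |x_i - x_j|.
x = X[0]
dist = np.abs(x[:, None] - x[None, :])
eps = np.percentile(dist, 20)        # keep the closest 20% of pairs as recurrences
R = (dist <= eps).astype(int)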
r"""
Diametrically point loaded 2-D disk with nodal stress calculation. See
:ref:`sec-primer`.
Find :math:`\ul{u}` such that:
.. math::
\int_{\Omega} D_{ijkl}\ e_{ij}(\ul{v}) e_{kl}(\ul{u})
= 0
\;, \quad \forall \ul{v} \;,
where
.. math::
D_{ijkl} = \mu (\delta_{ik} \delta_{jl}+\delta_{il} \delta_{jk}) +
\lambda \ \delta_{ij} \delta_{kl}
\;.
"""<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_from_stmt>examples.linear_elasticity.its2D_1 *<import_from_stmt>sfepy.mechanics.matcoefs stiffness_from_youngpoisson<import_from_stmt>sfepy.discrete.fem.geometry_element geometry_data<import_from_stmt>sfepy.discrete FieldVariable<import_from_stmt>sfepy.discrete.fem Field<import_stmt>numpy<as>nm<line_sep>gdata=geometry_data['2_3']<line_sep>nc=len(gdata.coors)<def_stmt>nodal_stress out pb state extend=<false> integrals=<none><block_start>'''
Calculate stresses at nodal points.
'''<line_sep># Point load.
mat=pb.get_materials()['Load']<line_sep>P=2.0<times>mat.get_data('special' 'val')[1]<line_sep># Calculate nodal stress.
pb.time_update()<if_stmt>integrals<is><none><block_start>integrals=pb.get_integrals()<block_end>stress=pb.evaluate('ev_cauchy_stress.ivn.Omega(Asphalt.D, u)' mode='qp' integrals=integrals copy_materials=<false>)<line_sep>sfield=Field.from_args('stress' nm.float64 (3 ) pb.domain.regions['Omega'])<line_sep>svar=FieldVariable('sigma' 'parameter' sfield primary_var_name='(set-to-None)')<line_sep>svar.set_from_qp(stress integrals['ivn'])<line_sep>print('\n==================================================================')<line_sep>print('Given load = %.2f N'%-P)<line_sep>print('\nAnalytical solution')<line_sep>print('===================')<line_sep>print('Horizontal tensile stress = %.5e MPa/mm'%(-2.<times>P/(nm.pi<times>150.)))<line_sep>print('Vertical compressive stress = %.5e MPa/mm'%(-6.<times>P/(nm.pi<times>150.)))<line_sep>print('\nFEM solution')<line_sep>print('============')<line_sep>print('Horizontal tensile stress = %.5e MPa/mm'%(svar()[0]))<line_sep>print('Vertical compressive stress = %.5e MPa/mm'%(-svar()[1]))<line_sep>print('==================================================================')<line_sep><return>out<block_end>asphalt=materials['Asphalt'][0]<line_sep>asphalt.update({'D':stiffness_from_youngpoisson(2 young poisson)})<line_sep>options.update({'post_process_hook':'nodal_stress' })<line_sep>integrals={'ivn':('custom' gdata.coors [gdata.volume/nc]<times>nc) }<line_sep> |
"""Caching utilities."""<line_sep> |
<import_stmt>copy<import_stmt>logging<import_stmt>typing<import_from_stmt>typing Dict<import_from_stmt>bs4 BeautifulSoup<import_from_stmt>mitmproxy.http HTTPFlow<import_from_stmt>examples.contrib.webscanner_helper.urldict URLDict<line_sep>NO_CONTENT=object()<class_stmt>MappingAddonConfig<block_start>HTML_PARSER="html.parser"<block_end><class_stmt>MappingAddon<block_start>""" The mapping add-on can be used in combination with web application scanners to reduce their false positives.
Many web application scanners produce false positives caused by dynamically changing content of web applications
such as the current time or current measurements. When testing for injection vulnerabilities, web application
scanners are tricked into thinking they changed the content with the injected payload. In reality, the content of
the web application changed notwithstanding the scanner's input. When the mapping add-on is used to map the content
to a fixed value, these false positives can be avoided.
"""<line_sep>OPT_MAPPING_FILE="mapping_file"<line_sep>"""File where urls and css selector to mapped content is stored.
Elements will be replaced with the content given in this file. If the content is None, it will be set to the first
seen value.
Example:
{
"http://10.10.10.10": {
"body": "My Text"
},
"URL": {
"css selector": "Replace with this"
}
}
"""<line_sep>OPT_MAP_PERSISTENT="map_persistent"<line_sep>"""Whether to store all new content in the configuration file."""<def_stmt>__init__ self filename:str persistent:bool=<false><arrow><none><block_start>""" Initializes the mapping add-on
Args:
filename: str that provides the name of the file in which the URLs and CSS selectors for mapped content are
stored.
persistent: bool that indicates whether to store all new content in the configuration file.
Example:
The file in which the mapping config is given should be in the following format:
{
"http://10.10.10.10": {
"body": "My Text"
},
"<URL>": {
"<css selector>": "Replace with this"
}
}
"""<line_sep>self.filename=filename<line_sep>self.persistent=persistent<line_sep>self.logger=logging.getLogger(self.__class__.__name__)<with_stmt>open(filename)<as>f<block_start>self.mapping_templates=URLDict.load(f)<block_end><block_end><def_stmt>load self loader<block_start>loader.add_option(self.OPT_MAPPING_FILE str "" "File where replacement configuration is stored.")<line_sep>loader.add_option(self.OPT_MAP_PERSISTENT bool <false> "Whether to store all new content in the configuration file.")<block_end><def_stmt>configure self updated<block_start><if_stmt>self.OPT_MAPPING_FILE<in>updated<block_start>self.filename=updated[self.OPT_MAPPING_FILE]<with_stmt>open(self.filename)<as>f<block_start>self.mapping_templates=URLDict.load(f)<block_end><block_end><if_stmt>self.OPT_MAP_PERSISTENT<in>updated<block_start>self.persistent=updated[self.OPT_MAP_PERSISTENT]<block_end><block_end><def_stmt>replace self soup:BeautifulSoup css_sel:str replace:BeautifulSoup<arrow><none><block_start>"""Replaces the content of soup that matches the css selector with the given replace content."""<for_stmt>content soup.select(css_sel)<block_start>self.logger.debug(f"replace \"{content}\" with \"{replace}\"")<line_sep>content.replace_with(copy.copy(replace))<block_end><block_end><def_stmt>apply_template self soup:BeautifulSoup template:Dict[str typing.Union[BeautifulSoup]]<arrow><none><block_start>"""Applies the given mapping template to the given soup."""<for_stmt>css_sel,replace template.items()<block_start>mapped=soup.select(css_sel)<if_stmt><not>mapped<block_start>self.logger.warning(f"Could not find \"{css_sel}\", can not freeze anything.")<block_end><else_stmt><block_start>self.replace(soup css_sel BeautifulSoup(replace features=MappingAddonConfig.HTML_PARSER))<block_end><block_end><block_end><def_stmt>response self flow:HTTPFlow<arrow><none><block_start>"""If a response is received, check if we should replace some content. """<try_stmt><block_start>templates=self.mapping_templates[flow]<line_sep>res=flow.response<if_stmt>res<is><not><none><block_start>encoding=res.headers.get("content-encoding" "utf-8")<line_sep>content_type=res.headers.get("content-type" "text/html")<if_stmt>"text/html"<in>content_type<and>encoding<eq>"utf-8"<block_start>content=BeautifulSoup(res.content MappingAddonConfig.HTML_PARSER)<for_stmt>template templates<block_start>self.apply_template(content template)<block_end>res.content=content.encode(encoding)<block_end><else_stmt><block_start>self.logger.warning(f"Unsupported content type '{content_type}' or content encoding '{encoding}'")<block_end><block_end><block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><def_stmt>done self<arrow><none><block_start>"""Dumps all new content into the configuration file if self.persistent is set."""<if_stmt>self.persistent# make sure that all items are strings and not soups.
<block_start><def_stmt>value_dumper value<block_start>store={}<if_stmt>value<is><none><block_start><return>"None"<block_end><try_stmt><block_start><for_stmt>css_sel,soup value.items()<block_start>store[css_sel]=str(soup)<block_end><block_end><except_stmt><block_start><raise>RuntimeError(value)<block_end><return>store<block_end><with_stmt>open(self.filename "w")<as>f<block_start>self.mapping_templates.dump(f value_dumper)<block_end><block_end><block_end><block_end> |
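A hedged sketch of how this add-on might be wired into mitmproxy as a script addon; the mapping file name is an assumption:

# e.g. saved as mapping_script.py and loaded with `mitmproxy -s mapping_script.py`
addons = [
    MappingAddon("mapping.json", persistent=True)
]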
"""
Copyright (c) 2022 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""<import_from_stmt>typing Dict Any<import_stmt>json<import_stmt>tensorflow<as>tf<import_from_stmt>nncf.common.compression BaseCompressionAlgorithmController<class_stmt>TFCompressionState(tf.train.experimental.PythonState)<block_start>"""
A wrapper for `BaseCompressionAlgorithmController` that allows saving
the compression state to the checkpoint.
"""<def_stmt>__init__ self controller:BaseCompressionAlgorithmController<block_start>"""
Initializes the wrapper for the controller.
:param controller: The controller which gives the compressions state.
"""<line_sep>self._ctrl=controller<block_end><def_stmt>serialize self<arrow>str<block_start>"""
Callback to serialize the compression state.
:return: A serialized compression state.
"""<line_sep>compression_state=self._ctrl.get_compression_state()<line_sep><return>json.dumps(compression_state)<block_end><def_stmt>deserialize self string_value:str<arrow><none><block_start>"""
Callback to deserialize the compression state.
:param string_value: A serialized compression state.
"""<line_sep>compression_state=json.loads(string_value)<line_sep>ctrl_state=compression_state[BaseCompressionAlgorithmController.CONTROLLER_STATE]<line_sep>self._ctrl.load_state(ctrl_state)<block_end><block_end><class_stmt>TFCompressionStateLoader(tf.train.experimental.PythonState)<block_start>"""
This class allows extracting the compression state from a checkpoint.
The extracted compression state is not applied.
"""<def_stmt>__init__ self<block_start>"""
Initializes the compression state loader.
"""<line_sep>self._state=<none><block_end>@property<def_stmt>state self<arrow>Dict[str Any]<block_start>"""
Returns the compression state which was extracted from the checkpoint.
:return: The compression state.
"""<line_sep><return>self._state<block_end><def_stmt>serialize self<arrow>str<block_start><raise>NotImplementedError('Use an instance of the `TFCompressionState` class to '<concat>'serialize the compression state.')<block_end><def_stmt>deserialize self string_value:str<arrow><none><block_start>"""
Callback to deserialize the compression state.
:param string_value: A serialized compression state.
"""<line_sep>self._state=json.loads(string_value)<block_end><block_end> |
<import_stmt>os<import_from_stmt>mushroom_rl.utils.preprocessors MinMaxPreprocessor<import_from_stmt>mushroom_rl.utils.callbacks PlotDataset<import_stmt>numpy<as>np<import_from_stmt>mushroom_rl.algorithms.policy_search REINFORCE<import_from_stmt>mushroom_rl.approximators.parametric LinearApproximator<import_from_stmt>mushroom_rl.approximators.regressor Regressor<import_from_stmt>mushroom_rl.core Core Logger<import_from_stmt>mushroom_rl.environments LQR<import_from_stmt>mushroom_rl.policy StateStdGaussianPolicy<import_from_stmt>mushroom_rl.utils.dataset compute_J<import_from_stmt>mushroom_rl.utils.optimizers AdaptiveOptimizer<import_from_stmt>tqdm tqdm<line_sep>"""
This script shows how to use preprocessors and the plotting callback.
"""<line_sep>tqdm.monitor_interval=0<def_stmt>experiment n_epochs n_iterations ep_per_run save_states_to_disk<block_start>np.random.seed()<line_sep>logger=Logger('plot_and_norm_example' results_dir=<none>)<line_sep>logger.strong_line()<line_sep>logger.info('Plotting and normalization example')<line_sep># MDP
mdp=LQR.generate(dimensions=2 max_pos=10. max_action=5. episodic=<true>)<line_sep>approximator=Regressor(LinearApproximator input_shape=mdp.info.observation_space.shape output_shape=mdp.info.action_space.shape)<line_sep>sigma=Regressor(LinearApproximator input_shape=mdp.info.observation_space.shape output_shape=mdp.info.action_space.shape)<line_sep>sigma_weights=2<times>np.ones(sigma.weights_size)<line_sep>sigma.set_weights(sigma_weights)<line_sep>policy=StateStdGaussianPolicy(approximator sigma)<line_sep># Agent
optimizer=AdaptiveOptimizer(eps=.01)<line_sep>algorithm_params=dict(optimizer=optimizer)<line_sep>agent=REINFORCE(mdp.info policy **algorithm_params)<line_sep># normalization callback
prepro=MinMaxPreprocessor(mdp_info=mdp.info)<line_sep># plotting callback
plotter=PlotDataset(mdp.info obs_normalized=<true>)<line_sep># Train
core=Core(agent mdp callback_step=plotter preprocessors=[prepro])<line_sep># training loop
<for_stmt>n range(n_epochs)<block_start>core.learn(n_episodes=n_iterations<times>ep_per_run n_episodes_per_fit=ep_per_run)<line_sep>dataset=core.evaluate(n_episodes=ep_per_run render=<false>)<line_sep>J=np.mean(compute_J(dataset mdp.info.gamma))<line_sep>logger.epoch_info(n+1 J=J)<block_end><if_stmt>save_states_to_disk# save normalization / plot states to disk path
<block_start>logger.info('Saving plotting and normalization data')<line_sep>os.makedirs("./logs/plot_and_norm" exist_ok=<true>)<line_sep>prepro.save("./logs/plot_and_norm/preprocessor.msh")<line_sep>plotter.save_state("./logs/plot_and_norm/plotting_state")<line_sep># load states from disk path
logger.info('Loading preprocessor and plotter')<line_sep>prepro=MinMaxPreprocessor.load("./logs/plot_and_norm/preprocessor.msh")<line_sep>plotter.load_state("./logs/plot_and_norm/plotting_state")<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>experiment(n_epochs=10 n_iterations=10 ep_per_run=100 save_states_to_disk=<false>)<block_end>
"""
Authors: <NAME>.
Copyright:
Copyright (c) 2021 Microsoft Research
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>sys<import_stmt>os<line_sep># Athos DIR
sys.path.append(os.path.join(os.path.dirname(__file__) ".." ".." ".."))<import_from_stmt>tests.utils TFConfig Compiler assert_almost_equal<line_sep>@pytest.mark.parametrize("a_shape, out_shape" [([2 3] [6]) ([6] [2 3]) ([2 3] [3 2]) ([2 3] [-1]) # Flatten 1-D,
([1] []) # convert to scalar,
([3 2 3] [2 -1]) # infer -1 as 9,
([3 2 3] [-1 9]) # infer -1 as 2
] )@pytest.mark.parametrize("dtype" [np.single])<def_stmt>test_reshape test_dir backend a_shape out_shape dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>output=tf.reshape(a out_shape name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp})<block_end><assert_stmt>expected_output<is><not><none><line_sep>config=TFConfig(backend).add_input(a).add_output(output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp])<line_sep>assert_almost_equal(model_output=expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end>@pytest.mark.parametrize("a_shape, perm" [([2 3] [1 0]) ([2 4 3] [0 2 1])] # normal transpose, with perm
)@pytest.mark.parametrize("dtype" [np.single])<def_stmt>test_transpose test_dir backend a_shape perm dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>output=tf.transpose(a perm name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp})<block_end>config=TFConfig(backend).add_input(a).add_output(output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp])<line_sep>assert_almost_equal(model_output=expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end>@pytest.mark.parametrize("a_shape, num_or_size_splits, axis" [([2 10] 5 1) pytest.param([5 7] [1 4 2] 1 marks=pytest.mark.skip(reason="[split] don't support split into specific sizes (SplitV)") ) ] )@pytest.mark.parametrize("dtype" [np.single])<def_stmt>test_split test_dir backend a_shape num_or_size_splits axis dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>output=tf.split(a num_or_size_splits axis name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp})<block_end><if_stmt>type(output)<eq>list<block_start>tf_output=output[-1]<line_sep>tf_expected_output=expected_output[-1]<block_end><else_stmt><block_start>tf_output=output<line_sep>tf_expected_output=expected_output<block_end>config=TFConfig(backend).add_input(a).add_output(tf_output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp])<line_sep>assert_almost_equal(model_output=tf_expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end># Squeeze
@pytest.mark.parametrize("a_shape, axis" [pytest.param([1 2 1 3 1 1] <none> marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported") ) pytest.param([1 2 1 3 1 1] [2 4] marks=pytest.mark.skip(reason="[squeeze] Parametric squeeze not supported") ) ] )@pytest.mark.parametrize("dtype" [np.single])<def_stmt>test_squeeze test_dir backend a_shape axis dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>output=tf.squeeze(a axis=axis name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp})<block_end>config=TFConfig(backend).add_input(a).add_output(output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp])<line_sep>assert_almost_equal(model_output=expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end>@pytest.mark.parametrize("a_shape, begin, size" [([3 2 3] [1 0 0] [1 1 3]) ([3 2 3] [1 0 0] [1 2 3]) ([3 2 3] [1 0 0] [2 1 3]) ] )@pytest.mark.parametrize("dtype" [np.single])<def_stmt>test_slice test_dir backend a_shape begin size dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>output=tf.slice(a begin size name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp})<block_end>config=TFConfig(backend).add_input(a).add_output(output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp])<line_sep>assert_almost_equal(model_output=expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end>@pytest.mark.parametrize("a_shape, b_shape, axis" [([2 3] [3 3] 0) ([2 3 2 1] [2 6 2 1] 1) ] )@pytest.mark.parametrize("dtype" [np.single])<def_stmt>test_concat test_dir backend a_shape b_shape axis dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<line_sep>b_inp=dtype(np.random.randn(*b_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>b=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=b_inp.shape name="b")<line_sep>output=tf.concat([a b] axis name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp b:b_inp})<block_end>config=TFConfig(backend).add_input(a).add_input(b).add_output(output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp b_inp])<line_sep>assert_almost_equal(model_output=expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end># ExpandDims
@pytest.mark.parametrize("a_shape, axis" [pytest.param([3 2 3] 1 marks=pytest.mark.skip(reason="[expand_dims] not supported")) pytest.param([2 5] 0 marks=pytest.mark.skip(reason="[expand_dims] not supported")) ] )@pytest.mark.parametrize("dtype" [np.single])<def_stmt>test_expand_dims test_dir backend a_shape axis dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>output=tf.expand_dims(a axis name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp})<block_end>config=TFConfig(backend).add_input(a).add_output(output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp])<line_sep>assert_almost_equal(model_output=expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end># Pad
@pytest.mark.parametrize("a_shape, paddings, mode, constant_values" [([1 2 2 1] [[1 1] [1 2] [1 1] [1 3]] "CONSTANT" 0) pytest.param([1 2 2 1] [[1 1] [1 2] [1 1] [1 3]] "REFLECT" 0 marks=pytest.mark.skip(reason="[pad] REFLECT not supported") ) pytest.param([1 2 2 1] [[1 1] [1 2] [1 1] [1 3]] "SYMMETRIC" 0 marks=pytest.mark.skip(reason="[pad] SYMMETRIC not supported") ) pytest.param([2 3] [[1 1] [2 2] ] "CONSTANT" 0 marks=pytest.mark.skip(reason="[pad] Generic pad not supported") ) pytest.param([1 2 2 1] [[1 1] [1 2] [1 1] [1 3]] "CONSTANT" 1.2 marks=pytest.mark.skip(reason="[pad] non-zero padding not supported") ) ] )@pytest.mark.parametrize("dtype" [np.single])<def_stmt>test_pad test_dir backend a_shape paddings mode constant_values dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>pad=tf.constant(paddings name="paddings")<line_sep>output=tf.pad(a pad mode=mode constant_values=constant_values name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp})<block_end>config=TFConfig(backend).add_input(a).add_output(output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp])<line_sep>assert_almost_equal(model_output=expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end># Tile
@pytest.mark.parametrize("a_shape, multiples" [([2 3] [1 2]) ([2 3] [2 1]) ([2 3] [2 2])])@pytest.mark.parametrize("dtype" [np.single])@pytest.mark.skip(reason="[tile] Not supported")<def_stmt>test_tile test_dir backend a_shape multiples dtype<block_start>graph=tf.Graph()<line_sep>a_inp=dtype(np.random.randn(*a_shape))<with_stmt>graph.as_default()<block_start>a=tf.compat.v1.placeholder(tf.as_dtype(dtype) shape=a_inp.shape name="a")<line_sep>mults=tf.constant(multiples name="multiples")<line_sep>output=tf.tile(a mults name="output")<block_end><with_stmt>tf.compat.v1.Session(graph=graph)<as>sess<block_start>expected_output=sess.run(output feed_dict={a:a_inp})<block_end>config=TFConfig(backend).add_input(a).add_output(output)<line_sep>compiler=Compiler(graph config test_dir)<line_sep>mpc_output=compiler.compile_and_run([a_inp])<line_sep>assert_almost_equal(model_output=expected_output mpc_tensor=mpc_output precision=2)<line_sep><return><block_end> |
# This file is part of GridCal.
#
# GridCal is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# GridCal is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GridCal. If not, see <http://www.gnu.org/licenses/>.
<import_from_stmt>enum Enum<import_from_stmt>GridCal.Engine.Devices DeviceType<class_stmt>ResultTypes(Enum)# Power flow
<block_start>BusVoltage='Bus voltage' DeviceType.BusDevice<line_sep>BusVoltagePolar='Bus voltage (polar)' DeviceType.BusDevice<line_sep>BusActivePower='Bus active power' DeviceType.BusDevice<line_sep>BusReactivePower='Bus reactive power' DeviceType.BusDevice<line_sep>BranchPower='Branch power' DeviceType.BranchDevice<line_sep>BranchActivePowerFrom='Branch active power "from"' DeviceType.BranchDevice<line_sep>BranchReactivePowerFrom='Branch reactive power "from"' DeviceType.BranchDevice<line_sep>BranchActivePowerTo='Branch active power "to"' DeviceType.BranchDevice<line_sep>BranchReactivePowerTo='Branch reactive power "to"' DeviceType.BranchDevice<line_sep>BranchCurrent='Branch current' DeviceType.BranchDevice<line_sep>BranchActiveCurrentFrom='Branch active current "from"' DeviceType.BranchDevice<line_sep>BranchReactiveCurrentFrom='Branch reactive current "from"' DeviceType.BranchDevice<line_sep>BranchActiveCurrentTo='Branch active current "to"' DeviceType.BranchDevice<line_sep>BranchReactiveCurrentTo='Branch reactive current "to"' DeviceType.BranchDevice<line_sep>BranchTapModule='Branch tap module' DeviceType.BranchDevice<line_sep>BranchTapAngle='Branch tap angle' DeviceType.BranchDevice<line_sep>BranchBeq='Branch Beq' DeviceType.BranchDevice<line_sep>BranchLoading='Branch loading' DeviceType.BranchDevice<line_sep>Transformer2WTapModule='Transformer tap module' DeviceType.Transformer2WDevice<line_sep>BranchVoltage='Branch voltage drop' DeviceType.BranchDevice<line_sep>BranchAngles='Branch voltage angles' DeviceType.BranchDevice<line_sep>BranchLosses='Branch losses' DeviceType.BranchDevice<line_sep>BranchActiveLosses='Branch active losses' DeviceType.BranchDevice<line_sep>BranchReactiveLosses='Branch reactive losses' DeviceType.BranchDevice<line_sep>BatteryPower='Battery power' DeviceType.BatteryDevice<line_sep>BatteryEnergy='Battery energy' DeviceType.BatteryDevice<line_sep>HvdcLosses='HVDC losses' DeviceType.HVDCLineDevice<line_sep>HvdcPowerFrom='HVDC power "from"' DeviceType.HVDCLineDevice<line_sep>HvdcLoading='HVDC loading' DeviceType.HVDCLineDevice<line_sep>HvdcPowerTo='HVDC power "to"' DeviceType.HVDCLineDevice<line_sep># StochasticPowerFlowDriver
BusVoltageAverage='Bus voltage avg' DeviceType.BusDevice<line_sep>BusVoltageStd='Bus voltage std' DeviceType.BusDevice<line_sep>BusVoltageCDF='Bus voltage CDF' DeviceType.BusDevice<line_sep>BusPowerCDF='Bus power CDF' DeviceType.BusDevice<line_sep>BranchPowerAverage='Branch power avg' DeviceType.BranchDevice<line_sep>BranchPowerStd='Branch power std' DeviceType.BranchDevice<line_sep>BranchPowerCDF='Branch power CDF' DeviceType.BranchDevice<line_sep>BranchLoadingAverage='Branch loading avg' DeviceType.BranchDevice<line_sep>BranchLoadingStd='Branch loading std' DeviceType.BranchDevice<line_sep>BranchLoadingCDF='Branch loading CDF' DeviceType.BranchDevice<line_sep>BranchLossesAverage='Branch losses avg' DeviceType.BranchDevice<line_sep>BranchLossesStd='Branch losses std' DeviceType.BranchDevice<line_sep>BranchLossesCDF='Branch losses CDF' DeviceType.BranchDevice<line_sep># OPF
BusVoltageModule='Bus voltage module' DeviceType.BusDevice<line_sep>BusVoltageAngle='Bus voltage angle' DeviceType.BusDevice<line_sep>BusPower='Bus power' DeviceType.BusDevice<line_sep>ShadowPrices='Bus shadow prices' DeviceType.BusDevice<line_sep>BranchOverloads='Branch overloads' DeviceType.BranchDevice<line_sep>LoadShedding='Load shedding' DeviceType.LoadDevice<line_sep>ControlledGeneratorShedding='Generator shedding' DeviceType.GeneratorDevice<line_sep>ControlledGeneratorPower='Generator power' DeviceType.GeneratorDevice<line_sep># OPF-NTC
HvdcOverloads='HVDC overloads' DeviceType.HVDCLineDevice<line_sep>NodeSlacks='Nodal slacks' DeviceType.BusDevice<line_sep>GenerationDelta='Generation deltas' DeviceType.GeneratorDevice<line_sep>GenerationDeltaSlacks='Generation delta slacks' DeviceType.GeneratorDevice<line_sep>InterAreaExchange='Inter-Area exchange' DeviceType.NoDevice<line_sep># Short-circuit
BusShortCircuitPower='Bus short circuit power' DeviceType.BusDevice<line_sep># PTDF
PTDFBranchesSensitivity='Branch Flow sensitivity' DeviceType.BranchDevice<line_sep>PTDFBusVoltageSensitivity='Bus voltage sensitivity' DeviceType.BusDevice<line_sep>OTDF='Outage transfer distribution factors' DeviceType.BranchDevice<line_sep>MaxOverloads='Maximum contingency flow' DeviceType.BranchDevice<line_sep>ContingencyFlows='Contingency flow' DeviceType.BranchDevice<line_sep>ContingencyLoading='Contingency loading' DeviceType.BranchDevice<line_sep>WorstContingencyFlows='Worst contingency Sf' DeviceType.BranchDevice<line_sep>WorstContingencyLoading='Worst contingency loading' DeviceType.BranchDevice<line_sep>ContingencyFrequency='Contingency frequency' DeviceType.BranchDevice<line_sep>ContingencyRelativeFrequency='Contingency relative frequency' DeviceType.BranchDevice<line_sep>SimulationError='Error' DeviceType.BusDevice<line_sep>OTDFSimulationError='Error' DeviceType.BranchDevice<line_sep># sigma
SigmaReal='Sigma real' DeviceType.BusDevice<line_sep>SigmaImag='Sigma imaginary' DeviceType.BusDevice<line_sep>SigmaDistances='Sigma distances' DeviceType.BusDevice<line_sep>SigmaPlusDistances='Sigma + distances' DeviceType.BusDevice<line_sep># ATC
AvailableTransferCapacityMatrix='Available transfer capacity' DeviceType.BranchDevice<line_sep>AvailableTransferCapacity='Available transfer capacity (final)' DeviceType.BranchDevice<line_sep>AvailableTransferCapacityN='Available transfer capacity (N)' DeviceType.BranchDevice<line_sep>AvailableTransferCapacityAlpha='Sensitivity to the exchange' DeviceType.BranchDevice<line_sep>AvailableTransferCapacityBeta='Sensitivity to the exchange (N-1)' DeviceType.BranchDevice<line_sep>NetTransferCapacity='Net transfer capacity' DeviceType.BranchDevice<line_sep>AvailableTransferCapacityReport='ATC Report' DeviceType.NoDevice<line_sep>ContingencyFlowsReport='Contingency Report' DeviceType.NoDevice<line_sep># inputs analysis
ZoneAnalysis='Zone analysis' DeviceType.NoDevice<line_sep>CountryAnalysis='Country analysis' DeviceType.NoDevice<line_sep>AreaAnalysis='Area analysis' DeviceType.NoDevice<def_stmt>__str__ self<block_start><return>self.value<block_end><def_stmt>__repr__ self<block_start><return>str(self)<block_end>@staticmethod<def_stmt>argparse s<block_start><try_stmt><block_start><return>ResultTypes[s]<block_end><except_stmt>KeyError<block_start><return>s<block_end><block_end><block_end> |
# -*- coding: utf-8 -*-
'''
To use, e.g. python .\web2py.py -S APPNAME --force_migrate
To use, e.g. python .\web2py.py -S APPNAME --force_migrate --fake_migrate
'''<import_stmt>logging<line_sep>logger=logging.getLogger("web2py")<def_stmt>get_databases request<block_start>dbs={}<line_sep>global_env=globals()<for_stmt>(key value) global_env.items()<block_start><try_stmt><block_start>cond=isinstance(value GQLDB)<block_end><except_stmt><block_start>cond=isinstance(value SQLDB)<block_end><if_stmt>cond<block_start>dbs[key]=value<block_end><block_end><return>dbs<block_end>logger.debug('Getting all databases')<line_sep>databases=get_databases(<none>)<line_sep>logger.debug('databases = %s' databases)<for_stmt>db_name databases<block_start>logger.debug('Migrating %s' db_name)<line_sep>db=databases[db_name]<line_sep>tables=db.tables<for_stmt>table_name tables# Force migration of lazy tables
<block_start>logger.debug("Ensuring migration of table '%s'" table_name)<line_sep>table=db[table_name]<line_sep>db(table).isempty()<block_end>db.commit()<block_end> |
# encoding=utf8
<import_stmt>jenkins_job_wrecker.modules.base<import_from_stmt>jenkins_job_wrecker.helpers get_bool Mapper<class_stmt>Triggers(jenkins_job_wrecker.modules.base.Base)<block_start>component='triggers'<def_stmt>gen_yml self yml_parent data<block_start>triggers=[]<for_stmt>child data<block_start>object_name=child.tag.split('.')[-1].lower()<line_sep>self.registry.dispatch(self.component object_name child triggers)<block_end>yml_parent.append(['triggers' triggers])<block_end><block_end><def_stmt>scmtrigger top parent<block_start>pollscm={}<for_stmt>child top<block_start><if_stmt>child.tag<eq>'spec'<block_start>pollscm['cron']=child.text<block_end><elif_stmt>child.tag<eq>'ignorePostCommitHooks'<block_start>pollscm['ignore-post-commit-hooks']=(child.text<eq>'true')<block_end><else_stmt><block_start><raise>NotImplementedError('cannot handle scm trigger '<concat>'setting %s'%child.tag)<block_end><block_end>parent.append({'pollscm':pollscm})<block_end><def_stmt>timertrigger top parent<block_start>parent.append({'timed':top[0].text})<block_end><def_stmt>reversebuildtrigger top parent<block_start>reverse={}<for_stmt>child top<block_start><if_stmt>child.tag<eq>'upstreamProjects'<block_start>reverse['jobs']=child.text<block_end><elif_stmt>child.tag<eq>'threshold'<block_start><pass># TODO
<block_end><elif_stmt>child.tag<eq>'spec'<block_start><pass># TODO
<block_end><else_stmt><block_start><raise>NotImplementedError('cannot handle reverse trigger '<concat>'setting %s'%child.tag)<block_end><block_end>parent.append({'reverse':reverse})<block_end><def_stmt>__gerrit_process_file_paths attribute<block_start>file_paths=[]<for_stmt>file_path_type attribute<block_start><if_stmt>file_path_type.tag<eq>"com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.FilePath"<block_start>file_path={}<for_stmt>file_path_attribute file_path_type<block_start><if_stmt>file_path_attribute.tag<eq>"compareType"<block_start>file_path["compare-type"]=file_path_attribute.text<block_end><elif_stmt>file_path_attribute.tag<eq>"pattern"<block_start>file_path["pattern"]=file_path_attribute.text<block_end><block_end>file_paths.append(file_path)<block_end><else_stmt><block_start><raise>NotImplementedError("Not implemented file path type: " file_path_type.tag)<block_end><block_end><return>file_paths<block_end><def_stmt>__gerrit_process_gerrit_projects child<block_start>projects=[]<for_stmt>gerrit_project child<block_start>project={}<for_stmt>attribute gerrit_project<block_start><if_stmt>attribute.tag<eq>"compareType"<block_start>project["project-compare-type"]=attribute.text<block_end><elif_stmt>attribute.tag<eq>"pattern"<block_start>project["project-pattern"]=attribute.text<block_end><elif_stmt>attribute.tag<eq>"branches"<block_start>branches=[]<for_stmt>branch_type attribute<block_start><if_stmt>branch_type.tag<eq>"com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.data.Branch"<block_start>branch={}<for_stmt>branch_attribute attribute[0]<block_start><if_stmt>branch_attribute.tag<eq>"compareType"<block_start>branch["branch-compare-type"]=branch_attribute.text<block_end><elif_stmt>branch_attribute.tag<eq>"pattern"<block_start>branch["branch-pattern"]=branch_attribute.text<block_end><else_stmt><block_start><raise>NotImplementedError("Not implemented branch attribute: " branch_attribute.tag)<block_end><block_end>branches.append(branch)<block_end><else_stmt><block_start><raise>NotImplementedError("Not implemented branch type: " branch_type.tag)<block_end><block_end>project["branches"]=branches<block_end><elif_stmt>attribute.tag<eq>"disableStrictForbiddenFileVerification"<block_start>project["disable-strict-forbidden-file-verification"]=get_bool(attribute.text)<block_end><elif_stmt>attribute.tag<eq>"filePaths"<block_start>file_paths=__gerrit_process_file_paths(attribute)<line_sep>project["file-paths"]=file_paths<block_end><elif_stmt>attribute.tag<eq>"forbiddenFilePaths"<block_start>forbidden_file_paths=__gerrit_process_file_paths(attribute)<line_sep>project["forbidden-file-paths"]=forbidden_file_paths<block_end><elif_stmt>attribute.tag<eq>"topics"<block_start>topics=__gerrit_process_file_paths(attribute)<line_sep>project["topics"]=topics<block_end><else_stmt><block_start><raise>NotImplementedError("Not implemented attribute: " attribute.tag)<block_end><block_end>projects.append(project)<block_end><return>projects<block_end><def_stmt>__gerrit_process_trigger_on_events child<block_start>trigger_on=[]<line_sep>sonyericsson_prefix="com.sonyericsson.hudson.plugins.gerrit.trigger.hudsontrigger.events."<for_stmt>event 
child<block_start><if_stmt>event.tag<eq>sonyericsson_prefix+"PluginChangeAbandonedEvent"<block_start>trigger_on.append("change-abandoned-event")<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginChangeMergedEvent"<block_start>trigger_on.append("change-merged-event")<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginChangeRestoredEvent"<block_start>trigger_on.append("change-restored-event")<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginCommentAddedEvent"<block_start>comment_added_event={}<for_stmt>element event<block_start><if_stmt>element.tag<eq>"verdictCategory"<block_start>comment_added_event["approval-category"]=element.text<block_end><elif_stmt>element.tag<eq>"commentAddedTriggerApprovalValue"<block_start>comment_added_event["approval-value"]=element.text<block_end><block_end>trigger_on.append({"comment-added-event":comment_added_event})<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginCommentAddedContainsEvent"<block_start>trigger_on.append({"comment-added-contains-event":{"comment-contains-value":event[0].text}})<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginDraftPublishedEvent"<block_start>trigger_on.append("draft-published-event")<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginPatchsetCreatedEvent"<block_start>patchset_created_event={}<for_stmt>attribute event<block_start><if_stmt>attribute.tag<eq>"excludeDrafts"<block_start>patchset_created_event["exclude-drafts"]=get_bool(attribute.text)<block_end><elif_stmt>attribute.tag<eq>"excludeTrivialRebase"<block_start>patchset_created_event["exclude-trivial-rebase"]=get_bool(attribute.text)<block_end><elif_stmt>attribute.tag<eq>"excludeNoCodeChange"<block_start>patchset_created_event["exclude-no-code-change"]=get_bool(attribute.text)<block_end><elif_stmt>attribute.tag<eq>"excludePrivateState"<block_start>patchset_created_event["exclude-private"]=get_bool(attribute.text)<block_end><elif_stmt>attribute.tag<eq>"excludeWipState"<block_start>patchset_created_event["exclude-wip"]=get_bool(attribute.text)<block_end><block_end>trigger_on.append({"patchset-created-event":patchset_created_event})<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginPrivateStateChangedEvent"<block_start>trigger_on.append("private-state-changed-event")<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginRefUpdatedEvent"<block_start>trigger_on.append("ref-updated-event")<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginTopicChangedEvent"<block_start>trigger_on.append("topic-changed-event")<block_end><elif_stmt>event.tag<eq>sonyericsson_prefix+"PluginWipStateChangedEvent"<block_start>trigger_on.append("wip-state-changed-event")<block_end><block_end><return>trigger_on<block_end><def_stmt>gerrittrigger top parent<block_start>mapper=Mapper({"gerritBuildStartedVerifiedValue":("gerrit-build-started-verified-value" int) "gerritBuildStartedCodeReviewValue":("gerrit-build-started-codereview-value" int) "gerritBuildSuccessfulVerifiedValue":("gerrit-build-successful-verified-value" int) "gerritBuildSuccessfulCodeReviewValue":("gerrit-build-successful-codereview-value" int) "gerritBuildFailedVerifiedValue":("gerrit-build-failed-verified-value" int) "gerritBuildFailedCodeReviewValue":("gerrit-build-failed-codereview-value" int) "gerritBuildUnstableVerifiedValue":("gerrit-build-unstable-verified-value" int) "gerritBuildUnstableCodeReviewValue":("gerrit-build-unstable-codereview-value" int) "gerritBuildNotBuiltVerifiedValue":("gerrit-build-notbuilt-verified-value" int) 
"gerritBuildNotBuiltCodeReviewValue":("gerrit-build-notbuilt-codereview-value" int) "silentMode":("silent" bool) "silentStartMode":("silent-start" bool) "escapeQuotes":("escape-quotes" bool) "dependencyJobsNames":("dependency-jobs" str) "nameAndEmailParameterMode":("name-and-email-parameter-mode" str) "commitMessageParameterMode":("commit-message-parameter-mode" str) "changeSubjectParameterMode":("change-subject-parameter-mode" str) "commentTextParameterMode":("comment-text-parameter-mode" str) "buildStartMessage":("start-message" str) "buildFailureMessage":("failure-message" str) "buildSuccessfulMessage":("successful-message" str) "buildUnstableMessage":("unstable-message" str) "buildNotBuiltMessage":("notbuilt-message" str) "buildUnsuccessfulFilepath":("failure-message-file" str) "customUrl":("custom-url" str) "serverName":("server-name" str) "dynamicTriggerConfiguration":("dynamic-trigger-enabled" bool) "triggerConfigURL":("dynamic-trigger-url" str) })<line_sep>gerrit_trigger={}<for_stmt>child top<block_start><if_stmt>mapper.map_element(child gerrit_trigger)<block_start><pass># Handled by the mapper.
<block_end><elif_stmt>child.tag<eq>"gerritProjects"<block_start>gerrit_trigger["projects"]=__gerrit_process_gerrit_projects(child)<block_end><elif_stmt>child.tag<eq>"dynamicGerritProjects"<block_start><pass># No implementation by JJB
<block_end><elif_stmt>child.tag<eq>"spec"<block_start><pass># Not needed in yml
<block_end><elif_stmt>child.tag<eq>"skipVote"<block_start>skip_vote={}<for_stmt>attribute child<block_start><if_stmt>attribute.tag<eq>"onSuccessful"<block_start>skip_vote["successful"]=get_bool(attribute.text)<block_end><if_stmt>attribute.tag<eq>"onFailed"<block_start>skip_vote["failed"]=get_bool(attribute.text)<block_end><if_stmt>attribute.tag<eq>"onUnstable"<block_start>skip_vote["unstable"]=get_bool(attribute.text)<block_end><if_stmt>attribute.tag<eq>"onNotBuilt"<block_start>skip_vote["notbuilt"]=get_bool(attribute.text)<block_end><block_end>gerrit_trigger["skip-vote"]=skip_vote<block_end><elif_stmt>child.tag<eq>"notificationLevel"<block_start><if_stmt>child.text<is><none><block_start>gerrit_trigger["notification-level"]="NONE"<block_end><else_stmt><block_start>gerrit_trigger["notification-level"]=child.text<block_end><block_end><elif_stmt>child.tag<eq>"triggerOnEvents"<block_start>gerrit_trigger["trigger-on"]=__gerrit_process_trigger_on_events(child)<block_end><elif_stmt>child.tag<eq>"gerritTriggerTimerTask"<block_start><pass># Unconfigurable Attribute
<block_end><elif_stmt>child.tag<eq>"triggerInformationAction"<block_start><pass># Unconfigurable Attribute
<block_end><else_stmt><block_start><raise>NotImplementedError("Not implemented Gerrit Trigger Plugin's attribute: " child.tag)<block_end><block_end>parent.append({'gerrit':gerrit_trigger})<block_end><def_stmt>githubpushtrigger top parent<block_start>parent.append('github')<block_end><def_stmt>ghprbtrigger top parent<block_start>ghpr={}<for_stmt>child top<block_start><if_stmt>child.tag<eq>'spec'<or>child.tag<eq>'cron'<block_start>ghpr['cron']=child.text<block_end><elif_stmt>child.tag<eq>'adminlist'<and>child.text<block_start>ghpr['admin-list']=child.text.strip().split('\n')<block_end><elif_stmt>child.tag<eq>'allowMembersOfWhitelistedOrgsAsAdmin'<block_start>ghpr['allow-whitelist-orgs-as-admins']=get_bool(child.text)<block_end><elif_stmt>child.tag<eq>'whitelist'<and>child.text<is><not><none><block_start>ghpr['white-list']=child.text.strip().split('\n')<block_end><elif_stmt>child.tag<eq>'orgslist'<and>child.text<is><not><none><block_start>ghpr['org-list']=child.text.strip().split('\n')<block_end><elif_stmt>child.tag<eq>'buildDescTemplate'<block_start>ghpr['build-desc-template']=child.text<block_end><elif_stmt>child.tag<eq>'triggerPhrase'<block_start>ghpr['trigger-phrase']=child.text<block_end><elif_stmt>child.tag<eq>'onlyTriggerPhrase'<block_start>ghpr['only-trigger-phrase']=get_bool(child.text)<block_end><elif_stmt>child.tag<eq>'useGitHubHooks'<block_start>ghpr['github-hooks']=get_bool(child.text)<block_end><elif_stmt>child.tag<eq>'permitAll'<block_start>ghpr['permit-all']=get_bool(child.text)<block_end><elif_stmt>child.tag<eq>'autoCloseFailedPullRequests'<block_start>ghpr['auto-close-on-fail']=get_bool(child.text)<block_end><elif_stmt>child.tag<eq>'whiteListTargetBranches'<block_start>ghpr['white-list-target-branches']=[]<for_stmt>branch child<block_start><if_stmt>branch[0].text<is><not><none><block_start>ghpr['white-list-target-branches'].append(branch[0].text.strip())<block_end><block_end><block_end><elif_stmt>child.tag<eq>'gitHubAuthId'<block_start>ghpr['auth-id']=child.text<block_end><block_end>parent.append({'github-pull-request':ghpr})<block_end> |
# coding: utf-8
<import_stmt>datetime<import_stmt>pytest<import_stmt>numpy<as>np<import_from_stmt>...models.transition.linear ConstantVelocity<import_from_stmt>...predictor.information InformationKalmanPredictor<import_from_stmt>...predictor.kalman KalmanPredictor<import_from_stmt>...types.state InformationState GaussianState<import_from_stmt>...types.array StateVector CovarianceMatrix<line_sep>@pytest.mark.parametrize("PredictorClass, transition_model, prior_mean, prior_covar" [(# Standard Kalman
InformationKalmanPredictor ConstantVelocity(noise_diff_coeff=0.1) StateVector([-6.45 0.7]) CovarianceMatrix([[4.1123 0.0013] [0.0013 0.0365]]))] ids=["standard"])<def_stmt>test_information PredictorClass transition_model prior_mean prior_covar# Define time related variables
<block_start>timestamp=datetime.datetime.now()<line_sep>timediff=2# 2sec
new_timestamp=timestamp+datetime.timedelta(seconds=timediff)<line_sep># First do prediction in standard way
test_state=GaussianState(prior_mean prior_covar timestamp=timestamp)<line_sep>test_predictor=KalmanPredictor(transition_model)<line_sep>test_prediction=test_predictor.predict(test_state timestamp=new_timestamp)<line_sep># define the precision matrix and information state
precision_matrix=np.linalg.inv(prior_covar)<line_sep>info_state_mean=precision_matrix@prior_mean<line_sep># Define prior information state
prior=InformationState(info_state_mean precision_matrix timestamp=timestamp)<line_sep># Initialise a Information filter predictor
predictor=PredictorClass(transition_model=transition_model)<line_sep># Perform and assert state prediction
prediction=predictor.predict(prior=prior timestamp=new_timestamp)<line_sep># reconstruct the state vector and covariance matrix
pred_covar=np.linalg.inv(prediction.precision)<line_sep>[email protected]_vector<line_sep># And do the tests
<assert_stmt>(np.allclose(predictor._transition_function(prior time_interval=new_timestamp-timestamp) test_prediction.state_vector 0 atol=1e-14))<assert_stmt>(np.allclose(pred_mean test_prediction.state_vector 0 atol=1.e-14))<assert_stmt>(np.allclose(pred_covar test_prediction.covar 0 atol=1.e-14))<assert_stmt>(prediction.timestamp<eq>new_timestamp)<line_sep># test that we can get to the inverse matrix
<class_stmt>ConstantVelocitywithInverse(ConstantVelocity)<block_start><def_stmt>inverse_matrix self **kwargs<block_start><return>np.linalg.inv(self.matrix(**kwargs))<block_end><block_end>transition_model_winv=ConstantVelocitywithInverse(noise_diff_coeff=0.1)<line_sep>predictor_winv=PredictorClass(transition_model_winv)<line_sep># Test this still works
prediction_from_inv=predictor_winv.predict(prior=prior timestamp=new_timestamp)<assert_stmt>(np.allclose(prediction.state_vector prediction_from_inv.state_vector 0 atol=1.e-14))<line_sep># TODO: Test with Control Model
<block_end> |
<import_from_stmt>. yolov3<line_sep> |
<import_stmt>hues.dpda<as>DPDA<def_stmt>test_zero_negation <block_start>func=DPDA.zero_break<assert_stmt>func((1 2 3 4 0 10 1))<eq>(10 1)<assert_stmt>func((1 2 3 4 5 0))<eq>tuple()<block_end><def_stmt>test_order_annihilation <block_start>func=DPDA.annihilate<assert_stmt>func(range(0 10) (1 2 3 4 4 3))<eq>(3 )<assert_stmt>func(range(5 12) (1 2 10 11 11 2))<eq>(1 2 2 11)<block_end><def_stmt>test_built_order_annihilation <block_start>f1=DPDA.annihilator(range(5 12))<assert_stmt>f1((1 2 10 11 11 2))<eq>(1 2 2 11)<block_end><def_stmt>test_dedup <block_start>func=DPDA.dedup<assert_stmt>func((1 2 3 3 4 2 1 3 5))<eq>(1 2 3 4 5)<block_end><def_stmt>test_chaining <block_start>funcs=(DPDA.zero_break # Take the last non-reset subset
DPDA.annihilator(range(5)) # Between 0 and 5, keep the last one
DPDA.annihilator(range(10 15)) # Between 10 and 15, keep the last one
DPDA.dedup # Finally remove duplicates
)<line_sep>stack=(1 2 3 2 2 0 1 2 3 2 5 5 11 3 15 14)<line_sep>expected=(5 15 3 14)<assert_stmt>DPDA.apply(funcs stack)<eq>expected<assert_stmt>DPDA.apply(funcs (1 1 0))<eq>tuple()<block_end> |
"""
radish
~~~~~~
The root from red to green. BDD tooling for Python.
:copyright: (c) 2019 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""<import_stmt>threading<line_sep>world=threading.local()<line_sep>world.__doc__="""Thread-local radish contex object
This object can be used to attach arbitrary data like
variables, functions and other objects which
can be accessed later in Step Implementations and Hooks.
However, it's preferred to use scoped contexts
like :attr:`radish.Step.context`, :attr:`Scenario.context`
or :attr:`Feature.context` for data.
"""<def_stmt>pick func<block_start>"""Add the given function to the ``world`` object
This can be used to access helper functions more easily in Steps and Hooks.
"""<line_sep>setattr(world func.__name__ func)<line_sep><return>func<block_end>world.pick=pick<line_sep> |
<import_from_stmt>sandbox.rocky.tf.q_functions.base QFunction<import_stmt>sandbox.rocky.tf.core.layers<as>L<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<import_from_stmt>rllab.core.serializable Serializable<import_from_stmt>sandbox.rocky.tf.core.layers_powered LayersPowered<import_from_stmt>sandbox.rocky.tf.misc tensor_utils<import_from_stmt>sandbox.rocky.tf.policies.base StochasticPolicy<class_stmt>NAFMLPQFunction(QFunction LayersPowered Serializable)<block_start><def_stmt>__init__ self env_spec name='nafqnet' hidden_sizes=(32 32) hidden_nonlinearity=tf.nn.relu action_merge_layer=0 output_nonlinearity=<none> hidden_W_init=L.XavierUniformInitializer() hidden_b_init=L.ZerosInitializer() output_W_init=L.XavierUniformInitializer() output_b_init=L.ZerosInitializer() bn=<false><block_start>Serializable.quick_init(self locals())<assert_stmt><not>env_spec.action_space.is_discrete<line_sep>action_dim=env_spec.action_space.flat_dim<line_sep>self._action_dim=action_dim<line_sep>self._env_spec=env_spec<line_sep>n_layers=len(hidden_sizes)<line_sep>action_merge_layer=(action_merge_layer%n_layers+n_layers)%n_layers<with_stmt>tf.variable_scope(name)<block_start>l_obs=L.InputLayer(shape=(<none> env_spec.observation_space.flat_dim) name="obs")<line_sep>l_action=L.InputLayer(shape=(<none> env_spec.action_space.flat_dim) name="actions")<line_sep>l_policy_mu=L.InputLayer(shape=(<none> action_dim) name="policy_mu")<line_sep>l_policy_sigma=L.InputLayer(shape=(<none> action_dim action_dim) name="policy_sigma")<line_sep>l_hidden=l_obs<line_sep>idx=0<line_sep>l_hidden_kwargs=dict(W=hidden_W_init b=hidden_b_init nonlinearity=hidden_nonlinearity )<line_sep>l_output_kwargs=dict(W=output_W_init b=output_b_init )<while_stmt>idx<l>action_merge_layer<block_start><if_stmt>bn<block_start>l_hidden=L.batch_norm(l_hidden)<block_end>l_hidden=L.DenseLayer(l_hidden num_units=hidden_sizes[idx] name="h%d"%(idx+1) **l_hidden_kwargs )<line_sep>idx<augadd>1<block_end>_idx=idx<line_sep>_l_hidden=l_hidden<line_sep># compute L network
<while_stmt>idx<l>n_layers<block_start><if_stmt>bn<block_start>l_hidden=L.batch_norm(l_hidden)<block_end>l_hidden=L.DenseLayer(l_hidden num_units=hidden_sizes[idx] name="L_h%d"%(idx+1) **l_hidden_kwargs )<line_sep>idx<augadd>1<block_end>l_L=L.DenseLayer(l_hidden num_units=action_dim<power>2 nonlinearity=<none> name="L_h%d"%(idx+1) **l_output_kwargs )<line_sep># compute V network
idx=_idx<line_sep>l_hidden=_l_hidden<while_stmt>idx<l>n_layers<block_start><if_stmt>bn<block_start>l_hidden=L.batch_norm(l_hidden)<block_end>l_hidden=L.DenseLayer(l_hidden num_units=hidden_sizes[idx] name="V_h%d"%(idx+1) **l_hidden_kwargs )<line_sep>idx<augadd>1<block_end>l_V=L.DenseLayer(l_hidden num_units=1 nonlinearity=<none> name="V_h%d"%(idx+1) **l_output_kwargs )<line_sep># compute mu network
idx=_idx<line_sep>l_hidden=_l_hidden<while_stmt>idx<l>n_layers<block_start><if_stmt>bn<block_start>l_hidden=L.batch_norm(l_hidden)<block_end>l_hidden=L.DenseLayer(l_hidden num_units=hidden_sizes[idx] name="mu_h%d"%(idx+1) **l_hidden_kwargs )<line_sep>idx<augadd>1<block_end><if_stmt>bn<block_start>l_hidden=L.batch_norm(l_hidden)<block_end>l_mu=L.DenseLayer(l_hidden num_units=action_dim nonlinearity=tf.nn.tanh name="mu_h%d"%(idx+1) **l_output_kwargs )<line_sep>L_var,V_var,mu_var=L.get_output([l_L l_V l_mu] deterministic=<true>)<line_sep>V_var=tf.reshape(V_var (-1 ))<line_sep># compute advantage
L_mat_var=self.get_L_sym(L_var)<line_sep>P_var=self.get_P_sym(L_mat_var)<line_sep>A_var=self.get_A_sym(P_var mu_var l_action.input_var)<line_sep># compute Q
Q_var=A_var+V_var<line_sep># compute expected Q under Gaussian policy
e_A_var=self.get_e_A_sym(P_var mu_var l_policy_mu.input_var l_policy_sigma.input_var)<line_sep>e_Q_var=e_A_var+V_var<line_sep>self._f_qval=tensor_utils.compile_function([l_obs.input_var l_action.input_var] Q_var)<line_sep>self._f_e_qval=tensor_utils.compile_function([l_obs.input_var l_policy_mu.input_var l_policy_sigma.input_var] e_Q_var)<line_sep>self._L_layer=l_L<line_sep>self._V_layer=l_V<line_sep>self._mu_layer=l_mu<line_sep>self._obs_layer=l_obs<line_sep>self._action_layer=l_action<line_sep>self._policy_mu_layer=l_policy_mu<line_sep>self._policy_sigma_layer=l_policy_sigma<line_sep>self._output_nonlinearity=output_nonlinearity<line_sep>self.init_policy()<line_sep>LayersPowered.__init__(self [l_L l_V l_mu])<block_end><block_end><def_stmt>init_policy self<block_start><pass><block_end><def_stmt>get_L_sym self L_vec_var<block_start>L=tf.reshape(L_vec_var (-1 self._action_dim self._action_dim))<line_sep><return>tf.matrix_band_part(L -1 0)-tf.matrix_diag(tf.matrix_diag_part(L))+tf.matrix_diag(tf.exp(tf.matrix_diag_part(L)))<block_end><def_stmt>get_P_sym self L_mat_var<block_start><return>tf.matmul(L_mat_var tf.matrix_transpose(L_mat_var))<block_end><def_stmt>get_e_A_sym self P_var mu_var policy_mu_var policy_sigma_var<block_start>e_A_var1=self.get_A_sym(P_var mu_var policy_mu_var)<line_sep>e_A_var2=-0.5<times>tf.reduce_sum(tf.matrix_diag_part(tf.matmul(P_var policy_sigma_var)) 1)<line_sep>#e_A_var2 = - 0.5 * tf.trace(tf.matmul(P_var, policy_sigma_var))
<return>e_A_var1+e_A_var2<block_end><def_stmt>get_A_sym self P_var mu_var action_var<block_start>delta_var=action_var-mu_var<line_sep>delta_mat_var=tf.reshape(delta_var (-1 self._action_dim 1))<line_sep>P_delta_var=tf.squeeze(tf.matmul(P_var delta_mat_var) [2])<line_sep><return>-0.5<times>tf.reduce_sum(delta_var<times>P_delta_var 1)<block_end><def_stmt>get_qval self observations actions<block_start>qvals=self._f_qval(observations actions)<line_sep><return>qvals<block_end><def_stmt>get_output_sym self obs_var **kwargs<block_start>L_var,V_var,mu_var=L.get_output([self._L_layer self._V_layer self._mu_layer] {self._obs_layer:obs_var} **kwargs)<line_sep>V_var=tf.reshape(V_var (-1 ))<line_sep><return>L_var V_var mu_var<block_end><def_stmt>_get_qval_sym self obs_var action_var **kwargs<block_start>L_var,V_var,mu_var=self.get_output_sym(obs_var **kwargs)<line_sep>L_mat_var=self.get_L_sym(L_var)<line_sep>P_var=self.get_P_sym(L_mat_var)<line_sep>A_var=self.get_A_sym(P_var mu_var action_var)<line_sep>Q_var=A_var+V_var<line_sep><return>Q_var A_var V_var<block_end><def_stmt>get_qval_sym self obs_var action_var **kwargs<block_start><return>self._get_qval_sym(obs_var action_var **kwargs)[0]<block_end><def_stmt>get_e_qval self observations policy<block_start><if_stmt>isinstance(policy StochasticPolicy)<block_start>agent_info=policy.dist_info(observations)<line_sep>mu,log_std=agent_info['mean'] agent_info["log_std"]<line_sep>std=np.array([np.diag(x)<for>x np.exp(log_std)] dtype=log_std.dtype)<line_sep>qvals=self._f_e_qval(observations mu std)<block_end><else_stmt><block_start>actions,_=policy.get_actions(observations)<line_sep>qvals=self.get_qval(observations actions)<block_end><return>qvals<block_end><def_stmt>get_e_qval_sym self obs_var policy **kwargs<block_start><if_stmt>isinstance(policy StochasticPolicy)<block_start>agent_info=policy.dist_info_sym(obs_var)<line_sep>mu,log_std=agent_info['mean'] agent_info["log_std"]<line_sep>std=tf.matrix_diag(tf.exp(log_std))<line_sep>L_var,V_var,mu_var=self.get_output_sym(obs_var **kwargs)<line_sep>L_mat_var=self.get_L_sym(L_var)<line_sep>P_var=self.get_P_sym(L_mat_var)<line_sep>A_var=self.get_e_A_sym(P_var mu_var mu std)<line_sep>qvals=A_var+V_var<block_end><else_stmt><block_start>mu=policy.get_action_sym(obs_var)<line_sep>qvals=self.get_qval_sym(obs_var mu **kwargs)<block_end><return>qvals<block_end><def_stmt>get_cv_sym self obs_var action_var policy **kwargs#_, avals, _ = self._get_qval_sym(obs_var, action_var, **kwargs)
<block_start>qvals=self.get_qval_sym(obs_var action_var **kwargs)<line_sep>e_qvals=self.get_e_qval_sym(obs_var policy **kwargs)<line_sep>avals=qvals-e_qvals<line_sep><return>avals<block_end><block_end> |
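A small NumPy sketch (illustrative values, not from the source) of the quadratic NAF advantage computed above: L is rebuilt as a lower-triangular matrix with an exponentiated diagonal, P = L L^T, and A(s, a) = -0.5 (a - mu)^T P (a - mu), so that Q = A + V.

import numpy as np

action_dim = 2
L_vec = np.array([0.1, 0.0, 0.3, -0.2])             # raw output of length action_dim**2
L = np.tril(L_vec.reshape(action_dim, action_dim))  # keep the lower-triangular part
np.fill_diagonal(L, np.exp(np.diag(L)))             # exponentiate the diagonal, as in get_L_sym
P = L @ L.T                                         # positive-definite matrix, as in get_P_sym
mu = np.array([0.5, -0.5])                          # mean action from the mu network
action = np.array([0.7, -0.1])
delta = action - mu
A = -0.5 * delta @ P @ delta                        # advantage, as in get_A_sym; Q = A + V
print(A)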
# Copyright 2021, 2022 Cambridge Quantum Computing Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_future_stmt> annotations<line_sep>__all__=['Tokeniser']<import_from_stmt>abc ABC abstractmethod<import_from_stmt>collections.abc Iterable<class_stmt>Tokeniser(ABC)<block_start>"""Base Class for all tokenisers"""<line_sep>@abstractmethod<def_stmt>split_sentences self text:str<arrow>list[str]<block_start>"""Split input text into a list of sentences.
Parameters
----------
text : str
A single string that contains one or multiple sentences.
Returns
-------
list of str
List of sentences, one sentence in each string.
"""<block_end>@abstractmethod<def_stmt>tokenise_sentences self sentences:Iterable[str]<arrow>list[list[str]]<block_start>"""Tokenise a list of sentences.
Parameters
----------
sentences : list of str
A list of untokenised sentences.
Returns
-------
list of list of str
A list of tokenised sentences. Each sentence is given as a list
of tokens - strings
"""<block_end><def_stmt>tokenise_sentence self sentence:str<arrow>list[str]<block_start>"""Tokenise a sentence.
Parameters
----------
sentence : str
An untokenised sentence.
Returns
-------
list of str
A tokenised sentence given as a list of tokens - strings.
"""<line_sep><return>self.tokenise_sentences([sentence])[0]<block_end><block_end> |
<import_stmt>os<import_stmt>subprocess<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>multiprocessing Pool cpu_count<def_stmt>plot_middle data slice_no=<none><block_start><if_stmt><not>slice_no<block_start>slice_no=data.shape[-1]<floordiv>2<block_end>plt.figure()<line_sep>plt.imshow(data[<ellipsis> slice_no] cmap="gray")<line_sep>plt.show()<line_sep><return><block_end><def_stmt>registration src_path dst_path ref_path<block_start>command=["flirt" "-in" src_path "-ref" ref_path "-out" dst_path "-bins" "256" "-cost" "corratio" "-searchrx" "0" "0" "-searchry" "0" "0" "-searchrz" "0" "0" "-dof" "12" "-interp" "spline"]<line_sep>subprocess.call(command stdout=open(os.devnull "r") stderr=subprocess.STDOUT)<line_sep><return><block_end><def_stmt>orient2std src_path dst_path<block_start>command=["fslreorient2std" src_path dst_path]<line_sep>subprocess.call(command)<line_sep><return><block_end><def_stmt>create_dir path<block_start><if_stmt><not>os.path.isdir(path)<block_start>os.makedirs(path)<block_end><return><block_end><def_stmt>unwarp_main arg **kwarg<block_start><return>main(*arg **kwarg)<block_end><def_stmt>main src_path dst_path ref_path<block_start>print("Registration on: " src_path)<try_stmt><block_start>orient2std(src_path dst_path)<line_sep>registration(dst_path dst_path ref_path)<block_end><except_stmt>RuntimeError<block_start>print("\tFailed on: " src_path)<block_end><return><block_end>parent_dir=os.path.dirname(os.getcwd())<line_sep>data_dir=os.path.join(parent_dir "data")<line_sep>data_src_dir=os.path.join(data_dir "ADNI")<line_sep>data_dst_dir=os.path.join(data_dir "ADNIReg")<line_sep>data_labels=["AD" "NC"]<line_sep>create_dir(data_dst_dir)<line_sep>ref_path=os.path.join(data_dir "Template" "MNI152_T1_1mm.nii.gz")<line_sep># ref_path = os.path.join(data_dir, "Template", "MNI152_T1_1mm_brain.nii.gz")
data_src_paths,data_dst_paths=[] []<for_stmt>label data_labels<block_start>src_label_dir=os.path.join(data_src_dir label)<line_sep>dst_label_dir=os.path.join(data_dst_dir label)<line_sep>create_dir(dst_label_dir)<for_stmt>subject os.listdir(src_label_dir)<block_start>data_src_paths.append(os.path.join(src_label_dir subject))<line_sep>data_dst_paths.append(os.path.join(dst_label_dir subject))<block_end><block_end># Test
# main(data_src_paths[0], data_dst_paths[0], ref_path)
# Multi-processing
paras=zip(data_src_paths data_dst_paths [ref_path]<times>len(data_src_paths))<line_sep>pool=Pool(processes=cpu_count())<line_sep>pool.map(unwarp_main paras)<line_sep> |
# Author: <NAME>
# Email: <EMAIL>
# License: MIT License
<import_from_stmt>.exp_imp_based_opt ExpectedImprovementBasedOptimization<import_from_stmt>.surrogate_models GPR_linear GPR <line_sep>gaussian_process={"gp_nonlinear":GPR() "gp_linear":GPR_linear()}<class_stmt>BayesianOptimizer(ExpectedImprovementBasedOptimization)<block_start><def_stmt>__init__ self search_space initialize={"grid":4 "random":2 "vertices":4} gpr=gaussian_process["gp_nonlinear"] xi=0.03 warm_start_smbo=<none> max_sample_size=10000000 sampling={"random":1000000} warnings=100000000 rand_rest_p=0.03 <block_start>super().__init__(search_space initialize)<line_sep>self.gpr=gpr<line_sep>self.regr=gpr<line_sep>self.xi=xi<line_sep>self.warm_start_smbo=warm_start_smbo<line_sep>self.max_sample_size=max_sample_size<line_sep>self.sampling=sampling<line_sep>self.warnings=warnings<line_sep>self.rand_rest_p=rand_rest_p<line_sep>self.init_position_combinations()<line_sep>self.init_warm_start_smbo()<block_end><block_end> |
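A hedged usage sketch: selecting the linear surrogate from the ``gaussian_process`` mapping defined above; ``search_space`` and the keyword values are illustrative only.

# opt = BayesianOptimizer(
#     search_space,
#     initialize={"grid": 4, "random": 2, "vertices": 4},
#     gpr=gaussian_process["gp_linear"],
#     xi=0.1,
# )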
# Copyright (c) 2020 Graphcore Ltd. All rights reserved.
<import_stmt>argparse<import_stmt>numpy<as>np<import_stmt>popart<import_stmt>json<import_stmt>os<import_stmt>copy<import_stmt>logging_util<import_stmt>text_utils<line_sep># set up logging
logger=logging_util.get_basic_logger(__name__)<def_stmt>add_conf_args run_mode<block_start>""" define the argument parser object """<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--batch_size' type=int default=2 help='Set batch size for training.')<line_sep>parser.add_argument('--batch_size_for_inference' type=int default=12 help='Set batch size for inference.')<line_sep>parser.add_argument('--dataset' type=str choices=['VCTK'] default='VCTK' help='Choose which dataset to process')<line_sep>parser.add_argument('--no_pre_load_data' action="store_true" default=<false> help="do not pre-load the full data-set into memory")<if_stmt>run_mode<eq>'training'<block_start>parser.add_argument('--model_dir' type=str required=<true> help='Path to save model checkpoints during training')<line_sep>parser.add_argument('--data_dir' type=str required=<true> help='Path to data')<block_end><elif_stmt>run_mode<in>['autoregressive_synthesis' 'prep_autoregressive_graph']<block_start>parser.add_argument('--inference_model_dir' type=str required=<true> help='Path to directory where inference model is saved')<block_end><if_stmt>run_mode<in>['prep_autoregressive_graph' 'non_autoregressive_synthesis']<block_start>parser.add_argument('--trained_model_file' type=str required=<true> help='Path to onnx file for trained model')<block_end><if_stmt>'synthesis'<in>run_mode# autoregressive or non-autoregressive
<block_start>parser.add_argument('--sentence' type=str required=<true> help='Text to synthesize speech')<line_sep>parser.add_argument('--results_path' type=str required=<true> help='Path to save results files')<block_end>parser.add_argument('--batches_per_step' type=int default=50 help="How many mini-batches to perform on the device before returning to the host.")<line_sep>parser.add_argument('--num_epochs' type=int default=5000 help="Number of training epochs")<line_sep>parser.add_argument('--init_lr' type=float default=0.05 help="Initial learning rate")<line_sep>parser.add_argument('--checkpoint_interval' type=int default=10 help="How many epochs to complete before checkpointing")<line_sep>parser.add_argument('--validation_interval' type=int default=10 help="How many epochs to complete before running validation")<line_sep>parser.add_argument('--not_multi_thread_dataloader' action="store_true" default=<false> help="Disable multi threaded data loading")<line_sep>parser.add_argument('--num_threads' type=int default=32 help="The number of threads to be used to load data")<line_sep>parser.add_argument('--replication_factor' type=int default=1 help="Number of times to replicate the graph to perform data parallel "<concat>"training or inference. Must be a factor of the number of IPUs")<line_sep>parser.add_argument('--simulation' action="store_true" help="Run the program on the IPU Model")<line_sep>parser.add_argument('--select_ipu' type=str default="AUTO" help="Select IPU: either AUTO or a valid IPU ID")<line_sep>parser.add_argument('--num_ipus' type=int default=1 help="Number of IPUs")<line_sep>parser.add_argument('--recompute' action="store_true" default=<false> help="Enable recomputations of activations in backward pass")<line_sep>parser.add_argument('--prng' action="store_true" default=<true> help="Enable Stochastic Rounding")<line_sep>parser.add_argument('--fp_exceptions' action="store_true" default=<false> help="Enable floating point exception")<line_sep>parser.add_argument('--no_validation' action="store_true" help="Do not do any validation runs.")<line_sep>parser.add_argument('--proportion_train_set' type=float default=0.80 help="Proportion of training set [0.0-1.0]")<line_sep>parser.add_argument('--generated_data' action="store_true" default=<false> help="Enable random data generation for benchmarking")<line_sep>parser.add_argument('--num_io_tiles' type=int default=0 help="Number of IO tiles")<line_sep><return>parser<block_end><def_stmt>get_conf parser<block_start>""" parse the arguments and set the model configuration parameters """<line_sep>conf=parser.parse_args()<line_sep># For the deep-voice model, numerical stability issues were observed with FP16
# (hence we don't support FP16)
conf.precision=np.float32<if_stmt>conf.select_ipu<ne>'AUTO'<block_start>conf.select_ipu=int(conf.select_ipu)<block_end># The number of samples that each device will process (for training)
conf.samples_per_device=int(conf.batch_size/conf.replication_factor)<line_sep># The number of samples that each device will process (for inference)
conf.samples_per_device_for_inference=int(conf.batch_size_for_inference/conf.replication_factor)<line_sep>set_model_conf(conf)<line_sep><return>conf<block_end><def_stmt>set_model_conf conf print_model_conf=<true><block_start>""" set the model configuration parameters """<if_stmt>conf.dataset<eq>'VCTK'<block_start>conf_path=os.path.join(os.path.dirname(os.path.realpath(__file__)) "vctk_model_conf.json")<line_sep>logger.info("Loading model configuration from {}".format(conf_path))<with_stmt>open(conf_path)<as>f<block_start>model_conf=json.load(f)<block_end><block_end>conf.num_symbols=len(text_utils.symbols)<for_stmt>k model_conf.keys()<block_start>setattr(conf k model_conf[k])<block_end>conf.max_spectrogram_length=int((conf.max_duration_secs<times>conf.sample_rate)/(conf.hop_length<times>conf.n_frames_per_pred))<if_stmt>print_model_conf<block_start>logger.info("Model configuration params:")<line_sep>logger.info(json.dumps(serialize_model_conf(conf) sort_keys=<true> indent=4))<block_end><return>conf<block_end><def_stmt>serialize_model_conf conf<block_start>""" convert configuration object into json serializable object """<line_sep>conf_dict=copy.copy(vars(conf))<line_sep>conf_dict['precision']=32<if>conf_dict['precision']<eq>np.float32<else>np.float16<line_sep><return>conf_dict<block_end><def_stmt>get_device conf<block_start>""" Acquire IPU device """<line_sep>device_manager=popart.DeviceManager()<if_stmt>conf.simulation<block_start>logger.info("Creating ipu sim")<line_sep>ipu_options={"compileIPUCode":<true> 'numIPUs':conf.num_ipus "tilesPerIPU":1216}<line_sep>device=device_manager.createIpuModelDevice(ipu_options)<if_stmt>device<is><none><block_start><raise>OSError("Failed to acquire IPU.")<block_end><block_end><else_stmt><block_start>logger.info("Acquiring IPU")<if_stmt>conf.select_ipu<eq>'AUTO'<block_start>device=device_manager.acquireAvailableDevice(conf.num_ipus)<block_end><else_stmt><block_start>device=device_manager.acquireDeviceById(conf.select_ipu)<block_end><if_stmt>device<is><none><block_start><raise>OSError("Failed to acquire IPU.")<block_end><else_stmt><block_start>logger.info("Acquired IPU: {}".format(device))<block_end><block_end><return>device<block_end><def_stmt>get_session_options opts<block_start>""" get popart session options """<line_sep># Create a session to compile and execute the graph
options=popart.SessionOptions()<line_sep>options.engineOptions={"debug.allowOutOfMemory":"true"}<line_sep># Enable the reporting of variables in the summary report
options.reportOptions={'showVarStorage':'true'}<if_stmt>opts.fp_exceptions# Enable exception on floating point errors
<block_start>options.enableFloatingPointChecks=<true><block_end><if_stmt>opts.prng<block_start>options.enableStochasticRounding=<true><block_end># Need to disable constant weights so they can be set before
# executing the inference session
options.constantWeights=<false><line_sep># Enable recomputation
<if_stmt>opts.recompute<block_start>options.autoRecomputation=popart.RecomputationType.Standard<block_end># Enable auto-sharding
<if_stmt>opts.num_ipus<g>1<and>opts.num_ipus<g>opts.replication_factor<block_start>options.enableVirtualGraphs=<true><line_sep>options.virtualGraphMode=popart.VirtualGraphMode.Auto<block_end><if_stmt>opts.replication_factor<g>1<block_start>options.enableReplicatedGraphs=<true><line_sep>options.replicatedGraphCount=opts.replication_factor<line_sep># Enable merge updates
options.mergeVarUpdate=popart.MergeVarUpdateType.AutoLoose<line_sep>options.mergeVarUpdateMemThreshold=6000000<block_end><if_stmt>opts.num_io_tiles<g>0<block_start>options.enableExplicitMainLoops=<true><line_sep>options.useHostCopyOps=<true><line_sep>options.numIOTiles=opts.num_io_tiles<line_sep>options.virtualGraphMode=popart.VirtualGraphMode.Auto<line_sep># Both true & false should work - testing with false to avoid
# host-cycle-overhead
options.rearrangeAnchorsOnHost=<false><line_sep>options.rearrangeStreamsOnHost=<false><block_end><return>options<block_end><def_stmt>create_session_anchors proto loss device dataFlow options training optimizer=<none><block_start>""" Create the desired session and compile the graph """<if_stmt>training<block_start>session_type="training"<line_sep>session=popart.TrainingSession(fnModel=proto loss=loss deviceInfo=device optimizer=optimizer dataFlow=dataFlow userOptions=options)<block_end><else_stmt><block_start>session_type="validation"<line_sep>session=popart.InferenceSession(fnModel=proto deviceInfo=device dataFlow=dataFlow userOptions=options)<block_end>logger.info("Preparing the {} graph".format(session_type))<line_sep>session.prepareDevice()<line_sep>logger.info("{0} graph preparation complete.".format(session_type.capitalize() ))<line_sep># Create buffers to receive results from the execution
anchors=session.initAnchorArrays()<line_sep><return>session anchors<block_end> |
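A hedged sketch of how these helpers are typically chained for a run; the flags, variables and file names are illustrative, not taken from the source.

# parser = add_conf_args(run_mode='training')
# conf = get_conf(parser)                 # parses CLI args and loads vctk_model_conf.json
# device = get_device(conf)               # acquires an IPU, or the IPU Model with --simulation
# options = get_session_options(conf)     # popart.SessionOptions tuned by the flags above
# session, anchors = create_session_anchors(proto, loss, device, dataFlow,
#                                           options, training=True, optimizer=optimizer)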
<import_from_stmt>requests.utils requote_uri<import_from_stmt>base64 b64decode<import_from_stmt>hashlib md5<import_from_stmt>Crypto.Cipher AES<class_stmt>TwistSourceDecryptor<block_start>BLOCK_SIZE=16<line_sep>SECRET_KEY=b'<KEY>'<def_stmt>__init__ self enc_src<block_start>self.enc_src=enc_src.encode('utf-8')<block_end><def_stmt>__pad self data<block_start>length=self.BLOCK_SIZE-(len(data)%self.BLOCK_SIZE)<line_sep><return>data+(chr(length)<times>length).encode()<block_end><def_stmt>__unpad self data# print(data[-1])
<block_start><return>data[:-(data[-1]<if>type(data[-1])<eq>int<else>ord(data[-1]))]<block_end><def_stmt>__get_key_iv self data salt output=48<block_start><assert_stmt>len(salt)<eq>8 len(salt)<line_sep>data<augadd>salt<line_sep>key=md5(data).digest()<line_sep>key_iv_data=key<while_stmt>len(key_iv_data)<l>output<block_start>key=md5(key+data).digest()<line_sep>key_iv_data<augadd>key<block_end><return>key_iv_data[:output]<block_end><def_stmt>decrypt self<block_start>enc_data=b64decode(self.enc_src)<line_sep># print("b64decode enc :", enc_data)
<assert_stmt>enc_data[:8]<eq>b'Salted__'<line_sep>salt=enc_data[8:16]# 8byte salt
key_iv=self.__get_key_iv(self.SECRET_KEY salt)# key+iv is 48bytes
key=key_iv[:32]# key is 32byte
iv=key_iv[32:]# 16byte iv
# print("key :", key)
# print("iv :", iv)
aes=AES.new(key AES.MODE_CBC iv)<line_sep>decrypt_data=aes.decrypt(enc_data[16:])# actual data are after first 16bytes (which is salt)
decrypt_data=self.__unpad(decrypt_data).decode('utf-8').lstrip(' ')<line_sep># print(decrypt_data)
<return>requote_uri(decrypt_data)<block_end><block_end># parse to url safe value
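# Note (hedged): __get_key_iv above mirrors OpenSSL's MD5-based EVP_BytesToKey,
# i.e. D1 = MD5(passphrase + salt), Di = MD5(D(i-1) + passphrase + salt); the first
# 32 of the 48 derived bytes are used as the AES-256 key and the next 16 as the CBC IV.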
# if __name__ == "__main__":
# enc = "<KEY>
# dec = TwistSourceDecryptor(enc).decrypt()
# print(dec)
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.
<import_stmt>torch<import_stmt>argparse<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_stmt>os<line_sep>os.environ['IS_QVGA_MONO']='1'<import_from_stmt>data.choose_config cfg<line_sep>cfg=cfg.cfg<line_sep>parser=argparse.ArgumentParser(description='Generating input to quantized face detection code')<line_sep>parser.add_argument('--image_dir' default="images" type=str help='Folder containing image(s)')<line_sep>parser.add_argument('--out_dir' default="input" type=str help='Folder containing the CSV files')<line_sep>args=parser.parse_args()<if_stmt><not>os.path.exists(args.out_dir)<block_start>os.makedirs(args.out_dir)<block_end>img_list=[os.path.join(args.image_dir x)<for>x os.listdir(args.image_dir)]<line_sep>xoutfile=open(os.path.join(args.out_dir "X.csv") "w")<for_stmt>image_path sorted(img_list)<block_start>img=Image.open(image_path)<line_sep>img=img.convert('RGB')<line_sep>img=np.array(img)<line_sep>scale=1<line_sep>max_im_shrink_x=320/(img.shape[1])<line_sep>max_im_shrink_y=240/(img.shape[0])<line_sep>image=cv2.resize(img <none> <none> fx=max_im_shrink_x fy=max_im_shrink_y interpolation=cv2.INTER_LINEAR)<if_stmt>len(image.shape)<eq>3<block_start>image=np.swapaxes(image 1 2)<line_sep>image=np.swapaxes(image 1 0)<block_end># RGB to BGR
x=image[[2 1 0] : :]<line_sep>x=x.astype('float32')<line_sep>x<augsub>cfg.img_mean<line_sep>x=x[[2 1 0] : :]<line_sep>x=0.299<times>x[0]+0.587<times>x[1]+0.114<times>x[2]<line_sep>x<augdiv>scale<line_sep>x=np.rint(x).astype(int)<for_stmt>i range(240)<block_start><for_stmt>j range(320)<block_start><if_stmt>i<eq>239<and>j<eq>319<block_start>xoutfile.write(str(x[i j])+"\n")<block_end><else_stmt><block_start>xoutfile.write(str(x[i j])+', ')<block_end><block_end><block_end><block_end>youtfile=open(os.path.join(args.out_dir "Y.csv") "w")<for_stmt>_ range(len(img_list))<block_start><for_stmt>i range(18000)<block_start><if_stmt>i<eq>17999<block_start>youtfile.write("0\n")<block_end><else_stmt><block_start>youtfile.write("0, ")<block_end><block_end><block_end> |
<import_stmt>streamlit<as>st<import_from_stmt>pathlib Path<import_stmt>sys<line_sep>ROOT=(Path(__file__).parent/"..").resolve()<line_sep>sys.path.append(str(ROOT))<import_from_stmt>pathlib Path<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>typing *<import_from_stmt>utils_glue *<import_from_stmt>pytorch_transformers *<import_stmt>itertools<import_stmt>json<line_sep>@st.cache<def_stmt>load_model src<block_start>SRC=ROOT/src<if_stmt>src.startswith("bert-")<block_start>SRC=src<block_end>config=BertConfig.from_pretrained(SRC)<line_sep><return>BertForSequenceClassification.from_pretrained(SRC from_tf=<false> config=config)<block_end>@st.cache<def_stmt>load_words <block_start>vocab=BertTokenizer.from_pretrained("bert-base-uncased")<line_sep><return>list(vocab.vocab.keys())<block_end>@st.cache<def_stmt>load_freqs src:str="train_freqs_sst.json"<block_start><with_stmt>open(ROOT/"info"/src "rt")<as>f<block_start>freqs=json.load(f)<block_end><return>freqs<block_end>@st.cache<def_stmt>load_importances src:str="word_positivities_sst.json"<block_start><with_stmt>open(ROOT/"info"/src "rt")<as>f<block_start>importances=json.load(f)<block_end><return>importances<block_end>sim=torch.nn.modules.distance.CosineSimilarity(0)<def_stmt>cosine_sim x y<block_start><return>sim(x.view(-1) y.view(-1)).item()<block_end><def_stmt>l2_difference_normalized x y<block_start>d=x.view(-1).shape[0]<line_sep><return>torch.norm(x-y).item()/d<block_end><class_stmt>ModelComparer<block_start><def_stmt>__init__ self sources:List[str] model_cls:str="bert" model_name:str="bert-base-uncased"<block_start>self.models=[load_model(src)<for>src sources]<line_sep>self.tokenizer=BertTokenizer.from_pretrained(model_name)<line_sep>self.parameters={n:[p]<for>n,p self.models[0].named_parameters()}<for_stmt>m self.models[1:]<block_start><for_stmt>n,p m.named_parameters()<block_start>self.parameters[n].append(p)<block_end><block_end><block_end><def_stmt>get_embeddings self word<block_start><return>[model.bert.embeddings.word_embeddings.weight[self.tokenizer.vocab[word] :]<for>model self.models]<block_end><def_stmt>mean_embedding_similarity self word:str<block_start><return>np.mean([cosine_sim(e1 e2)<for>e1,e2 itertools.combinations(self.get_embeddings(word) 2)])<block_end><def_stmt>mean_similarity self parameter:str<block_start><return>np.mean([cosine_sim(e1 e2)<for>e1,e2 itertools.combinations(self.parameters[parameter] 2)])<block_end><def_stmt>mean_difference self parameter:str diff=l2_difference_normalized<block_start><return>np.mean([diff(e1 e2)<for>e1,e2 itertools.combinations(self.parameters[parameter] 2)])<block_end><def_stmt>norms self parameter<block_start><return>[torch.norm(e)<for>e self.parameters[parameter]]<block_end><block_end># important constants
weight_options=["bert-base-uncased"]+[f"logs/{p.stem}"<for>p (ROOT/"logs").glob("*")]+[f"weights/{p.stem}"<for>p (ROOT/"weights").glob("*")]<line_sep> |
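A hedged usage sketch (checkpoint paths are illustrative) of comparing two fine-tuned checkpoints with the helpers defined above.

# comparer = ModelComparer(["weights/run_a", "weights/run_b"])
# comparer.mean_embedding_similarity("movie")             # cosine similarity of one word embedding
# comparer.mean_similarity("bert.pooler.dense.weight")    # cosine similarity of a full parameter tensor
# comparer.mean_difference("bert.pooler.dense.weight")    # normalised L2 difference between checkpoints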
<import_stmt>py weakref<import_from_stmt>rpython.rlib rgc<import_from_stmt>rpython.rtyper.lltypesystem lltype llmemory<import_from_stmt>rpython.rtyper.test.tool BaseRtypingTest<class_stmt>TestRweakref(BaseRtypingTest)<block_start><def_stmt>test_weakref_simple self<block_start><class_stmt>A<block_start><pass><block_end><class_stmt>B(A)<block_start><pass><block_end><class_stmt>C(A)<block_start><pass><block_end><def_stmt>f n<block_start><if_stmt>n<block_start>x=B()<line_sep>x.hello=42<line_sep>r=weakref.ref(x)<block_end><else_stmt><block_start>x=C()<line_sep>x.hello=64<line_sep>r=weakref.ref(x)<block_end><return>r().hello x# returns 'x' too, to keep it alive
<block_end>res=self.interpret(f [1])<assert_stmt>res.item0<eq>42<line_sep>res=self.interpret(f [0])<assert_stmt>res.item0<eq>64<block_end><def_stmt>test_prebuilt_weakref self<block_start><class_stmt>A<block_start><pass><block_end>a1=A()<line_sep>a1.hello=5<line_sep>w1=weakref.ref(a1)<line_sep>a2=A()<line_sep>a2.hello=8<line_sep>w2=weakref.ref(a2)<def_stmt>f n<block_start><if_stmt>n<block_start>r=w1<block_end><else_stmt><block_start>r=w2<block_end><return>r().hello<block_end>res=self.interpret(f [1])<assert_stmt>res<eq>5<line_sep>res=self.interpret(f [0])<assert_stmt>res<eq>8<block_end><def_stmt>test_prebuilt_dead_weakref self<block_start><class_stmt>A<block_start><pass><block_end>a1=A()<line_sep>w1=weakref.ref(a1)<line_sep>a2=A()<line_sep>w2=weakref.ref(a2)<del_stmt>a1<line_sep>rgc.collect()<assert_stmt>w1()<is><none><def_stmt>f n<block_start><if_stmt>n<block_start>r=w1<block_end><else_stmt><block_start>r=w2<block_end><return>r()<is><not><none><block_end>res=self.interpret(f [1])<assert_stmt>res<eq><false><line_sep>res=self.interpret(f [0])<assert_stmt>res<eq><true><block_end><def_stmt>test_multiple_prebuilt_dead_weakrefs self<block_start><class_stmt>A<block_start><pass><block_end>a1=A()<line_sep>w1=weakref.ref(a1)<line_sep>a2=A()<line_sep>w2=weakref.ref(a2)<line_sep>a3=A()<line_sep>w3=weakref.ref(a3)<line_sep>a4=A()<line_sep>w4=weakref.ref(a4)<del_stmt>a1 a3<line_sep>rgc.collect()<assert_stmt>w1()<is><none><assert_stmt>w3()<is><none><def_stmt>f n<block_start><if_stmt>n<g>0<block_start><if_stmt>n<g>5<block_start>r=w1<block_end><else_stmt><block_start>r=w3<block_end><assert_stmt>r()<is><none><block_end><else_stmt><block_start><if_stmt>n<l>-5<block_start>r=w2<block_end><else_stmt><block_start>r=w4<block_end><assert_stmt>r()<is><not><none><block_end><return>r()<is><not><none><block_end>res=self.interpret(f [1])<assert_stmt>res<eq><false><line_sep>res=self.interpret(f [0])<assert_stmt>res<eq><true><line_sep>res=self.interpret(f [100])<assert_stmt>res<eq><false><line_sep>res=self.interpret(f [-100])<assert_stmt>res<eq><true><block_end><def_stmt>test_pbc_null_weakref self<block_start><class_stmt>A<block_start><pass><block_end>a1=A()<line_sep>mylist=[weakref.ref(a1) <none>]<def_stmt>fn i<block_start>item=mylist[i]<line_sep><return>item<is><none><block_end><assert_stmt>self.interpret(fn [0])<is><false><assert_stmt>self.interpret(fn [1])<is><true><block_end><def_stmt>test_ll_weakref self<block_start>S=lltype.GcStruct('S' ('x' lltype.Signed))<def_stmt>g <block_start>s=lltype.malloc(S)<line_sep>w=llmemory.weakref_create(s)<assert_stmt>llmemory.weakref_deref(lltype.Ptr(S) w)<eq>s<assert_stmt>llmemory.weakref_deref(lltype.Ptr(S) w)<eq>s<line_sep><return>w# 's' is forgotten here
<block_end><def_stmt>f <block_start>w=g()<line_sep>rgc.collect()<line_sep><return>llmemory.weakref_deref(lltype.Ptr(S) w)<block_end>res=self.interpret(f [])<assert_stmt>res<eq>lltype.nullptr(S)<block_end><block_end><class_stmt>TestRWeakrefDisabled(BaseRtypingTest)<block_start><def_stmt>test_no_real_weakref self<block_start><class_stmt>A<block_start><pass><block_end>a1=A()<line_sep>mylist=[weakref.ref(a1) <none>]<def_stmt>g <block_start>a2=A()<line_sep><return>weakref.ref(a2)<block_end><def_stmt>fn i<block_start>w=g()<line_sep>rgc.collect()<assert_stmt>w()<is><not><none><line_sep><return>mylist[i]<is><none><block_end><assert_stmt>self.interpret(fn [0] rweakref=<false>)<is><false><assert_stmt>self.interpret(fn [1] rweakref=<false>)<is><true><block_end><block_end> |
#################################################################################
# The Institute for the Design of Advanced Energy Systems Integrated Platform
# Framework (IDAES IP) was produced under the DOE Institute for the
# Design of Advanced Energy Systems (IDAES), and is copyright (c) 2018-2021
# by the software owners: The Regents of the University of California, through
# Lawrence Berkeley National Laboratory, National Technology & Engineering
# Solutions of Sandia, LLC, Carnegie Mellon University, West Virginia University
# Research Corporation, et al. All rights reserved.
#
# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and
# license information.
#################################################################################
<import_from_stmt>pandas DataFrame<import_from_stmt>collections OrderedDict<import_from_stmt>pyomo.environ value<import_from_stmt>pyomo.network Arc Port<import_from_stmt>pyomo.core.base.units_container units<import_stmt>idaes.logger<as>idaeslog<line_sep>_log=idaeslog.getLogger(__name__)<line_sep>__author__="<NAME>, <NAME>"<def_stmt>arcs_to_stream_dict blk additional=<none> descend_into=<true> sort=<false> prepend=<none> s=<none><block_start>"""
Creates a stream dictionary from the Arcs in a model, using the Arc names as
keys. This can be used to automate the creation of the streams dictionary
needed for the ``create_stream_table_dataframe()`` and ``stream_states_dict()``
functions.
Args:
blk (pyomo.environ._BlockData): Pyomo model to search for Arcs
additional (dict): Additional states to add to the stream dictionary,
which aren't represented by arcs in blk, for example feed or
product streams without Arcs attached or states internal to a unit
model.
descend_into (bool): If True, search subblocks for Arcs as well. The
default is True.
sort (bool): If True sort keys and return an OrderedDict
prepend (str): Prepend a string to the arc name joined with a '.'.
This can be useful to prevent conflicting names when sub blocks
contain Arcs that have the same names when used in combination
with descend_into=False.
s (dict): Add streams to an existing stream dict.
Returns:
Dictionary with Arc names as keys and the Arcs as values.
"""<if_stmt>s<is><none><block_start>s={}<block_end><for_stmt>c blk.component_objects(Arc descend_into=descend_into)<block_start>key=c.getname()<if_stmt>prepend<is><not><none><block_start>key=".".join([prepend key])<block_end>s[key]=c<block_end><if_stmt>additional<is><not><none><block_start>s.update(additional)<block_end><if_stmt>sort<block_start>s=OrderedDict(sorted(s.items()))<block_end><return>s<block_end><def_stmt>stream_states_dict streams time_point=0<block_start>"""
Method to create a dictionary of state blocks representing stream states.
This takes a dict with stream name keys and stream values.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
time_point : point in the time domain at which to generate stream table
(default = 0)
Returns:
An OrderedDict keyed by stream name (and index, where applicable) whose values are the corresponding StateBlocks at the given time point.
"""<line_sep>stream_dict=OrderedDict()<def_stmt>_stream_dict_add sb n i=<none><block_start>"""add a line to the stream table"""<if_stmt>i<is><none><block_start>key=n<block_end><else_stmt><block_start>key="{}[{}]".format(n i)<block_end>stream_dict[key]=sb<block_end><for_stmt>n streams.keys()<block_start><if_stmt>isinstance(streams[n] Arc)<block_start><for_stmt>i,a streams[n].items()<block_start><try_stmt># if getting the StateBlock from the destination port
# fails for any reason try the source port. This could
# happen if a port does not have an associated
# StateBlock. For example a surrogate model may not
# use state blocks, unit models may handle physical
# properties without state blocks, or the port could
# be used to serve the purpose of a translator block.
<block_start>sb=_get_state_from_port(a.ports[1] time_point)<block_end><except_stmt><block_start>sb=_get_state_from_port(a.ports[0] time_point)<block_end>_stream_dict_add(sb n i)<block_end><block_end><elif_stmt>isinstance(streams[n] Port)<block_start>sb=_get_state_from_port(streams[n] time_point)<line_sep>_stream_dict_add(sb n)<block_end><else_stmt># _IndexedStateBlock is a private class, so cannot directly test
# whether streams[n] is one or not.
<block_start><try_stmt><block_start>sb=streams[n][time_point]<block_end><except_stmt>KeyError<as>err<block_start><raise>TypeError(f"Either component type of stream argument {streams[n]} "<concat>f"is unindexed or {time_point} is not a member of its "<concat>f"indexing set.")<from>err<block_end>_stream_dict_add(sb n)<block_end><block_end><return>stream_dict<block_end><def_stmt>tag_state_quantities blocks attributes labels exception=<false><block_start>""" Take a stream states dictionary, and return a tag dictionary for stream
quantities. This takes a dictionary (blk) that has state block labels as
keys and state blocks as values. The attributes are a list of attributes to
tag. If an element of the attribute list is list-like, the first element is
the attribute and the remaining elements are indexes. Labels provides a list
of attribute labels to be used to create the tag. Tags are blk_key + label
for the attribute.
Args:
blocks (dict): Dictionary of state blocks. The key is the block label to
be used in the tag, and the value is a state block.
attributes (list-like): A list of attributes to tag. It is okay if a
particular attribute does not exist in a state block. This allows
you to mix state blocks with different sets of attributes. If an
attribute is indexed, the attribute can be specified as a list or
tuple where the first element is the attribute and the remaining
elements are indexes.
labels (list-like): These are attribute labels. The order corresponds to the
attribute list. They are used to create the tags. Tags are in the
form blk.key + label.
exception (bool): If True, raise exceptions related to invalid or
missing indexes. If False, missing or bad indexes are ignored and
None is used for the table value. Setting this to False allows
tables where some state blocks have the same attributes with different
indexing. (default is True)
Returns:
(dict): Dictionary where the keys are tags and the values are model
attributes, usually Pyomo component data objects.
"""<line_sep>tags={}<if_stmt>labels<is><none><block_start>lables=attributes<for_stmt>a attributes<block_start><if_stmt>isinstance(a (tuple list))<block_start><if_stmt>len(a)<eq>2# in case there are multiple indexes and user gives tuple
<block_start>label=f"{a[0]}[{a[1]}]"<block_end><if_stmt>len(a)<g>2<block_start>label=f"{a[0]}[{a[1:]}]"<block_end><else_stmt><block_start>label=a[0]<block_end><block_end><block_end><block_end><for_stmt>key,s blocks.items()<block_start><for_stmt>i,a enumerate(attributes)<block_start>j=<none><if_stmt>isinstance(a (list tuple))# if a is list or tuple, the first element should be the
# attribute and the remaining elements should be indexes.
<block_start><if_stmt>len(a)<eq>2<block_start>j=a[1]# catch user supplying list-like of indexes
<block_end><if_stmt>len(a)<g>2<block_start>j=a[1:]<block_end>#if len(a) == 1, we'll say that's fine here. Don't know why you
#would put the attribute in a list-like if not indexed, but I'll
#allow it.
a=a[0]<block_end>v=getattr(s a <none>)<if_stmt>j<is><not><none><and>v<is><not><none><block_start><try_stmt><block_start>v=v[j]<block_end><except_stmt>KeyError<block_start><if_stmt><not>exception<block_start>v=<none><block_end><else_stmt><block_start>_log.error(f"{j} is not a valid index of {a}")<line_sep><raise>KeyError(f"{j} is not a valid index of {a}")<block_end><block_end><block_end><try_stmt><block_start>value(v exception=<false>)<block_end><except_stmt>TypeError<block_start><if_stmt><not>exception<block_start>v=<none><block_end><else_stmt><block_start>_log.error(f"Cannot calculate value of {a} (may be subscriptable)")<line_sep><raise>TypeError(f"Cannot calculate value of {a} (may be subscriptable)")<block_end><block_end><except_stmt>ZeroDivisionError<block_start><pass># this one is okay
<block_end><if_stmt>v<is><not><none><block_start>tags[f"{key}{labels[i]}"]=v<block_end><block_end><block_end><return>tags<block_end><def_stmt>create_stream_table_dataframe streams true_state=<false> time_point=0 orient="columns" add_units=<false><block_start>"""
Method to create a stream table in the form of a pandas dataframe. Method
takes a dict with name keys and stream values. Use an OrderedDict to list
the streams in a specific order, otherwise the dataframe can be sorted
later.
Args:
streams : dict with name keys and stream values. Names will be used as
display names for stream table, and streams may be Arcs, Ports or
StateBlocks.
true_state : indicates whether the stream table should contain the
display variables defined in the StateBlock (False, default) or the
state variables (True).
time_point : point in the time domain at which to generate stream table
(default = 0)
orient : orientation of stream table. Accepted values are 'columns'
(default) where streams are displayed as columns, or 'index' where
streams are displayed as rows.
add_units : Add a Units column to the dataframe representing the units
of the stream values.
Returns:
A pandas DataFrame containing the stream table data.
"""<line_sep>stream_attributes=OrderedDict()<line_sep>stream_states=stream_states_dict(streams=streams time_point=time_point)<line_sep>full_keys=[]# List of all rows in dataframe to fill in missing data
<if_stmt>add_units<and>stream_states<block_start>stream_attributes['Units']={}<block_end><for_stmt>key,sb stream_states.items()<block_start>stream_attributes[key]={}<if_stmt>true_state<block_start>disp_dict=sb.define_state_vars()<block_end><else_stmt><block_start>disp_dict=sb.define_display_vars()<block_end><for_stmt>k disp_dict<block_start><for_stmt>i disp_dict[k]<block_start>stream_key=k<if>i<is><none><else>f"{k} {i}"<line_sep>stream_attributes[key][stream_key]=value(disp_dict[k][i])<if_stmt>add_units<block_start>pyomo_unit=units.get_units(disp_dict[k][i])<if_stmt>pyomo_unit<block_start>pint_unit=pyomo_unit._get_pint_unit()<line_sep>stream_attributes['Units'][stream_key]={'raw':str(pyomo_unit) 'html':'{:~H}'.format(pint_unit) 'latex':'{:~L}'.format(pint_unit)}<block_end><else_stmt><block_start>stream_attributes['Units'][stream_key]=<none><block_end><block_end><if_stmt>stream_key<not><in>full_keys<block_start>full_keys.append(stream_key)<block_end><block_end><block_end><block_end># Check for missing rows in any stream, and fill with "-" if needed
<for_stmt>k,v stream_attributes.items()<block_start><for_stmt>r full_keys<block_start><if_stmt>r<not><in>v.keys()# Missing row, fill with placeholder
<block_start>v[r]="-"<block_end><block_end><block_end><return>DataFrame.from_dict(stream_attributes orient=orient)<block_end><def_stmt>stream_table_dataframe_to_string stream_table **kwargs<block_start>"""
Method to print a stream table from a dataframe. Method takes any argument
understood by DataFrame.to_string
"""<line_sep># Set some default values for keyword arguments
na_rep=kwargs.pop("na_rep" "-")<line_sep>justify=kwargs.pop("justify" "center")<line_sep>float_format=kwargs.pop("float_format" <lambda>x:"{:#.5g}".format(x))<line_sep># Print stream table
<return>stream_table.to_string(na_rep=na_rep justify=justify float_format=float_format **kwargs)<block_end><def_stmt>_get_state_from_port port time_point<block_start>"""
Attempt to find a StateBlock-like object connected to a Port. If the
object is indexed both in space and time, assume that the time index
comes first. If no components are assigned to the Port, raise a
ValueError. If the first component's parent block has no index, raise an
AttributeError. If different variables on the port appear to be connected
to different state blocks, raise a RuntimeError.
Args:
port (pyomo.network.Port): a port with variables derived from some
single StateBlock
time_point : point in the time domain at which to index StateBlock
(default = 0)
Returns:
(StateBlock-like) : an object containing all the components contained
in the port.
"""<line_sep>vlist=list(port.iter_vars())<line_sep>states=[v.parent_block().parent_component()<for>v vlist]<if_stmt>len(vlist)<eq>0<block_start><raise>ValueError(f"No block could be retrieved from Port {port.name} "<concat>f"because it contains no components.")<block_end># Check the number of indices of the parent property block. If its indexed
# both in space and time, keep the second, spatial index and throw out the
# first, temporal index. If that ordering is changed, this method will
# need to be changed as well.
<try_stmt><block_start>idx=vlist[0].parent_block().index()<block_end><except_stmt>AttributeError<as>err<block_start><raise>AttributeError(f"No block could be retrieved from Port {port.name} "<concat>f"because block {vlist[0].parent_block().name} has no index.")<from>err<block_end># Assuming the time index is always first and the spatial indices are all
# the same
<if_stmt>isinstance(idx tuple)<block_start>idx=(time_point vlist[0].parent_block().index()[1:])<block_end><else_stmt><block_start>idx=(time_point )<block_end># This method also assumes that ports with different spatial indices won't
# end up at the same port. Otherwise this check is insufficient.
<if_stmt>all(states[0]<is>s<for>s states)<block_start><return>states[0][idx]<block_end><raise>RuntimeError(f"No block could be retrieved from Port {port.name} "<concat>f"because components are derived from multiple blocks.")<block_end><def_stmt>generate_table blocks attributes heading=<none> exception=<true><block_start>"""
Create a Pandas DataFrame that contains a list of user-defined attributes
from a set of Blocks.
Args:
blocks (dict): A dictionary with name keys and BlockData objects for
values. Any name can be associated with a block. Use an OrderedDict
to show the blocks in a specific order, otherwise the dataframe can
be sorted later.
attributes (list or tuple of strings): Attributes to report from a
Block, can be a Var, Param, or Expression. If an attribute doesn't
exist or doesn't have a valid value, it will be treated as missing
data.
heading (list or tuple of strings): A list of strings that will be used
as column headings. If None the attribute names will be used.
exception (bool): If True, raise exceptions related to invalid or
missing indexes. If False, missing or bad indexes are ignored and
None is used for the table value. Setting this to False allows
tables where some state blocks have the same attributes with different
indexing. (default is True)
Returns:
(DataFrame): A Pandas dataframe containing a data table
"""<if_stmt>heading<is><none><block_start>heading=attributes<block_end>st=DataFrame(columns=heading)<line_sep>row=[<none>]<times>len(attributes)# not a big deal but save time on realloc
<for_stmt>key,s blocks.items()<block_start><for_stmt>i,a enumerate(attributes)<block_start>j=<none><if_stmt>isinstance(a (list tuple))# if a is list or tuple, assume index supplied
<block_start><try_stmt><block_start><assert_stmt>len(a)<g>1<block_end><except_stmt>AssertionError<block_start>_log.error(f"An index must be supplied for attribute {a[0]}")<line_sep><raise>AssertionError(f"An index must be supplied for attribute {a[0]}")<block_end>j=a[1:]<line_sep>a=a[0]<block_end>v=getattr(s a <none>)<if_stmt>j<is><not><none><and>v<is><not><none><block_start><try_stmt><block_start>v=v[j]<block_end><except_stmt>KeyError<block_start><if_stmt><not>exception<block_start>v=<none><block_end><else_stmt><block_start>_log.error(f"{j} is not a valid index of {a}")<line_sep><raise>KeyError(f"{j} is not a valid index of {a}")<block_end><block_end><block_end><try_stmt><block_start>v=value(v exception=<false>)<block_end><except_stmt>TypeError<block_start><if_stmt><not>exception<block_start>v=<none><block_end><else_stmt><block_start>_log.error(f"Cannot calculate value of {a} (may be subscriptable)")<line_sep><raise>TypeError(f"Cannot calculate value of {a} (may be subscriptable)")<block_end><block_end><except_stmt>ZeroDivisionError<block_start>v=<none><block_end>row[i]=v<block_end>st.loc[key]=row<block_end><return>st<block_end>
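The stream-table helpers above ultimately return pandas DataFrames built with DataFrame.from_dict, with the 'orient' argument deciding whether streams become columns or rows. A standalone sketch (not part of the source, with made-up stream names and values) of how the two orientations differ:

from pandas import DataFrame

# Toy data shaped like the stream_attributes dict the function builds internally.
stream_attributes = {
    "inlet": {"flow_mol": 1.0, "temperature": 300.0},
    "outlet": {"flow_mol": 1.0, "temperature": 350.0},
}
# orient="columns" shows each stream as a column; orient="index" shows it as a row.
print(DataFrame.from_dict(stream_attributes, orient="columns"))
print(DataFrame.from_dict(stream_attributes, orient="index"))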
"""Basic Operations for the demo."""<import_from_stmt>plynx.demo.basic_functions GROUP<as>basic_group<import_from_stmt>plynx.demo.hello_world GROUP<as>hello_group<import_from_stmt>plynx.demo.types GROUP<as>types_group<line_sep>COLLECTION=[hello_group types_group basic_group ]<line_sep> |
"""Elasticsearch store plugin."""<import_stmt>json<import_stmt>logging<import_from_stmt>elasticsearch Elasticsearch ElasticsearchException<line_sep>_log=logging.getLogger(__name__)<class_stmt>EsStore<block_start>"""Elasticsearch adapter to index cloud data in Elasticsearch."""<def_stmt>__init__ self host='localhost' port=9200 index='cloudmarker' buffer_size=5000000<block_start>"""Create an instance of :class:`EsStore` plugin.
The plugin uses the default port for Elasticsearch if not
specified.
The ``buffer_size`` for the plugin is the value for the maximum
number of bytes of data to be sent in a bulk API request to
Elasticsearch.
Arguments:
host (str): Elasticsearch host
port (int): Elasticsearch port
index (str): Elasticsearch index
buffer_size (int): Maximum number of bytes of data to hold
in the in-memory buffer.
"""<line_sep>self._es=Elasticsearch([{'host':host 'port':port}])<line_sep>self._index=index<line_sep>self._buffer_size=buffer_size<line_sep>self._buffer=''<line_sep>self._cur_buffer_size=0<block_end># TODO: Add method to create mapping for efficient indexing of data.
# TODO: Add method to prune old data.
# TODO: Add support for multiple indexes
<def_stmt>_doc_index_body self doc doc_id=<none><block_start>"""Create the body for a bulk insert API call to Elasticsearch.
Arguments:
doc (dict): Document
doc_id: Document ID
Returns:
(str): Request body corresponding to the ``doc``.
"""<line_sep>action_def={'index':{'_index':self._index '_id':doc_id}}<line_sep>src_def=doc<line_sep><return>json.dumps(action_def)+'\n'+json.dumps(src_def)+'\n'<block_end><def_stmt>_flush self<block_start>"""Bulk insert buffered records into Elasticserach."""<try_stmt><block_start>resp=self._es.bulk(self._buffer)<block_end><except_stmt>ElasticsearchException<as>e# Handles exceptions of all types defined here.
# https://github.com/elastic/elasticsearch-py/blob/master/elasticsearch/exceptions.py
<block_start>_log.error('Bulk Index Error: %s: %s' type(e).__name__ e)<line_sep><return><block_end># Read and parse the response.
items=resp['items']<line_sep>records_sent=len(items)<line_sep>fail_count=0<line_sep># If response code for an item is not 2xx, increment the count of
# failed insertions.
<if_stmt>resp['errors']<block_start><for_stmt>item items<block_start><if_stmt><not>199<l>item['index']['status']<l>300<block_start>fail_count<augadd>1<line_sep>_log.debug('Failed to insert record; ID: %s' item['index']['_id'])<block_end><block_end>_log.error('Failed to write %d records' fail_count)<block_end>_log.info('Indexed %d records' records_sent-fail_count)<line_sep># Reset the buffer.
self._cur_buffer_size=0<line_sep>self._buffer=''<block_end><def_stmt>write self record<block_start>"""Write JSON records to the Elasticsearch index.
Flush the buffer by saving its content to Elasticsearch when
the buffer size exceeds the configured size.
Arguments:
record (dict): Data to save to Elasticsearch.
"""<line_sep>es_record=self._doc_index_body(record)# TODO: Send valid doc ID
es_record_bytes=len(es_record)<if_stmt>(self._cur_buffer_size<and>es_record_bytes+self._cur_buffer_size<g>self._buffer_size)<block_start>self._flush()<block_end><else_stmt><block_start>self._buffer<augadd>es_record<line_sep>self._cur_buffer_size<augadd>es_record_bytes<block_end><block_end><def_stmt>done self<block_start>"""Flush pending records to Elasticsearch."""<if_stmt>self._cur_buffer_size<block_start>self._flush()<block_end><block_end><block_end> |
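A hypothetical usage sketch (not part of the source) for the EsStore plugin above. The index name, buffer size, and record layout are illustrative assumptions, and the bulk call only succeeds if an Elasticsearch node is reachable at localhost:9200.

store = EsStore(host='localhost', port=9200, index='cloudmarker', buffer_size=1024)
# Each write() appends one bulk action/source pair to the in-memory buffer.
store.write({'record_type': 'mock', 'reference': 1})
store.write({'record_type': 'mock', 'reference': 2})
# done() flushes whatever is still buffered in a single bulk request.
store.done()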
# Copyright (c) 2017 <NAME> and <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: <NAME>
<class_stmt>Result<block_start>enums='''
NotRun
Skipped
Passed
Failed
Errored
'''.split()<for_stmt>idx,enum enumerate(enums)<block_start>locals()[enum]=idx<block_end>@classmethod<def_stmt>name cls enum<block_start><return>cls.enums[enum]<block_end><def_stmt>__init__ self value reason=<none><block_start>self.value=value<line_sep>self.reason=reason<block_end><def_stmt>__str__ self<block_start><return>self.name(self.value)<block_end><block_end><class_stmt>Status<block_start>enums='''
Unscheduled
Building
Running
TearingDown
Complete
Avoided
'''.split()<for_stmt>idx,enum enumerate(enums)<block_start>locals()[enum]=idx<block_end>@classmethod<def_stmt>name cls enum<block_start><return>cls.enums[enum]<block_end><block_end> |
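An illustrative sketch (not part of the source) of how the Result and Status helpers above behave: the class body turns each name in the enums string into an integer class attribute, and name() maps an integer back to its string.

r = Result(Result.Failed, reason="output mismatch")
print(r.value)                      # 3
print(str(r))                       # "Failed"
print(Result.name(Result.Passed))   # "Passed"
print(Status.name(Status.Running))  # "Running"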
<import_from_stmt>fuzzconfig FuzzConfig<import_stmt>nonrouting<import_stmt>pytrellis<import_stmt>fuzzloops<import_stmt>interconnect<line_sep>cfg=FuzzConfig(job="USRMCLK" family="ECP5" device="LFE5U-45F" ncl="empty.ncl" tiles=["MIB_R71C4:EFB0_PICB0" "MIB_R71C5:EFB1_PICB1" "MIB_R71C6:EFB2_PICB0" "MIB_R71C7:EFB3_PICB1" "MIB_R71C3:BANKREF8"])<def_stmt>get_substs config<block_start><return>dict(sysconfig=(" ".join(["{}={}".format(k v)<for>k,v config.items()])))<block_end><def_stmt>main <block_start>pytrellis.load_database("../../../database")<line_sep>cfg.setup()<line_sep>empty_bitfile=cfg.build_design(cfg.ncl {})<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.BACKGROUND_RECONFIG" ["OFF" "ON"] <lambda>x:get_substs(dict(BACKGROUND_RECONFIG=x)) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.TRANSFR" ["OFF" "ON"] <lambda>x:get_substs(dict(TRANSFR=x)) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.DONE_EX" ["OFF" "ON"] <lambda>x:get_substs(dict(DONE_EX=x)) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.DONE_OD" ["OFF" "ON"] <lambda>x:get_substs(dict(DONE_OD=x)) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.DONE_PULL" ["OFF" "ON"] <lambda>x:get_substs(dict(DONE_PULL=x)) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.SLAVE_SPI_PORT" ["DISABLE" "ENABLE"] <lambda>x:get_substs(dict(SLAVE_SPI_PORT=x)) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.MASTER_SPI_PORT" ["DISABLE" "ENABLE"] <lambda>x:get_substs(dict(MASTER_SPI_PORT=x)) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.SLAVE_PARALLEL_PORT" ["DISABLE" "ENABLE"] <lambda>x:get_substs(dict(SLAVE_PARALLEL_PORT=x)) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.CONFIG_IOVOLTAGE" ["1.2" "1.5" "1.8" "2.5" "3.3"] <lambda>x:get_substs(dict(CONFIG_IOVOLTAGE=x SLAVE_SPI_PORT="ENABLE")) empty_bitfile <false>)<line_sep>nonrouting.fuzz_enum_setting(cfg "SYSCONFIG.WAKE_UP" ["4" "21"] <lambda>x:get_substs(dict(WAKE_UP=x)) empty_bitfile <false>)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end> |
<import_from_future_stmt> unicode_literals<import_from_stmt>operator attrgetter<import_stmt>django<line_sep>get_remote_field=attrgetter('remote_field'<if>django.VERSION<ge>(1 9)<else>'rel')<if_stmt>django.VERSION<ge>(1 9)<block_start><def_stmt>get_remote_field_model field<block_start>model=getattr(field 'model' <none>)<if_stmt>model<block_start><return>field.remote_field.model<block_end><else_stmt><block_start><return>field.related_model<block_end><block_end><def_stmt>get_opts_label opts<block_start><return>opts.label<block_end><def_stmt>many_to_many_set instance m2m value<block_start>getattr(instance m2m).set(value)<block_end><block_end><else_stmt><block_start><def_stmt>get_remote_field_model field<block_start><return>getattr(getattr(field 'rel' <none>) 'to' <none>)<block_end><def_stmt>get_opts_label opts<block_start><return>"%s.%s"%(opts.app_label opts.object_name)<block_end><def_stmt>many_to_many_set instance m2m value<block_start>setattr(instance m2m value)<block_end><block_end> |
<import_stmt>os<import_stmt>glob<import_stmt>logging<import_from_stmt>conans ConanFile tools Meson<line_sep>_meson_feature=["disabled" "enabled" "auto"]<line_sep>_features=[]<class_stmt>GStPluginsBadConan(ConanFile)<block_start>python_requires="aac-sdk-tools/1.0"<line_sep>python_requires_extend="aac-sdk-tools.BaseSdkDependency"<line_sep>name="gst-plugins-bad"<line_sep>version="1.18.4"<line_sep>description="GStreamer is a development framework for creating applications like media players, video editors, "<concat>"streaming media broadcasters and so on"<line_sep>topics=("conan" "gstreamer" "multimedia" "video" "audio" "broadcasting" "framework" "media")<line_sep>homepage="https://gstreamer.freedesktop.org/"<line_sep>license="GPL-2.0-only"<line_sep>exports=["LICENSE.md"]<line_sep>settings="os" "arch" "compiler" "build_type"<line_sep>options=dict({"shared":[<true> <false>] "fPIC":[<true> <false>]} **{f:_meson_feature<for>f _features})<line_sep>default_options=dict({"shared":<false> "fPIC":<true>} **{f:"auto"<for>f _features})<line_sep>exports_sources=["patches/*.patch"]<line_sep>requires=["openssl/1.1.1i" "libxml2/2.9.10"]<line_sep>build_requires=["meson/0.56.2" "bison/3.7.1" "flex/2.6.4" "pkgconf/1.7.3"]<line_sep>generators="pkg_config"<line_sep>_source_subfolder="source_subfolder"<line_sep>_build_subfolder="build_subfolder"<def_stmt>configure self<block_start><del_stmt>self.settings.compiler.libcxx<del_stmt>self.settings.compiler.cppstd<line_sep>self.options['gstreamer'].shared=self.options.shared<block_end><def_stmt>config_options self<block_start><if_stmt>self.settings.os<eq>'Windows'<block_start><del_stmt>self.options.fPIC<block_end><block_end><def_stmt>requirements self<block_start>self.requires(f"gst-plugins-base/{self.version}@{self.user}/{self.channel}")<line_sep>self.requires(f"faad2/2.10.0@{self.user}/{self.channel}")<block_end><def_stmt>source self<block_start>tools.get(**self.conan_data["sources"][self.version])<line_sep>os.rename(f"{self.name}-{self.version}" self._source_subfolder)<block_end><def_stmt>_apply_patches self<block_start><for_stmt>filename sorted(glob.glob("patches/*.patch"))<block_start>logging.info(f"applying patch: {filename}")<line_sep>tools.patch(base_path=self._source_subfolder patch_file=filename)<block_end><block_end><def_stmt>_configure_meson self<block_start>defs=dict()<def_stmt>add_flag name value<block_start><if_stmt>name<in>defs<block_start>defs[name]<augadd>" "+value<block_end><else_stmt><block_start>defs[name]=value<block_end><block_end><def_stmt>add_compiler_flag value<block_start>add_flag("c_args" value)<line_sep>add_flag("cpp_args" value)<block_end><def_stmt>add_linker_flag value<block_start>add_flag("c_link_args" value)<line_sep>add_flag("cpp_link_args" value)<block_end>meson=Meson(self)<if_stmt>self.settings.compiler<eq>"Visual Studio"<block_start>add_linker_flag("-lws2_32")<line_sep>add_compiler_flag(f"-{self.settings.compiler.runtime}")<if_stmt>int(str(self.settings.compiler.version))<l>14<block_start>add_compiler_flag("-Dsnprintf=_snprintf")<block_end><block_end><if_stmt>self.settings.get_safe("compiler.runtime")<block_start>defs["b_vscrt"]=str(self.settings.compiler.runtime).lower()<block_end><for_stmt>x ["tools" "examples" "benchmarks" "tests"]<block_start>defs[x]="disabled"<block_end><for_stmt>x _features<block_start>defs[x]=self.options.get_safe(x)<block_end># Disable options that cause build issues on non-Linux systems
<if_stmt>self.settings.os<ne>'Linux'<or>(hasattr(self 'settings_build')<and>tools.cross_building(self skip_x64_x86=<true>))<block_start>meson.options["introspection"]="disabled"<line_sep>meson.options["orc"]="disabled"<block_end># Disable unused plugins
<for_stmt>plugin ["closedcaption" "rsvg" "ttml"]<block_start>meson.options[plugin]="disabled"<block_end># Enable hls explicitly for HTTP streaming
meson.options["hls"]="enabled"<line_sep>meson.options["hls-crypto"]="openssl"<line_sep># Somehow Meson ignore PKG_CONFIG_PATH env. Force setting it with option.
meson.options["pkg_config_path"]=os.getenv('PKG_CONFIG_PATH')<line_sep>meson.configure(build_folder=self._build_subfolder source_folder=self._source_subfolder defs=defs)<line_sep><return>meson<block_end><def_stmt>build self<block_start>self._apply_patches()<with_stmt>tools.environment_append({"PKG_CONFIG_PATH":[os.getcwd()]})<block_start>meson=self._configure_meson()<line_sep>meson.build()<block_end><block_end><def_stmt>package self<block_start>meson=self._configure_meson()<line_sep>meson.install()<block_end><def_stmt>package_info self<block_start>gst_plugin_path=os.path.join(self.package_folder "lib" "gstreamer-1.0")<if_stmt>self.options.shared<block_start>logging.info(f"Appending GST_PLUGIN_PATH env var: {gst_plugin_path}")<line_sep>self.env_info.GST_PLUGIN_PATH.append(gst_plugin_path)<block_end><else_stmt><block_start>self.cpp_info.libdirs.append(gst_plugin_path)<line_sep>self.cpp_info.libs=tools.collect_libs(self)<block_end><block_end><block_end> |
<import_stmt>os<import_stmt>shutil<import_stmt>subprocess<import_stmt>pytest<def_stmt>_assert_eq left right<block_start><assert_stmt>left<eq>right '{} != {}'.format(left right)<block_end>N_STEPS=100<line_sep>N_TRIALS=2<line_sep>N_JOBS=1<line_sep>ALGOS=('ppo2' 'a2c' 'trpo' 'acktr')<line_sep># Not yet supported:
# ALGOS = ('acer', 'dqn')
ENV_IDS=('CartPole-v1' )<line_sep>LOG_FOLDER='logs/tests_optimize/'<line_sep>experiments={}<for_stmt>algo ALGOS<block_start><for_stmt>env_id ENV_IDS<block_start>experiments['{}-{}'.format(algo env_id)]=(algo env_id)<block_end><block_end># Test for DDPG
experiments['ddpg-MountainCarContinuous-v0']=('ddpg' 'MountainCarContinuous-v0')<line_sep># Test for SAC
experiments['sac-Pendulum-v0']=('sac' 'Pendulum-v0')<line_sep># Test for TD3
experiments['td3-Pendulum-v0']=('td3' 'Pendulum-v0')<line_sep># Clean up
<if_stmt>os.path.isdir(LOG_FOLDER)<block_start>shutil.rmtree(LOG_FOLDER)<block_end>@pytest.mark.parametrize("sampler" ['random' 'tpe'])@pytest.mark.parametrize("pruner" ['none' 'halving' 'median'])@pytest.mark.parametrize("experiment" experiments.keys())<def_stmt>test_optimize sampler pruner experiment<block_start>algo,env_id=experiments[experiment]<line_sep>args=['-n' str(N_STEPS) '--algo' algo '--env' env_id '--log-folder' LOG_FOLDER '--n-trials' str(N_TRIALS) '--n-jobs' str(N_JOBS) '--sampler' sampler '--pruner' pruner '-optimize']<line_sep>return_code=subprocess.call(['python' 'train.py']+args)<line_sep>_assert_eq(return_code 0)<block_end> |
# Copyright (c) 2018 <NAME>
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>oslo_db.sqlalchemy test_fixtures<import_from_stmt>oslo_db.sqlalchemy utils<as>db_utils<import_from_stmt>glance.tests.functional.db test_migrations<import_stmt>glance.tests.utils<as>test_utils<class_stmt>TestRockyExpand02Mixin(test_migrations.AlembicMigrationsMixin)<block_start><def_stmt>_get_revisions self config<block_start><return>test_migrations.AlembicMigrationsMixin._get_revisions(self config head='rocky_expand02')<block_end><def_stmt>_pre_upgrade_rocky_expand02 self engine<block_start>images=db_utils.get_table(engine 'images')<line_sep>self.assertNotIn('os_hash_algo' images.c)<line_sep>self.assertNotIn('os_hash_value' images.c)<block_end><def_stmt>_check_rocky_expand02 self engine data<block_start>images=db_utils.get_table(engine 'images')<line_sep>self.assertIn('os_hash_algo' images.c)<line_sep>self.assertTrue(images.c.os_hash_algo.nullable)<line_sep>self.assertIn('os_hash_value' images.c)<line_sep>self.assertTrue(images.c.os_hash_value.nullable)<block_end><block_end><class_stmt>TestRockyExpand02MySQL(TestRockyExpand02Mixin test_fixtures.OpportunisticDBTestMixin test_utils.BaseTestCase )<block_start>FIXTURE=test_fixtures.MySQLOpportunisticFixture<block_end> |
<import_stmt>argparse<import_stmt>os<import_stmt>yaml<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>time<import_stmt>json<import_from_stmt>.eval_np PanopticEval<import_from_stmt>.config global_cfg<line_sep>need_nuscenes_remap=<false><if_stmt>global_cfg.DATA_CONFIG.DATASET_NAME<eq>'SemanticKitti'<block_start>DATA=yaml.safe_load(open('semantic-kitti.yaml' 'r'))<line_sep># get number of interest classes, and the label mappings
class_remap=DATA["learning_map"]<line_sep>class_inv_remap=DATA["learning_map_inv"]<line_sep>class_ignore=DATA["learning_ignore"]<line_sep>nr_classes=len(class_inv_remap)<line_sep>class_strings=DATA["labels"]<line_sep># make lookup table for mapping
maxkey=max(class_remap.keys())<line_sep># +100 hack making lut bigger just in case there are unknown labels
class_lut=np.zeros((maxkey+100) dtype=np.int32)<line_sep>class_lut[list(class_remap.keys())]=list(class_remap.values())<line_sep>ignore_class=[cl<for>cl,ignored class_ignore.items()<if>ignored]<line_sep>class_inv_lut=np.zeros((20) dtype=np.int32)<line_sep>class_inv_lut[list(class_inv_remap.keys())]=list(class_inv_remap.values())<line_sep>things=['car' 'truck' 'bicycle' 'motorcycle' 'other-vehicle' 'person' 'bicyclist' 'motorcyclist']<line_sep>stuff=['road' 'sidewalk' 'parking' 'other-ground' 'building' 'vegetation' 'trunk' 'terrain' 'fence' 'pole' 'traffic-sign']<line_sep>all_classes=things+stuff<line_sep>valid_xentropy_ids=[1 4 2 3 5 6 7 8]<block_end><else_stmt><block_start><raise>NotImplementedError<block_end><def_stmt>init_eval min_points=50<block_start>print("New evaluator with min_points of {}".format(min_points))<line_sep>class_evaluator=PanopticEval(nr_classes <none> ignore_class min_points=min_points)<line_sep><return>class_evaluator<block_end><def_stmt>eval_one_scan class_evaluator gt_sem gt_ins pred_sem pred_ins<block_start>class_evaluator.addBatch(pred_sem pred_ins gt_sem gt_ins)<block_end><def_stmt>eval_one_scan_w_fname class_evaluator gt_sem gt_ins pred_sem pred_ins fname<block_start>class_evaluator.addBatch_w_fname(pred_sem pred_ins gt_sem gt_ins fname)<block_end><def_stmt>printResults class_evaluator logger=<none> sem_only=<false><block_start>class_PQ,class_SQ,class_RQ,class_all_PQ,class_all_SQ,class_all_RQ=class_evaluator.getPQ()<line_sep>class_IoU,class_all_IoU=class_evaluator.getSemIoU()<line_sep># now make a nice dictionary
output_dict={}<line_sep># make python variables
class_PQ=class_PQ.item()<line_sep>class_SQ=class_SQ.item()<line_sep>class_RQ=class_RQ.item()<line_sep>class_all_PQ=class_all_PQ.flatten().tolist()<line_sep>class_all_SQ=class_all_SQ.flatten().tolist()<line_sep>class_all_RQ=class_all_RQ.flatten().tolist()<line_sep>class_IoU=class_IoU.item()<line_sep>class_all_IoU=class_all_IoU.flatten().tolist()<line_sep>output_dict["all"]={}<line_sep>output_dict["all"]["PQ"]=class_PQ<line_sep>output_dict["all"]["SQ"]=class_SQ<line_sep>output_dict["all"]["RQ"]=class_RQ<line_sep>output_dict["all"]["IoU"]=class_IoU<line_sep>classwise_tables={}<for_stmt>idx,(pq rq sq iou) enumerate(zip(class_all_PQ class_all_RQ class_all_SQ class_all_IoU))<block_start>class_str=class_strings[class_inv_remap[idx]]<line_sep>output_dict[class_str]={}<line_sep>output_dict[class_str]["PQ"]=pq<line_sep>output_dict[class_str]["SQ"]=sq<line_sep>output_dict[class_str]["RQ"]=rq<line_sep>output_dict[class_str]["IoU"]=iou<block_end>PQ_all=np.mean([float(output_dict[c]["PQ"])<for>c all_classes])<line_sep>PQ_dagger=np.mean([float(output_dict[c]["PQ"])<for>c things]+[float(output_dict[c]["IoU"])<for>c stuff])<line_sep>RQ_all=np.mean([float(output_dict[c]["RQ"])<for>c all_classes])<line_sep>SQ_all=np.mean([float(output_dict[c]["SQ"])<for>c all_classes])<line_sep>PQ_things=np.mean([float(output_dict[c]["PQ"])<for>c things])<line_sep>RQ_things=np.mean([float(output_dict[c]["RQ"])<for>c things])<line_sep>SQ_things=np.mean([float(output_dict[c]["SQ"])<for>c things])<line_sep>PQ_stuff=np.mean([float(output_dict[c]["PQ"])<for>c stuff])<line_sep>RQ_stuff=np.mean([float(output_dict[c]["RQ"])<for>c stuff])<line_sep>SQ_stuff=np.mean([float(output_dict[c]["SQ"])<for>c stuff])<line_sep>mIoU=output_dict["all"]["IoU"]<line_sep>codalab_output={}<line_sep>codalab_output["pq_mean"]=float(PQ_all)<line_sep>codalab_output["pq_dagger"]=float(PQ_dagger)<line_sep>codalab_output["sq_mean"]=float(SQ_all)<line_sep>codalab_output["rq_mean"]=float(RQ_all)<line_sep>codalab_output["iou_mean"]=float(mIoU)<line_sep>codalab_output["pq_stuff"]=float(PQ_stuff)<line_sep>codalab_output["rq_stuff"]=float(RQ_stuff)<line_sep>codalab_output["sq_stuff"]=float(SQ_stuff)<line_sep>codalab_output["pq_things"]=float(PQ_things)<line_sep>codalab_output["rq_things"]=float(RQ_things)<line_sep>codalab_output["sq_things"]=float(SQ_things)<line_sep>key_list=["pq_mean" "pq_dagger" "sq_mean" "rq_mean" "iou_mean" "pq_stuff" "rq_stuff" "sq_stuff" "pq_things" "rq_things" "sq_things"]<if_stmt>sem_only<and>logger<ne><none><block_start>evaluated_fnames=class_evaluator.evaluated_fnames<line_sep>logger.info('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames) len(evaluated_fnames)-len(set(evaluated_fnames))))<line_sep>logger.info('| | IoU | PQ | RQ | SQ |')<for_stmt>k,v output_dict.items()<block_start>logger.info('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(k.ljust(8)[-8:] v['IoU'] v['PQ'] v['RQ'] v['SQ']))<block_end><return>codalab_output<block_end><if_stmt>sem_only<and>logger<is><none><block_start>evaluated_fnames=class_evaluator.evaluated_fnames<line_sep>print('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames) len(evaluated_fnames)-len(set(evaluated_fnames))))<line_sep>print('| | IoU | PQ | RQ | SQ |')<for_stmt>k,v output_dict.items()<block_start>print('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(k.ljust(8)[-8:] v['IoU'] v['PQ'] v['RQ'] v['SQ']))<block_end><return>codalab_output<block_end><if_stmt>logger<ne><none><block_start>evaluated_fnames=class_evaluator.evaluated_fnames<line_sep>logger.info('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames) len(evaluated_fnames)-len(set(evaluated_fnames))))<line_sep>logger.info('| | PQ | RQ | SQ | IoU |')<for_stmt>k,v output_dict.items()<block_start>logger.info('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(k.ljust(8)[-8:] v['PQ'] v['RQ'] v['SQ'] v['IoU']))<block_end>logger.info('True Positive: ')<line_sep>logger.info('\t|\t'.join([str(x)<for>x class_evaluator.pan_tp]))<line_sep>logger.info('False Positive: ')<line_sep>logger.info('\t|\t'.join([str(x)<for>x class_evaluator.pan_fp]))<line_sep>logger.info('False Negative: ')<line_sep>logger.info('\t|\t'.join([str(x)<for>x class_evaluator.pan_fn]))<block_end><if_stmt>logger<is><none><block_start>evaluated_fnames=class_evaluator.evaluated_fnames<line_sep>print('Evaluated {} frames. Duplicated frame number: {}'.format(len(evaluated_fnames) len(evaluated_fnames)-len(set(evaluated_fnames))))<line_sep>print('| | PQ | RQ | SQ | IoU |')<for_stmt>k,v output_dict.items()<block_start>print('|{}| {:.4f} | {:.4f} | {:.4f} | {:.4f} |'.format(k.ljust(8)[-8:] v['PQ'] v['RQ'] v['SQ'] v['IoU']))<block_end>print('True Positive: ')<line_sep>print('\t|\t'.join([str(x)<for>x class_evaluator.pan_tp]))<line_sep>print('False Positive: ')<line_sep>print('\t|\t'.join([str(x)<for>x class_evaluator.pan_fp]))<line_sep>print('False Negative: ')<line_sep>print('\t|\t'.join([str(x)<for>x class_evaluator.pan_fn]))<block_end><for_stmt>key key_list<block_start><if_stmt>logger<ne><none><block_start>logger.info("{}:\t{}".format(key codalab_output[key]))<block_end><else_stmt><block_start>print("{}:\t{}".format(key codalab_output[key]))<block_end><block_end><return>codalab_output<block_end>
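A hypothetical usage sketch (not part of the source) of the evaluation helpers above. It assumes the module-level lookup tables were built successfully (i.e. semantic-kitti.yaml was found) and that PanopticEval tracks filenames via addBatch_w_fname; the random labels are only there to show the call sequence.

import numpy as np

evaluator = init_eval(min_points=50)
gt_sem = np.random.randint(0, nr_classes, size=10000)
gt_ins = np.random.randint(0, 20, size=10000)
# Scoring the ground truth against itself; PQ/SQ/RQ/IoU should come out perfect.
eval_one_scan_w_fname(evaluator, gt_sem, gt_ins, gt_sem, gt_ins, "000000.label")
results = printResults(evaluator)
print(results["pq_mean"], results["iou_mean"])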
<import_from_stmt>kubernetes client<import_from_stmt>kubeflow.fairing.builders.cluster.context_source ContextSourceInterface<import_from_stmt>kubeflow.fairing.cloud ibm_cloud<import_from_stmt>kubeflow.fairing utils<import_from_stmt>kubeflow.fairing.constants constants<class_stmt>COSContextSource(ContextSourceInterface)<block_start>"""
IBM Cloud Object Storage Context Source.
:param namespace: namespace that IBM COS credential secret created in.
:param region: region name, defaults to us-geo
:param cos_endpoint_url: IBM COS endpoint url, such as "https://s3..."
"""<def_stmt>__init__ self namespace=<none> region='us-geo' cos_endpoint_url=constants.IBM_COS_DEFAULT_ENDPOINT<block_start>self.cos_endpoint_url=cos_endpoint_url<line_sep>self.region=region<line_sep>self.namespace=namespace<or>utils.get_default_target_namespace()<line_sep>self.aws_access_key_id,self.aws_secret_access_key=ibm_cloud.get_ibm_cos_credentials(namespace)<block_end><def_stmt>prepare self context_filename# pylint: disable=arguments-differ
<block_start>"""
:param context_filename: context filename
"""<line_sep>self.uploaded_context_url=self.upload_context(context_filename)<block_end><def_stmt>upload_context self context_filename<block_start>"""
:param context_filename: context filename
"""<line_sep>cos_uploader=ibm_cloud.COSUploader(self.namespace self.cos_endpoint_url)<line_sep>context_hash=utils.crc(context_filename)<line_sep>bucket_name='kubeflow-'+context_hash.lower()<line_sep><return>cos_uploader.upload_to_bucket(blob_name='fairing-builds/'+context_hash bucket_name=bucket_name file_to_upload=context_filename)<block_end><def_stmt>generate_pod_spec self image_name push# pylint: disable=arguments-differ
<block_start>"""
:param image_name: name of image to be built
:param push: whether to push image to given registry or not
"""<line_sep>args=["--dockerfile=Dockerfile" "--destination="+image_name "--context="+self.uploaded_context_url]<if_stmt><not>push<block_start>args.append("--no-push")<block_end><return>client.V1PodSpec(containers=[client.V1Container(name='kaniko' image=constants.KANIKO_IMAGE args=args env=[client.V1EnvVar(name='AWS_REGION' value=self.region) client.V1EnvVar(name='AWS_ACCESS_KEY_ID' value=self.aws_access_key_id) client.V1EnvVar(name='AWS_SECRET_ACCESS_KEY' value=self.aws_secret_access_key) client.V1EnvVar(name='S3_ENDPOINT' value=self.cos_endpoint_url) ] volume_mounts=[client.V1VolumeMount(name="docker-config" mount_path="/kaniko/.docker/")])] restart_policy='Never' volumes=[client.V1Volume(name="docker-config" config_map=client.V1ConfigMapVolumeSource(name="docker-config"))])<block_end><def_stmt>cleanup self# TODO(@jinchihe)
<block_start><pass><block_end><block_end> |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: minitaur_logging.proto
<import_stmt>sys<import_stmt>os inspect<line_sep>currentdir=os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))<line_sep>parentdir=os.path.dirname(os.path.dirname(currentdir))<line_sep>os.sys.path.insert(0 parentdir)<line_sep>_b=sys.version_info[0]<l>3<and>(<lambda>x:x)<or>(<lambda>x:x.encode('latin1'))<import_from_stmt>google.protobuf descriptor<as>_descriptor<import_from_stmt>google.protobuf message<as>_message<import_from_stmt>google.protobuf reflection<as>_reflection<import_from_stmt>google.protobuf symbol_database<as>_symbol_database<import_from_stmt>google.protobuf descriptor_pb2<line_sep># @@protoc_insertion_point(imports)
_sym_db=_symbol_database.Default()<import_from_stmt>pybullet_envs.minitaur.envs timestamp_pb2<as>timestamp__pb2<import_from_stmt>pybullet_envs.minitaur.envs vector_pb2<as>vector__pb2<line_sep>DESCRIPTOR=_descriptor.FileDescriptor(name='minitaur_logging.proto' package='robotics.reinforcement_learning.minitaur.envs' syntax='proto3' serialized_pb=_b('\n\x16minitaur_logging.proto\x12-robotics.reinforcement_learning.minitaur.envs\x1a\x0ftimestamp.proto\x1a\x0cvector.proto\"k\n\x0fMinitaurEpisode\x12X\n\x0cstate_action\x18\x01 \x03(\x0b\x32\x42.robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction\"U\n\x12MinitaurMotorState\x12\r\n\x05\x61ngle\x18\x01 \x01(\x01\x12\x10\n\x08velocity\x18\x02 \x01(\x01\x12\x0e\n\x06torque\x18\x03 \x01(\x01\x12\x0e\n\x06\x61\x63tion\x18\x04 \x01(\x01\"\xce\x02\n\x13MinitaurStateAction\x12\x12\n\ninfo_valid\x18\x06 \x01(\x08\x12(\n\x04time\x18\x01 \x01(\x0b\x32\x1a.google.protobuf.Timestamp\x12\x32\n\rbase_position\x18\x02 \x01(\x0b\x32\x1b.robotics.messages.Vector3d\x12\x35\n\x10\x62\x61se_orientation\x18\x03 \x01(\x0b\x32\x1b.robotics.messages.Vector3d\x12\x35\n\x10\x62\x61se_angular_vel\x18\x04 \x01(\x0b\x32\x1b.robotics.messages.Vector3d\x12W\n\x0cmotor_states\x18\x05 \x03(\x0b\x32\x41.robotics.reinforcement_learning.minitaur.envs.MinitaurMotorStateb\x06proto3') dependencies=[timestamp__pb2.DESCRIPTOR vector__pb2.DESCRIPTOR ])<line_sep>_MINITAUREPISODE=_descriptor.Descriptor(name='MinitaurEpisode' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurEpisode' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='state_action' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurEpisode.state_action' index=0 number=1 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto3' extension_ranges=[] oneofs=[] serialized_start=104 serialized_end=211 )<line_sep>_MINITAURMOTORSTATE=_descriptor.Descriptor(name='MinitaurMotorState' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='angle' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState.angle' index=0 number=1 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='velocity' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState.velocity' index=1 number=2 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='torque' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState.torque' index=2 number=3 type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='action' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState.action' index=3 number=4 
type=1 cpp_type=5 label=1 has_default_value=<false> default_value=float(0) message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto3' extension_ranges=[] oneofs=[] serialized_start=213 serialized_end=298 )<line_sep>_MINITAURSTATEACTION=_descriptor.Descriptor(name='MinitaurStateAction' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction' filename=<none> file=DESCRIPTOR containing_type=<none> fields=[_descriptor.FieldDescriptor(name='info_valid' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.info_valid' index=0 number=6 type=8 cpp_type=7 label=1 has_default_value=<false> default_value=<false> message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='time' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.time' index=1 number=1 type=11 cpp_type=10 label=1 has_default_value=<false> default_value=<none> message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='base_position' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.base_position' index=2 number=2 type=11 cpp_type=10 label=1 has_default_value=<false> default_value=<none> message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='base_orientation' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.base_orientation' index=3 number=3 type=11 cpp_type=10 label=1 has_default_value=<false> default_value=<none> message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='base_angular_vel' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.base_angular_vel' index=4 number=4 type=11 cpp_type=10 label=1 has_default_value=<false> default_value=<none> message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) _descriptor.FieldDescriptor(name='motor_states' full_name='robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction.motor_states' index=5 number=5 type=11 cpp_type=10 label=3 has_default_value=<false> default_value=[] message_type=<none> enum_type=<none> containing_type=<none> is_extension=<false> extension_scope=<none> options=<none> file=DESCRIPTOR) ] extensions=[] nested_types=[] enum_types=[] options=<none> is_extendable=<false> syntax='proto3' extension_ranges=[] oneofs=[] serialized_start=301 serialized_end=635 
)<line_sep>_MINITAUREPISODE.fields_by_name['state_action'].message_type=_MINITAURSTATEACTION<line_sep>_MINITAURSTATEACTION.fields_by_name['time'].message_type=timestamp__pb2._TIMESTAMP<line_sep>_MINITAURSTATEACTION.fields_by_name['base_position'].message_type=vector__pb2._VECTOR3D<line_sep>_MINITAURSTATEACTION.fields_by_name['base_orientation'].message_type=vector__pb2._VECTOR3D<line_sep>_MINITAURSTATEACTION.fields_by_name['base_angular_vel'].message_type=vector__pb2._VECTOR3D<line_sep>_MINITAURSTATEACTION.fields_by_name['motor_states'].message_type=_MINITAURMOTORSTATE<line_sep>DESCRIPTOR.message_types_by_name['MinitaurEpisode']=_MINITAUREPISODE<line_sep>DESCRIPTOR.message_types_by_name['MinitaurMotorState']=_MINITAURMOTORSTATE<line_sep>DESCRIPTOR.message_types_by_name['MinitaurStateAction']=_MINITAURSTATEACTION<line_sep>_sym_db.RegisterFileDescriptor(DESCRIPTOR)<line_sep>MinitaurEpisode=_reflection.GeneratedProtocolMessageType('MinitaurEpisode' (_message.Message ) dict(DESCRIPTOR=_MINITAUREPISODE __module__='minitaur_logging_pb2'# @@protoc_insertion_point(class_scope:robotics.reinforcement_learning.minitaur.envs.MinitaurEpisode)
))<line_sep>_sym_db.RegisterMessage(MinitaurEpisode)<line_sep>MinitaurMotorState=_reflection.GeneratedProtocolMessageType('MinitaurMotorState' (_message.Message ) dict(DESCRIPTOR=_MINITAURMOTORSTATE __module__='minitaur_logging_pb2'# @@protoc_insertion_point(class_scope:robotics.reinforcement_learning.minitaur.envs.MinitaurMotorState)
))<line_sep>_sym_db.RegisterMessage(MinitaurMotorState)<line_sep>MinitaurStateAction=_reflection.GeneratedProtocolMessageType('MinitaurStateAction' (_message.Message ) dict(DESCRIPTOR=_MINITAURSTATEACTION __module__='minitaur_logging_pb2'# @@protoc_insertion_point(class_scope:robotics.reinforcement_learning.minitaur.envs.MinitaurStateAction)
))<line_sep>_sym_db.RegisterMessage(MinitaurStateAction)<line_sep># @@protoc_insertion_point(module_scope)
|
<import_from_stmt>RecoEgamma.EgammaIsolationAlgos.interestingEgammaIsoDetIdsSequence_cff *<import_from_stmt>RecoEgamma.PhotonIdentification.photonId_cff *<import_from_stmt>RecoEgamma.ElectronIdentification.electronIdSequence_cff *<import_from_stmt>RecoEgamma.EgammaHFProducers.hfEMClusteringSequence_cff *<import_from_stmt>RecoEgamma.EgammaIsolationAlgos.egmIsolationDefinitions_cff *<line_sep>eidRobustLoose.verticesCollection="hiSelectedVertex"<line_sep>eidRobustTight.verticesCollection="hiSelectedVertex"<line_sep>eidRobustHighEnergy.verticesCollection="hiSelectedVertex"<line_sep>eidLoose.verticesCollection="hiSelectedVertex"<line_sep>eidTight.verticesCollection="hiSelectedVertex"<line_sep>hfRecoEcalCandidate.VertexCollection="hiSelectedVertex"<line_sep>egammaHighLevelRecoPostPFTask=cms.Task(interestingEgammaIsoDetIdsTask egmIsolationTask photonIDTask photonIDTaskGED eIdTask hfEMClusteringTask)<line_sep>egammaHighLevelRecoPostPF=cms.Sequence(egammaHighLevelRecoPostPFTask)<line_sep> |
""" Environment with a distribution of mazes (one new maze is drawn at each episode)
Author: <NAME>
"""<import_stmt>numpy<as>np<import_from_stmt>deer.base_classes Environment<line_sep>#import matplotlib
#matplotlib.use('qt5agg')
#from mpl_toolkits.axes_grid1 import host_subplot
#import mpl_toolkits.axisartist as AA
#import matplotlib.pyplot as plt
<import_stmt>copy<import_stmt>a_star_path_finding<as>pf<class_stmt>MyEnv(Environment)<block_start>VALIDATION_MODE=0<def_stmt>__init__ self rng **kwargs<block_start>self._random_state=rng<line_sep>self._mode=-1<line_sep>self._mode_score=0.0<line_sep>self._mode_episode_count=0<line_sep>self._episode_steps=0<line_sep>self._actions=[0 1 2 3]<line_sep>self._size_maze=8<line_sep>self._higher_dim_obs=kwargs.get('higher_dim_obs' <false>)<line_sep>self._reverse=kwargs.get('reverse' <false>)<line_sep>self._n_walls=int((self._size_maze-2)<power>2/3.)#int((self._size_maze)**2/3.)
self._n_rewards=3<line_sep>self.create_map()<line_sep>self.intern_dim=3<block_end><def_stmt>create_map self<block_start>valid_map=<false><while_stmt>valid_map<eq><false># Agent
<block_start>self._pos_agent=[1 1]<line_sep># Walls
self._pos_walls=[]<for_stmt>i range(self._size_maze)<block_start>self._pos_walls.append([i 0])<line_sep>self._pos_walls.append([i self._size_maze-1])<block_end><for_stmt>j range(self._size_maze-2)<block_start>self._pos_walls.append([0 j+1])<line_sep>self._pos_walls.append([self._size_maze-1 j+1])<block_end>n=0<while_stmt>n<l>self._n_walls<block_start>potential_wall=[self._random_state.randint(1 self._size_maze-2) self._random_state.randint(1 self._size_maze-2)]<if_stmt>(potential_wall<not><in>self._pos_walls<and>potential_wall<ne>self._pos_agent)<block_start>self._pos_walls.append(potential_wall)<line_sep>n<augadd>1<block_end><block_end># Rewards
#self._pos_rewards=[[self._size_maze-2,self._size_maze-2]]
self._pos_rewards=[]<line_sep>n=0<while_stmt>n<l>self._n_rewards<block_start>potential_reward=[self._random_state.randint(1 self._size_maze-1) self._random_state.randint(1 self._size_maze-1)]<if_stmt>(potential_reward<not><in>self._pos_rewards<and>potential_reward<not><in>self._pos_walls<and>potential_reward<ne>self._pos_agent)<block_start>self._pos_rewards.append(potential_reward)<line_sep>n<augadd>1<block_end><block_end>valid_map=self.is_valid_map(self._pos_agent self._pos_walls self._pos_rewards)<block_end><block_end><def_stmt>is_valid_map self pos_agent pos_walls pos_rewards<block_start>a=pf.AStar()<line_sep>pos_walls<line_sep>walls=[tuple(w)<for>w pos_walls]<line_sep>start=tuple(pos_agent)<for_stmt>r pos_rewards<block_start>end=tuple(r)<line_sep>a.init_grid(self._size_maze self._size_maze walls start end)<line_sep>maze=a<line_sep>optimal_path=maze.solve()<if_stmt>(optimal_path<eq><none>)<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>reset self mode<block_start>self._episode_steps=0<line_sep>self._mode=mode<line_sep>self.create_map()<if_stmt>mode<eq>MyEnv.VALIDATION_MODE<block_start><if_stmt>self._mode<ne>MyEnv.VALIDATION_MODE<block_start>self._mode=MyEnv.VALIDATION_MODE<line_sep>self._mode_score=0.0<line_sep>self._mode_episode_count=0<block_end><else_stmt><block_start>self._mode_episode_count<augadd>1<block_end><block_end><return>[1<times>[self._size_maze<times>[self._size_maze<times>[0]]]]<block_end><def_stmt>act self action<block_start>self._episode_steps<augadd>1<line_sep>action=self._actions[action]<line_sep>reward=-0.1<if_stmt>(action<eq>0)<block_start><if_stmt>([self._pos_agent[0]+1 self._pos_agent[1]]<not><in>self._pos_walls)<block_start>self._pos_agent[0]=self._pos_agent[0]+1<block_end><block_end><elif_stmt>(action<eq>1)<block_start><if_stmt>([self._pos_agent[0] self._pos_agent[1]+1]<not><in>self._pos_walls)<block_start>self._pos_agent[1]=self._pos_agent[1]+1<block_end><block_end><elif_stmt>(action<eq>2)<block_start><if_stmt>([self._pos_agent[0]-1 self._pos_agent[1]]<not><in>self._pos_walls)<block_start>self._pos_agent[0]=self._pos_agent[0]-1<block_end><block_end><elif_stmt>(action<eq>3)<block_start><if_stmt>([self._pos_agent[0] self._pos_agent[1]-1]<not><in>self._pos_walls)<block_start>self._pos_agent[1]=self._pos_agent[1]-1<block_end><block_end><if_stmt>(self._pos_agent<in>self._pos_rewards)<block_start>reward=1<line_sep>self._pos_rewards.remove(self._pos_agent)<block_end>self._mode_score<augadd>reward<line_sep><return>reward<block_end><def_stmt>summarizePerformance self test_data_set learning_algo *args **kwargs<block_start>print("test_data_set.observations.shape")<line_sep>print(test_data_set.observations()[0][0:1])<line_sep>print("self._mode_score:"+str(self._mode_score)+".")<block_end><def_stmt>inputDimensions self<block_start><if_stmt>(self._higher_dim_obs<eq><true>)<block_start><return>[(1 self._size_maze<times>6 self._size_maze<times>6)]<block_end><else_stmt><block_start><return>[(1 self._size_maze self._size_maze)]<block_end><block_end><def_stmt>observationType self subject<block_start><return>np.float32<block_end><def_stmt>nActions self<block_start><return>len(self._actions)<block_end><def_stmt>observe self<block_start>self._map=np.zeros((self._size_maze self._size_maze))<for_stmt>coord_wall self._pos_walls<block_start>self._map[coord_wall[0] coord_wall[1]]=1<block_end><for_stmt>coord_reward self._pos_rewards<block_start>self._map[coord_reward[0] coord_reward[1]]=2<block_end>self._map[self._pos_agent[0] self._pos_agent[1]]=0.5<if_stmt>(self._higher_dim_obs<eq><true>)<block_start>indices_reward=np.argwhere(self._map<eq>2)<line_sep>indices_agent=np.argwhere(self._map<eq>0.5)<line_sep>self._map=self._map/1.<line_sep>self._map=np.repeat(np.repeat(self._map 6 axis=0) 6 axis=1)<line_sep># agent repr
agent_obs=np.zeros((6 6))<line_sep>agent_obs[0 2]=0.8<line_sep>agent_obs[1 0:5]=0.9<line_sep>agent_obs[2 1:4]=0.9<line_sep>agent_obs[3 1:4]=0.9<line_sep>agent_obs[4 1]=0.9<line_sep>agent_obs[4 3]=0.9<line_sep>agent_obs[5 0:2]=0.9<line_sep>agent_obs[5 3:5]=0.9<line_sep># reward repr
reward_obs=np.zeros((6 6))<line_sep>reward_obs[: 1]=0.7<line_sep>reward_obs[0 1:4]=0.6<line_sep>reward_obs[1 3]=0.7<line_sep>reward_obs[2 1:4]=0.6<line_sep>reward_obs[4 2]=0.7<line_sep>reward_obs[5 2:4]=0.7<for_stmt>i indices_reward<block_start>self._map[i[0]<times>6:(i[0]+1)<times>6: i[1]<times>6:(i[1]+1)<times>6]=reward_obs<block_end><for_stmt>i indices_agent<block_start>self._map[i[0]<times>6:(i[0]+1)<times>6: i[1]<times>6:(i[1]+1)<times>6]=agent_obs<block_end>self._map=(self._map<times>2)-1#scaling
#print ("self._map higher_dim_obs")
#print (self._map)
#plt.imshow(self._map, cmap='gray_r')
#plt.show()
<block_end><else_stmt><block_start>self._map=self._map/2.<line_sep>self._map[self._map<eq>0.5]=0.99# agent
self._map[self._map<eq>1.]=0.5<block_end># reward
<if_stmt>(self._reverse<eq><true>)<block_start>self._map=-self._map<block_end>#1-self._map
<return>[self._map]<block_end><def_stmt>inTerminalState self<block_start><if_stmt>(self._pos_rewards<eq>[]<or>(self._mode<ge>0<and>self._episode_steps<ge>50))<block_start><return><true><block_end><else_stmt><block_start><return><false><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>hashlib<line_sep>rng=np.random.RandomState(123456)<line_sep>env=MyEnv(rng higher_dim_obs=<false>)<line_sep>maps=[]<for_stmt>i range(10000)<block_start>env.create_map()<line_sep>one_laby=env.observe()[0]<line_sep># Hashing the labyrinths to be able to find duplicates in O(1)
one_laby=int(hashlib.sha1(str(one_laby).encode('utf-8')).hexdigest() 16)%(10<power>8)<line_sep># TESTING ADDING DUPLICATION
<if_stmt>i%1000<eq>0<block_start>env.reset(0)<block_end><if_stmt>i%1000<eq>500<block_start>env.reset(1)<block_end>maps.append(copy.deepcopy(one_laby))<block_end>duplicate_laby=0<for_stmt>i range(10000)<block_start>env.create_map()<line_sep>one_laby=env.observe()[0]<line_sep># Hashing the labyrinths to be able to find duplicates in O(1)
one_laby=int(hashlib.sha1(str(one_laby).encode('utf-8')).hexdigest() 16)%(10<power>8)<line_sep># TESTING ADDING DUPLICATION
#if i%1000==0:
# maps.append(one_laby)
# TESTING WITH RESETS
<if_stmt>i%1000<eq>0<block_start>env.reset(0)<block_end><if_stmt>i%1000<eq>500<block_start>env.reset(1)<block_end>duplicate=min(maps.count(one_laby) 1)<line_sep>duplicate_laby<augadd>duplicate<if_stmt>i%1000<eq>0<block_start>print("Number of duplicate labyrinths:"+str(duplicate_laby)+".")<block_end><block_end><block_end> |
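A hypothetical usage sketch (not part of the source) for the maze environment above, assuming the deer package and the local a_star_path_finding module are importable. It draws a maze, takes a few random actions, and prints the reward stream.

rng = np.random.RandomState(0)
env = MyEnv(rng, higher_dim_obs=False)
obs = env.reset(MyEnv.VALIDATION_MODE)
for _ in range(5):
    reward = env.act(rng.randint(env.nActions()))  # -0.1 per step, +1 on a reward cell
    print(reward, env.inTerminalState())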
# -*- coding: utf-8 -*-
<import_from_stmt>.._common *<class_stmt>IfengVideo(Extractor)<block_start>name='凤凰视频 (ifeng video)'# Expired
<def_stmt>prepare self<block_start>info=MediaInfo(self.name)<line_sep>self.vid=self.url[-13:-6]<line_sep>info.title=self.name+'-'+self.vid<line_sep>data=get_response('http://tv.ifeng.com/html5/{self.vid}/video.json'.format(**vars())).json()<if_stmt>'bqSrc'<in>data<block_start>info.streams['SD']={'container':'mp4' 'video_profile':'标清' 'src':[data['bqSrc']] 'size':0}<block_end><if_stmt>'gqSrc'<in>data<block_start>info.streams['HD']={'container':'mp4' 'video_profile':'高清' 'src':[data['gqSrc']] 'size':0}<block_end><return>info<block_end><block_end>site=IfengVideo()<line_sep> |
<import_stmt>_sk_fail<line_sep>_sk_fail._("dis")<line_sep> |
# -*- coding: utf-8 -*-
"""Shared functionality for delimiter separated values output modules."""<import_from_stmt>plaso.output formatting_helper<import_from_stmt>plaso.output interface<class_stmt>DSVEventFormattingHelper(formatting_helper.EventFormattingHelper)<block_start>"""Delimiter separated values output module event formatting helper."""<def_stmt>__init__ self output_mediator field_formatting_helper field_names field_delimiter=','<block_start>"""Initializes a delimiter separated values event formatting helper.
Args:
output_mediator (OutputMediator): output mediator.
field_formatting_helper (FieldFormattingHelper): field formatting helper.
field_names (list[str]): names of the fields to output.
field_delimiter (Optional[str]): field delimiter.
"""<line_sep>super(DSVEventFormattingHelper self).__init__(output_mediator)<line_sep>self._field_delimiter=field_delimiter<line_sep>self._field_names=field_names<line_sep>self._field_formatting_helper=field_formatting_helper<block_end><def_stmt>_SanitizeField self field<block_start>"""Sanitizes a field for output.
This method replaces any field delimiters with a space.
Args:
field (str): value of the field to sanitize.
Returns:
str: sanitized value of the field.
"""<if_stmt>self._field_delimiter<and>isinstance(field str)<block_start><return>field.replace(self._field_delimiter ' ')<block_end><return>field<block_end><def_stmt>GetFormattedEvent self event event_data event_data_stream event_tag<block_start>"""Retrieves a string representation of the event.
Args:
event (EventObject): event.
event_data (EventData): event data.
event_data_stream (EventDataStream): event data stream.
event_tag (EventTag): event tag.
Returns:
str: string representation of the event.
"""<line_sep>field_values=[]<for_stmt>field_name self._field_names<block_start>field_value=self._field_formatting_helper.GetFormattedField(field_name event event_data event_data_stream event_tag)<line_sep>field_value=self._SanitizeField(field_value)<line_sep>field_values.append(field_value)<block_end><return>self._field_delimiter.join(field_values)<block_end><def_stmt>GetFormattedFieldNames self<block_start>"""Retrieves a string representation of the field names.
Returns:
str: string representation of the field names.
"""<line_sep><return>self._field_delimiter.join(self._field_names)<block_end><def_stmt>SetFieldDelimiter self field_delimiter<block_start>"""Sets the field delimiter.
Args:
field_delimiter (str): field delimiter.
"""<line_sep>self._field_delimiter=field_delimiter<block_end><def_stmt>SetFields self field_names<block_start>"""Sets the names of the fields to output.
Args:
field_names (list[str]): names of the fields to output.
"""<line_sep>self._field_names=field_names<block_end><block_end><class_stmt>DSVOutputModule(interface.TextFileOutputModule)<block_start>"""Shared functionality for delimiter separated values output modules."""<def_stmt>__init__ self output_mediator field_formatting_helper names delimiter=',' header=<none><block_start>"""Initializes a delimiter separated values output module.
Args:
output_mediator (OutputMediator): an output mediator.
field_formatting_helper (FieldFormattingHelper): field formatting helper.
names (list[str]): names of the fields to output.
delimiter (Optional[str]): field delimiter.
header (Optional[str]): header, where None will have WriteHeader
generate a header from the field names.
"""<line_sep>event_formatting_helper=DSVEventFormattingHelper(output_mediator field_formatting_helper names field_delimiter=delimiter)<line_sep>super(DSVOutputModule self).__init__(output_mediator event_formatting_helper)<line_sep>self._header=header<block_end><def_stmt>SetFieldDelimiter self field_delimiter<block_start>"""Sets the field delimiter.
Args:
field_delimiter (str): field delimiter.
"""<line_sep>self._event_formatting_helper.SetFieldDelimiter(field_delimiter)<block_end><def_stmt>SetFields self field_names<block_start>"""Sets the names of the fields to output.
Args:
field_names (list[str]): names of the fields to output.
"""<line_sep>self._event_formatting_helper.SetFields(field_names)<block_end><def_stmt>WriteHeader self<block_start>"""Writes the header to the output."""<if_stmt>self._header<block_start>output_text=self._header<block_end><else_stmt><block_start>output_text=self._event_formatting_helper.GetFormattedFieldNames()<block_end>self.WriteLine(output_text)<block_end><block_end> |
<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_stmt>random<import_stmt>pickle<import_from_stmt>sklearn.model_selection train_test_split<import_from_stmt>sklearn.feature_extraction.text TfidfVectorizer<import_from_stmt>sklearn.linear_model LogisticRegression<line_sep>#The url needs to undergo some amount of cleansing before we use it. We tokenize it by splitting on slashes, dashes and dots, and dropping the 'com' token
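# For illustration (hypothetical input): url_cleanse("www.example.com/test-page") returns a
# de-duplicated, unordered token list such as ['www.example.com', 'www', 'example', 'test', 'page'];
# the URL is split on '/', '-' and '.', and the bare 'com' token is removed.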
<def_stmt>url_cleanse web_url<block_start>web_url=web_url.lower()<line_sep>urltoken=[]<line_sep>dot_slash=[]<line_sep>slash=str(web_url).split('/')<for_stmt>i slash<block_start>r1=str(i).split('-')<line_sep>token_slash=[]<for_stmt>j range(0 len(r1))<block_start>r2=str(r1[j]).split('.')<line_sep>token_slash=token_slash+r2<block_end>dot_slash=dot_slash+r1+token_slash<block_end>urltoken=list(set(dot_slash))<if_stmt>'com'<in>urltoken<block_start>urltoken.remove('com')<block_end><return>urltoken<block_end># We ingest the data and convert it to the relevant dataframes.
input_url='~/data.csv'<line_sep>data_csv=pd.read_csv(input_url ',' error_bad_lines=<false>)<line_sep>data_df=pd.DataFrame(data_csv)<line_sep>url_df=np.array(data_df)<line_sep>random.shuffle(url_df)<line_sep>y=[d[1]<for>d url_df]<line_sep>inputurls=[d[0]<for>d url_df]<line_sep>#http://blog.christianperone.com/2011/09/machine-learning-text-feature-extraction-tf-idf-part-i/
#We need to generate the tf-idf from the urls.
url_vectorizer=TfidfVectorizer(tokenizer=url_cleanse)<line_sep>x=url_vectorizer.fit_transform(inputurls)<line_sep>x_train,x_test,y_train,y_test=train_test_split(x y test_size=0.2 random_state=42)<line_sep>l_regress=LogisticRegression()# Logistic regression
l_regress.fit(x_train y_train)<line_sep>l_score=l_regress.score(x_test y_test)<line_sep>print("score: {0:.2f} %".format(100<times>l_score))<line_sep>url_vectorizer_save=url_vectorizer<line_sep>file1="model.pkl"<with_stmt>open(file1 'wb')<as>f<block_start>pickle.dump(l_regress f)<block_end>f.close()<line_sep>file2="vector.pkl"<with_stmt>open(file2 'wb')<as>f2<block_start>pickle.dump(url_vectorizer_save f2)<block_end>f2.close()<line_sep>#We load a bunch of urls that we want to check are legit or not
urls=['hackthebox.eu' 'facebook.com']<line_sep>file1="model.pkl"<with_stmt>open(file1 'rb')<as>f1<block_start>lgr=pickle.load(f1)<block_end>f1.close()<line_sep>file2="vector.pkl"<with_stmt>open(file2 'rb')<as>f2<block_start>url_vectorizer=pickle.load(f2)<block_end>f2.close()<line_sep>x=url_vectorizer.transform(urls)<line_sep>y_predict=lgr.predict(x)<line_sep>print(urls)<line_sep>print(y_predict)<line_sep># We can use the whitelist to make the predictions
whitelisted_url=['hackthebox.eu' 'root-me.org']<line_sep>some_url=[i<for>i inputurls<if>i<not><in>whitelisted_url]<line_sep>file1="model.pkl"<with_stmt>open(file1 'rb')<as>f1<block_start>l_regress=pickle.load(f1)<block_end>f1.close()<line_sep>file2="vector.pkl"<with_stmt>open(file2 'rb')<as>f2<block_start>url_vectorizer=pickle.load(f2)<block_end>f2.close()<line_sep>url_vectorizer=url_vectorizer<line_sep>x=url_vectorizer.transform(some_url)<line_sep>y_predict=l_regress.predict(x)<for_stmt>site whitelisted_url<block_start>some_url.append(site)<block_end>print(some_url)<line_sep>l_predict=list(y_predict)<for_stmt>j range(0 len(whitelisted_url))<block_start>l_predict.append('good')<block_end>print(l_predict)<line_sep>#use SVM
<import_from_stmt>sklearn.svm SVC<line_sep>svmModel=SVC()<line_sep>svmModel.fit(x_train y_train)<line_sep>#lsvcModel = svm.LinearSVC.fit(X_train, y_train)
print("SVM score: {0:.2f} %".format(100<times>svmModel.score(x_test y_test)))<line_sep>file1="model.pkl"<with_stmt>open(file1 'rb')<as>f1<block_start>svm_model=pickle.load(f1)<block_end>f1.close()<line_sep>file2="vector.pkl"<with_stmt>open(file2 'rb')<as>f2<block_start>url_vectorizer=pickle.load(f2)<block_end>f2.close()<line_sep>test_url="http://www.isitmalware.com"<line_sep>vec_test_url=url_vectorizer.transform([test_url.strip()])<line_sep>result=svm_model.predict(vec_test_url)<line_sep>print(test_url)<line_sep>print(result)<line_sep>
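# Note (illustrative, not from the original code): the SVM trained above is never pickled, so
# "model.pkl" still holds the logistic regression when it is reloaded here. To reload the SVM
# itself, it would first need to be saved under its own (hypothetical) file name, e.g.:
# with open("svm_model.pkl", 'wb') as f3:
#     pickle.dump(svmModel, f3)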
<import_from_stmt>typing Optional<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn CrossEntropyLoss<import_from_stmt>transformers.models.bert.modeling_bert ACT2FN BertPreTrainingHeads<import_from_stmt>transformers.models.roberta.modeling_roberta RobertaLMHead<import_from_stmt>luke.model LukeModel LukeConfig<class_stmt>EntityPredictionHeadTransform(nn.Module)<block_start><def_stmt>__init__ self config:LukeConfig<block_start>super(EntityPredictionHeadTransform self).__init__()<line_sep>self.dense=nn.Linear(config.hidden_size config.entity_emb_size)<if_stmt>isinstance(config.hidden_act str)<block_start>self.transform_act_fn=ACT2FN[config.hidden_act]<block_end><else_stmt><block_start>self.transform_act_fn=config.hidden_act<block_end>self.LayerNorm=nn.LayerNorm(config.entity_emb_size eps=config.layer_norm_eps)<block_end><def_stmt>forward self hidden_states:torch.Tensor<block_start>hidden_states=self.dense(hidden_states)<line_sep>hidden_states=self.transform_act_fn(hidden_states)<line_sep>hidden_states=self.LayerNorm(hidden_states)<line_sep><return>hidden_states<block_end><block_end><class_stmt>EntityPredictionHead(nn.Module)<block_start><def_stmt>__init__ self config:LukeConfig<block_start>super(EntityPredictionHead self).__init__()<line_sep>self.config=config<line_sep>self.transform=EntityPredictionHeadTransform(config)<line_sep>self.decoder=nn.Linear(config.entity_emb_size config.entity_vocab_size bias=<false>)<line_sep>self.bias=nn.Parameter(torch.zeros(config.entity_vocab_size))<block_end><def_stmt>forward self hidden_states:torch.Tensor<block_start>hidden_states=self.transform(hidden_states)<line_sep>hidden_states=self.decoder(hidden_states)+self.bias<line_sep><return>hidden_states<block_end><block_end><class_stmt>LukePretrainingModel(LukeModel)<block_start><def_stmt>__init__ self config:LukeConfig<block_start>super(LukePretrainingModel self).__init__(config)<if_stmt>self.config.bert_model_name<and>"roberta"<in>self.config.bert_model_name<block_start>self.lm_head=RobertaLMHead(config)<line_sep>self.lm_head.decoder.weight=self.embeddings.word_embeddings.weight<block_end><else_stmt><block_start>self.cls=BertPreTrainingHeads(config)<line_sep>self.cls.predictions.decoder.weight=self.embeddings.word_embeddings.weight<block_end>self.entity_predictions=EntityPredictionHead(config)<line_sep>self.entity_predictions.decoder.weight=self.entity_embeddings.entity_embeddings.weight<line_sep>self.apply(self.init_weights)<block_end><def_stmt>forward self word_ids:torch.LongTensor word_segment_ids:torch.LongTensor word_attention_mask:torch.LongTensor entity_ids:torch.LongTensor entity_position_ids:torch.LongTensor entity_segment_ids:torch.LongTensor entity_attention_mask:torch.LongTensor masked_entity_labels:Optional[torch.LongTensor]=<none> masked_lm_labels:Optional[torch.LongTensor]=<none> **kwargs<block_start>model_dtype=next(self.parameters()).dtype# for fp16 compatibility
output=super(LukePretrainingModel self).forward(word_ids word_segment_ids word_attention_mask entity_ids entity_position_ids entity_segment_ids entity_attention_mask )<line_sep>word_sequence_output,entity_sequence_output=output[:2]<line_sep>loss_fn=CrossEntropyLoss(ignore_index=-1)<line_sep>ret=dict(loss=word_ids.new_tensor(0.0 dtype=model_dtype))<if_stmt>masked_entity_labels<is><not><none><block_start>entity_mask=masked_entity_labels<ne>-1<if_stmt>entity_mask.sum()<g>0<block_start>target_entity_sequence_output=torch.masked_select(entity_sequence_output entity_mask.unsqueeze(-1))<line_sep>target_entity_sequence_output=target_entity_sequence_output.view(-1 self.config.hidden_size)<line_sep>target_entity_labels=torch.masked_select(masked_entity_labels entity_mask)<line_sep>entity_scores=self.entity_predictions(target_entity_sequence_output)<line_sep>entity_scores=entity_scores.view(-1 self.config.entity_vocab_size)<line_sep>ret["masked_entity_loss"]=loss_fn(entity_scores target_entity_labels)<line_sep>ret["masked_entity_correct"]=(torch.argmax(entity_scores 1).data<eq>target_entity_labels.data).sum()<line_sep>ret["masked_entity_total"]=target_entity_labels.ne(-1).sum()<line_sep>ret["loss"]<augadd>ret["masked_entity_loss"]<block_end><else_stmt><block_start>ret["masked_entity_loss"]=word_ids.new_tensor(0.0 dtype=model_dtype)<line_sep>ret["masked_entity_correct"]=word_ids.new_tensor(0 dtype=torch.long)<line_sep>ret["masked_entity_total"]=word_ids.new_tensor(0 dtype=torch.long)<block_end><block_end><if_stmt>masked_lm_labels<is><not><none><block_start>masked_lm_mask=masked_lm_labels<ne>-1<if_stmt>masked_lm_mask.sum()<g>0<block_start>masked_word_sequence_output=torch.masked_select(word_sequence_output masked_lm_mask.unsqueeze(-1))<line_sep>masked_word_sequence_output=masked_word_sequence_output.view(-1 self.config.hidden_size)<if_stmt>self.config.bert_model_name<and>"roberta"<in>self.config.bert_model_name<block_start>masked_lm_scores=self.lm_head(masked_word_sequence_output)<block_end><else_stmt><block_start>masked_lm_scores=self.cls.predictions(masked_word_sequence_output)<block_end>masked_lm_scores=masked_lm_scores.view(-1 self.config.vocab_size)<line_sep>masked_lm_labels=torch.masked_select(masked_lm_labels masked_lm_mask)<line_sep>ret["masked_lm_loss"]=loss_fn(masked_lm_scores masked_lm_labels)<line_sep>ret["masked_lm_correct"]=(torch.argmax(masked_lm_scores 1).data<eq>masked_lm_labels.data).sum()<line_sep>ret["masked_lm_total"]=masked_lm_labels.ne(-1).sum()<line_sep>ret["loss"]<augadd>ret["masked_lm_loss"]<block_end><else_stmt><block_start>ret["masked_lm_loss"]=word_ids.new_tensor(0.0 dtype=model_dtype)<line_sep>ret["masked_lm_correct"]=word_ids.new_tensor(0 dtype=torch.long)<line_sep>ret["masked_lm_total"]=word_ids.new_tensor(0 dtype=torch.long)<block_end><block_end><return>ret<block_end><block_end> |
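# Note: both masked_lm_labels and masked_entity_labels use -1 as the ignore index (see the
# CrossEntropyLoss above); batches with no masked positions contribute zero to "loss" and
# report zero correct/total counts.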
# encoding: utf-8
# module Autodesk.Revit.DB.Electrical calls itself Electrical
# from RevitAPI,Version=17.0.0.0,Culture=neutral,PublicKeyToken=null
# by generator 1.145
# no doc
# no imports
# no functions
# classes
<import_from_stmt>Electrical_parts.CableTrayConduitBase CableTrayConduitBase<import_from_stmt>Electrical_parts.CableTray CableTray<import_from_stmt>Electrical_parts.CableTrayConduitRunBase CableTrayConduitRunBase<import_from_stmt>Electrical_parts.CableTrayRun CableTrayRun<import_from_stmt>Electrical_parts.CableTraySettings CableTraySettings<import_from_stmt>Electrical_parts.CableTrayShape CableTrayShape<import_from_stmt>Electrical_parts.CableTraySizeIterator CableTraySizeIterator<import_from_stmt>Electrical_parts.CableTraySizes CableTraySizes<import_from_stmt>Electrical_parts.CableTrayType CableTrayType<import_from_stmt>Electrical_parts.CapitalizationForLoadNames CapitalizationForLoadNames<import_from_stmt>Electrical_parts.CircuitLoadCalculationMethod CircuitLoadCalculationMethod<import_from_stmt>Electrical_parts.CircuitSequence CircuitSequence<import_from_stmt>Electrical_parts.CircuitType CircuitType<import_from_stmt>Electrical_parts.Conduit Conduit<import_from_stmt>Electrical_parts.ConduitRun ConduitRun<import_from_stmt>Electrical_parts.ConduitSettings ConduitSettings<import_from_stmt>Electrical_parts.ConduitSize ConduitSize<import_from_stmt>Electrical_parts.ConduitSizeIterator ConduitSizeIterator<import_from_stmt>Electrical_parts.ConduitSizes ConduitSizes<import_from_stmt>Electrical_parts.ConduitSizeSettingIterator ConduitSizeSettingIterator<import_from_stmt>Electrical_parts.ConduitSizeSettings ConduitSizeSettings<import_from_stmt>Electrical_parts.ConduitType ConduitType<import_from_stmt>Electrical_parts.CorrectionFactor CorrectionFactor<import_from_stmt>Electrical_parts.CorrectionFactorSet CorrectionFactorSet<import_from_stmt>Electrical_parts.CorrectionFactorSetIterator CorrectionFactorSetIterator<import_from_stmt>Electrical_parts.DistributionSysType DistributionSysType<import_from_stmt>Electrical_parts.DistributionSysTypeSet DistributionSysTypeSet<import_from_stmt>Electrical_parts.DistributionSysTypeSetIterator DistributionSysTypeSetIterator<import_from_stmt>Electrical_parts.ElectricalDemandFactorDefinition ElectricalDemandFactorDefinition<import_from_stmt>Electrical_parts.ElectricalDemandFactorRule ElectricalDemandFactorRule<import_from_stmt>Electrical_parts.ElectricalDemandFactorValue ElectricalDemandFactorValue<import_from_stmt>Electrical_parts.ElectricalEquipment ElectricalEquipment<import_from_stmt>Electrical_parts.ElectricalLoadClassification ElectricalLoadClassification<import_from_stmt>Electrical_parts.ElectricalLoadClassificationData ElectricalLoadClassificationData<import_from_stmt>Electrical_parts.ElectricalLoadClassificationSpace ElectricalLoadClassificationSpace<import_from_stmt>Electrical_parts.ElectricalPhase ElectricalPhase<import_from_stmt>Electrical_parts.ElectricalPhaseConfiguration ElectricalPhaseConfiguration<import_from_stmt>Electrical_parts.ElectricalSetting ElectricalSetting<import_from_stmt>Electrical_parts.ElectricalSystem ElectricalSystem<import_from_stmt>Electrical_parts.ElectricalSystemSet ElectricalSystemSet<import_from_stmt>Electrical_parts.ElectricalSystemSetIterator ElectricalSystemSetIterator<import_from_stmt>Electrical_parts.ElectricalSystemType ElectricalSystemType<import_from_stmt>Electrical_parts.GroundConductorSize GroundConductorSize<import_from_stmt>Electrical_parts.GroundConductorSizeSet GroundConductorSizeSet<import_from_stmt>Electrical_parts.GroundConductorSizeSetIterator GroundConductorSizeSetIterator<import_from_stmt>Electrical_parts.InsulationType InsulationType<import_from_stmt>Electrical_parts.InsulationTypeSet 
InsulationTypeSet<import_from_stmt>Electrical_parts.InsulationTypeSetIterator InsulationTypeSetIterator<import_from_stmt>Electrical_parts.LightingDevice LightingDevice<import_from_stmt>Electrical_parts.LightingFixture LightingFixture<import_from_stmt>Electrical_parts.LoadClassification LoadClassification<import_from_stmt>Electrical_parts.LoadClassificationType LoadClassificationType<import_from_stmt>Electrical_parts.NeutralMode NeutralMode<import_from_stmt>Electrical_parts.PanelConfiguration PanelConfiguration<import_from_stmt>Electrical_parts.PanelScheduleData PanelScheduleData<import_from_stmt>Electrical_parts.PanelSchedulePhaseLoadType PanelSchedulePhaseLoadType<import_from_stmt>Electrical_parts.PanelScheduleSheetInstance PanelScheduleSheetInstance<import_from_stmt>Electrical_parts.PanelScheduleTemplate PanelScheduleTemplate<import_from_stmt>Electrical_parts.PanelScheduleType PanelScheduleType<import_from_stmt>Electrical_parts.PanelScheduleView PanelScheduleView<import_from_stmt>Electrical_parts.PowerFactorStateType PowerFactorStateType<import_from_stmt>Electrical_parts.TemperatureRatingType TemperatureRatingType<import_from_stmt>Electrical_parts.TemperatureRatingTypeSet TemperatureRatingTypeSet<import_from_stmt>Electrical_parts.TemperatureRatingTypeSetIterator TemperatureRatingTypeSetIterator<import_from_stmt>Electrical_parts.VoltageType VoltageType<import_from_stmt>Electrical_parts.VoltageTypeSet VoltageTypeSet<import_from_stmt>Electrical_parts.VoltageTypeSetIterator VoltageTypeSetIterator<import_from_stmt>Electrical_parts.Wire Wire<import_from_stmt>Electrical_parts.WireConduitType WireConduitType<import_from_stmt>Electrical_parts.WireConduitTypeSet WireConduitTypeSet<import_from_stmt>Electrical_parts.WireConduitTypeSetIterator WireConduitTypeSetIterator<import_from_stmt>Electrical_parts.WireMaterialType WireMaterialType<import_from_stmt>Electrical_parts.WireMaterialTypeSet WireMaterialTypeSet<import_from_stmt>Electrical_parts.WireMaterialTypeSetIterator WireMaterialTypeSetIterator<import_from_stmt>Electrical_parts.WireSet WireSet<import_from_stmt>Electrical_parts.WireSetIterator WireSetIterator<import_from_stmt>Electrical_parts.WireSize WireSize<import_from_stmt>Electrical_parts.WireSizeSet WireSizeSet<import_from_stmt>Electrical_parts.WireSizeSetIterator WireSizeSetIterator<import_from_stmt>Electrical_parts.WireType WireType<import_from_stmt>Electrical_parts.WireTypeSet WireTypeSet<import_from_stmt>Electrical_parts.WireTypeSetIterator WireTypeSetIterator<import_from_stmt>Electrical_parts.WiringType WiringType<line_sep> |
"""
feathers2.py
Smoothly scroll rainbow colored random curves across the front of a Waveshare Pico LCD 1.3
Display Module using a Raspberry Pi PICO.
Video: https://youtu.be/ZKrKsz7_CXo
"""<import_stmt>random<import_stmt>math<import_stmt>utime<import_from_stmt>machine Pin SPI<import_stmt>st7789<def_stmt>between left right along<block_start>"""returns a point along the curve from left to right"""<line_sep>d=(1-math.cos(along<times>math.pi))/2<line_sep><return>left<times>(1-d)+right<times>d<block_end><def_stmt>color_wheel WheelPos<block_start>"""returns a 565 color from the given position of the color wheel"""<line_sep>WheelPos=(255-WheelPos)%255<if_stmt>WheelPos<l>85<block_start><return>st7789.color565(255-WheelPos<times>3 0 WheelPos<times>3)<block_end><if_stmt>WheelPos<l>170<block_start>WheelPos<augsub>85<line_sep><return>st7789.color565(0 WheelPos<times>3 255-WheelPos<times>3)<block_end>WheelPos<augsub>170<line_sep><return>st7789.color565(WheelPos<times>3 255-WheelPos<times>3 0)<block_end><def_stmt>main # configure spi interface
<block_start>spi=SPI(1 baudrate=31250000 sck=Pin(10) mosi=Pin(11))<line_sep># initialize display
tft=st7789.ST7789(spi 240 320 reset=Pin(12 Pin.OUT) cs=Pin(9 Pin.OUT) dc=Pin(8 Pin.OUT) backlight=Pin(13 Pin.OUT) rotation=1)<line_sep># enable display and clear screen
tft.init()<line_sep>height=tft.height()<line_sep>width=tft.width()<line_sep>tfa=0# top free area
bfa=0# bottom free area
scroll=0<line_sep>wheel=0<line_sep>tft.vscrdef(tfa width bfa)<line_sep>tft.vscsad(scroll+tfa)<line_sep>tft.fill(st7789.BLACK)<line_sep>h=(height<rshift>1)-1# half the height of the display
interval=50# steps between new points
increment=1/interval# increment per step
counter=interval+1# step counter, overflow to start
current_y=0# current_y value (right point)
last_y=0# last_y value (left point)
x_offsets=[]<while_stmt><true># when the counter exceeds the interval, save current_y to last_y,
# choose a new random value for current_y between 0 and 1/2 the
# height of the display, choose a new random interval then reset
# the counter to 0
<block_start><if_stmt>counter<g>interval<block_start>offsets=random.randint(4 32)<line_sep>x_offsets=[width<floordiv>offsets<times>(x+1)-1<for>x range(offsets)]<line_sep>last_y=current_y<line_sep>current_y=random.randint(0 h)<line_sep>counter=0<line_sep>interval=random.randint(10 100)<block_end># clear the first column of the display and scroll it
tft.vline(scroll 0 height st7789.BLACK)<line_sep>tft.vscsad(scroll)<line_sep># get the next point between last_y and current_y
tween=int(between(last_y current_y counter<times>increment))<line_sep># draw mirrored pixels across the display at the offsets using the color_wheel effect
<for_stmt>i,x_offset enumerate(x_offsets)<block_start>tft.pixel((scroll+x_offset)%width h+tween color_wheel(wheel+(i<lshift>2)))<line_sep>tft.pixel((scroll+x_offset)%width h-tween color_wheel(wheel+(i<lshift>2)))<block_end># increment scroll, counter, and wheel
scroll<augadd>1<line_sep>scroll<augmod>width<line_sep>counter<augadd>1<line_sep>wheel<augadd>1<line_sep>wheel<augmod>256<line_sep># pause to slow down scrolling
utime.sleep(0.01)<block_end><block_end>main()<line_sep> |
<import_from_stmt>RecoMuon.MuonIdentification.muonShowerInformation_cfi *<line_sep>muonShowerInformation=cms.EDProducer("MuonShowerInformationProducer" MuonServiceProxy muonCollection=cms.InputTag("muons1stStep") trackCollection=cms.InputTag("generalTracks") ShowerInformationFillerParameters=MuonShowerParameters.MuonShowerInformationFillerParameters)<line_sep> |
"""
Extended Euclidean (GCD) algorithm.
Returns (s, t, g) such that a*s + b*t = g = GCD(a, b);
the returned s and t are coprime.
"""<def_stmt>extended_gcd a b<block_start>old_s,s=1 0<line_sep>old_t,t=0 1<line_sep>old_r,r=a b<while_stmt>r<ne>0<block_start>quotient=old_r/r<line_sep>old_r,r=r old_r-quotient<times>r<line_sep>old_s,s=s old_s-quotient<times>s<line_sep>old_t,t=t old_t-quotient<times>t<block_end><return>old_s old_t old_r<block_end> |
<import_stmt>sys<import_from_stmt>decimal Decimal<import_from_stmt>math ceil<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>django.db.models Max<import_from_stmt>faker Faker<import_from_stmt>baserow.contrib.database.fields.field_helpers construct_all_possible_field_kwargs <import_from_stmt>baserow.contrib.database.fields.handler FieldHandler<import_from_stmt>baserow.contrib.database.rows.handler RowHandler<import_from_stmt>baserow.contrib.database.table.models Table<class_stmt>Command(BaseCommand)<block_start>help="Fills a table with random data."<def_stmt>add_arguments self parser<block_start>parser.add_argument("table_id" type=int help="The table that needs to be "<concat>"filled.")<line_sep>parser.add_argument("limit" type=int help="Amount of rows that need to be "<concat>"inserted.")<line_sep>parser.add_argument("--add-columns" action="store_true" help="Add a column for every field type other than link row to the table "<concat>"before populating it." )<block_end><def_stmt>handle self *args **options<block_start>table_id=options["table_id"]<line_sep>limit=options["limit"]<line_sep>add_columns="add_columns"<in>options<and>options["add_columns"]<try_stmt><block_start>table=Table.objects.get(pk=table_id)<block_end><except_stmt>Table.DoesNotExist<block_start>self.stdout.write(self.style.ERROR(f"The table with id {table_id} was not "<concat>f"found."))<line_sep>sys.exit(1)<block_end>fill_table(limit table add_columns=add_columns)<line_sep>self.stdout.write(self.style.SUCCESS(f"{limit} rows have been inserted."))<block_end><block_end><def_stmt>fill_table limit table add_columns=<false><block_start>fake=Faker()<line_sep>row_handler=RowHandler()<line_sep>cache={}<if_stmt>add_columns<block_start>create_a_column_for_every_type(table)<block_end>model=table.get_model()<line_sep># Find out what the highest order is because we want to append the new rows.
order=ceil(model.objects.aggregate(max=Max("order")).get("max")<or>Decimal("0"))<for_stmt>i range(0 limit)# Based on the random_value function we have for each type we can
# build a dict with a random value for each field.
<block_start>values={f"field_{field_id}":field_object["type"].random_value(field_object["field"] fake cache)<for>field_id,field_object model._field_objects.items()}<line_sep>values,manytomany_values=row_handler.extract_manytomany_values(values model)<line_sep>order<augadd>Decimal("1")<line_sep>values["order"]=order<line_sep># Insert the row with the randomly created values.
instance=model.objects.create(**values)<line_sep># Changes the set of the manytomany values.
<for_stmt>field_name,value manytomany_values.items()<block_start><if_stmt>value<and>len(value)<g>0<block_start>getattr(instance field_name).set(value)<block_end><block_end><block_end><block_end><def_stmt>create_a_column_for_every_type table<block_start>field_handler=FieldHandler()<line_sep>all_kwargs_per_type=construct_all_possible_field_kwargs(<none> <none> <none>)<for_stmt>field_type_name,all_possible_kwargs all_kwargs_per_type.items()<block_start><if_stmt>field_type_name<eq>"link_row"<block_start><continue><block_end>i=0<for_stmt>kwargs all_possible_kwargs<block_start>i=i+1<line_sep>field_handler.create_field(table.database.group.users.first() table field_type_name **kwargs)<block_end><block_end><block_end> |
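# Illustrative usage (assuming this module is registered as the "fill_table" management command):
# python manage.py fill_table <table_id> <limit> --add-columns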
<import_stmt>abc<import_stmt>collections<import_stmt>contextlib<import_stmt>dataclasses<import_stmt>pathlib<import_stmt>re<import_stmt>tempfile<import_from_stmt>types MappingProxyType<import_from_stmt>typing Any Callable Dict List Mapping Optional Sequence Tuple Type Union cast <import_from_stmt>uqbar.objects new<import_stmt>supriya.nonrealtime# noqa
<import_stmt>supriya.realtime# noqa
<import_from_stmt>supriya commands nonrealtime realtime<import_from_stmt>supriya.assets.synthdefs.default default<import_from_stmt>supriya.enums AddAction CalculationRate ParameterRate<import_from_stmt>supriya.nonrealtime Session<import_from_stmt>supriya.realtime AsyncServer BaseServer Server<import_from_stmt>supriya.synthdefs SynthDef<line_sep># with provider.at(): proxy = provider.add_buffer(file_path=file_path)
# with provider.at(): proxy.free()
@dataclasses.dataclass(frozen=<true>)<class_stmt>Proxy<block_start>provider:"Provider"<block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>BufferProxy<block_start>provider:"Provider"<line_sep>identifier:Union["supriya.nonrealtime.Buffer" int]<line_sep>channel_count:Optional[int]=<none><line_sep>frame_count:Optional[int]=<none><line_sep>file_path:Optional[str]=<none><line_sep>starting_frame:Optional[int]=<none><def_stmt>__float__ self<block_start><return>float(int(self))<block_end><def_stmt>__int__ self<block_start><if_stmt>self.provider.server<block_start><return>self.identifier<block_end><elif_stmt>self.provider.session<block_start><return>self.provider.identifier.session_id<block_end><block_end><def_stmt>close self<block_start><pass><block_end><def_stmt>free self<block_start>self.provider.free_buffer(self)<block_end><def_stmt>normalize self new_maximum=1.0<block_start><pass><block_end><def_stmt>read self file_path leave_open=<false><block_start><pass><block_end><def_stmt>write self file_path frame_count=<none> header_format="aiff" leave_open=<false> sample_format="int24" starting_frame=<none> <block_start><pass><block_end><def_stmt>as_allocate_request self<block_start>kwargs=dict(buffer_id=int(self) frame_count=self.frame_count)<if_stmt>self.file_path<is><none><block_start><return>commands.BufferAllocateRequest(**kwargs channel_count=self.channel_count)<block_end>kwargs.update(file_path=self.file_path starting_frame=self.starting_frame)<if_stmt>self.channel_count<is><none><block_start><return>commands.BufferAllocateReadRequest(**kwargs)<block_end><return>commands.BufferAllocateReadChannelRequest(**kwargs channel_indices=list(range(self.channel_count)))<block_end><def_stmt>as_free_request self<block_start><return>commands.BufferFreeRequest(buffer_id=int(self))<block_end><block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>OscCallbackProxy(Proxy)<block_start>provider:"Provider"<line_sep>identifier:Any<def_stmt>unregister self<block_start>self.provider.unregister_osc_callback(self)<block_end><block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>BusProxy(Proxy)<block_start>calculation_rate:CalculationRate<line_sep>provider:"Provider"<line_sep>identifier:Union["supriya.nonrealtime.Bus" int]<def_stmt>__float__ self<block_start><return>float(int(self))<block_end><def_stmt>__int__ self<block_start><if_stmt>self.provider.server<block_start><return>self.identifier<block_end><elif_stmt>self.provider.session<block_start><return>self.provider.identifier.session_id<block_end><block_end><def_stmt>set_ self value<block_start>self.provider.set_bus(self value)<block_end><def_stmt>free self<block_start>self.provider.free_bus(self)<block_end>@property<def_stmt>map_symbol self<block_start><if_stmt>self.calculation_rate<eq>CalculationRate.AUDIO<block_start><return>f"a{int(self)}"<block_end><return>f"c{int(self)}"<block_end><block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>BusGroupProxy(Proxy)<block_start>calculation_rate:CalculationRate<line_sep>channel_count:int<line_sep>identifier:Union["supriya.nonrealtime.BusGroup" int]<line_sep>provider:"Provider"<line_sep>buses:Sequence["BusProxy"]=dataclasses.field(init=<false>)<def_stmt>__post_init__ self<block_start><if_stmt>isinstance(self.identifier int)<block_start>bus_identifiers=range(self.identifier self.identifier+self.channel_count)<block_end><else_stmt><block_start>bus_identifiers=self.identifier[:]<block_end>object.__setattr__(self "buses" tuple(BusProxy(calculation_rate=self.calculation_rate provider=self.provider 
identifier=bus_identifier )<for>bus_identifier bus_identifiers) )<block_end><def_stmt>__float__ self<block_start><return>float(int(self))<block_end><def_stmt>__getitem__ self item<block_start><return>self.buses[item]<block_end><def_stmt>__int__ self<block_start><if_stmt>self.provider.server<block_start><return>self.identifier<block_end><elif_stmt>self.provider.session<block_start><return>self.provider.identifier.session_id<block_end><block_end><def_stmt>__len__ self<block_start><return>self.channel_count<block_end><def_stmt>free self<block_start>self.provider.free_bus_group(self)<block_end><block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>NodeProxy(Proxy)<block_start>identifier:Union["supriya.nonrealtime.Node" int]<line_sep>provider:"Provider"<def_stmt>__float__ self<block_start><return>float(int(self))<block_end><def_stmt>__int__ self<block_start><if_stmt>self.provider.server<block_start><return>self.identifier<block_end><elif_stmt>self.provider.session<block_start><return>self.provider.identifier.session_id<block_end><block_end><def_stmt>__setitem__ self key value<block_start>self.provider.set_node(self **{key:value})<block_end><def_stmt>add_group self * add_action:int=AddAction.ADD_TO_HEAD name:Optional[str]=<none><arrow>"GroupProxy"<block_start><return>self.provider.add_group(add_action=add_action target_node=self)<block_end><def_stmt>add_synth self * synthdef:SynthDef=<none> add_action:int=AddAction.ADD_TO_HEAD name:Optional[str]=<none> **settings <arrow>"SynthProxy"<block_start><return>self.provider.add_synth(add_action=add_action synthdef=synthdef target_node=self **settings)<block_end><def_stmt>as_move_request self add_action:AddAction target_node:"NodeProxy"<arrow>commands.MoveRequest<block_start>request_classes:Dict[int Type[commands.MoveRequest]]={AddAction.ADD_TO_HEAD:commands.GroupHeadRequest AddAction.ADD_TO_TAIL:commands.GroupTailRequest AddAction.ADD_BEFORE:commands.NodeBeforeRequest AddAction.ADD_AFTER:commands.NodeAfterRequest }<line_sep>request_class:Type[commands.MoveRequest]=request_classes[add_action]<line_sep><return>request_class(node_id_pairs=[request_class.NodeIdPair(int(self) int(target_node))])<block_end><def_stmt>as_set_request self **settings<block_start>coerced_settings={}<for_stmt>key,value settings.items()<block_start><if_stmt>isinstance(value (BusProxy BusGroupProxy))<block_start><if_stmt>value.calculation_rate<eq>CalculationRate.AUDIO<block_start>value=f"a{value.identifier}"<block_end><else_stmt><block_start>value=f"c{value.identifier}"<block_end><block_end>coerced_settings[key]=value<block_end><return>commands.NodeSetRequest(node_id=int(self) **coerced_settings)<block_end><def_stmt>dispose self<block_start>self.provider.dispose(self)<block_end><def_stmt>free self<block_start>self.provider.free_node(self)<block_end><def_stmt>move self add_action:AddAction target_node:"NodeProxy"<block_start>self.provider.move_node(self add_action target_node)<block_end><block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>GroupProxy(NodeProxy)<block_start>identifier:Union["supriya.nonrealtime.Node" int]<line_sep>provider:"Provider"<def_stmt>as_add_request self add_action target_node<block_start><return>commands.GroupNewRequest(items=[commands.GroupNewRequest.Item(node_id=int(self.identifier) add_action=add_action target_node_id=int(target_node) )])<block_end><def_stmt>as_free_request self 
force=<false><block_start><return>commands.NodeFreeRequest(node_ids=[int(self)])<block_end><block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>SynthProxy(NodeProxy)<block_start>identifier:Union["supriya.nonrealtime.Node" int]<line_sep>provider:"Provider"<line_sep>synthdef:SynthDef<line_sep>settings:Dict[str Union[float BusGroupProxy]]<def_stmt>as_add_request self add_action target_node# TODO: Handle map symbols
# If arg is a bus proxy, and synth param is scalar, cast to int
# Elif arg is a bus proxy, and synth param not scalar, map
# Else cast to float
<block_start>synthdef=self.synthdef<or>default<line_sep>synthdef_kwargs={}<for_stmt>_,parameter synthdef.indexed_parameters<block_start><if_stmt>parameter.name<not><in>self.settings<block_start><continue><block_end>value=self.settings[parameter.name]<if_stmt>value<eq>parameter.value<block_start><continue><block_end><if_stmt>parameter.parameter_rate<eq>ParameterRate.SCALAR<block_start>synthdef_kwargs[parameter.name]=float(value)<block_end><elif_stmt>parameter.name<in>("in_" "out")<block_start>synthdef_kwargs[parameter.name]=float(value)<block_end><elif_stmt>isinstance(value (BusProxy BusGroupProxy))<block_start>synthdef_kwargs[parameter.name]=value.map_symbol<block_end><else_stmt><block_start>synthdef_kwargs[parameter.name]=float(value)<block_end><block_end><return>commands.SynthNewRequest(node_id=int(self.identifier) add_action=add_action target_node_id=int(target_node) synthdef=synthdef **synthdef_kwargs )<block_end><def_stmt>as_free_request self force=<false><block_start><if_stmt>force<or>"gate"<not><in>self.synthdef.parameters<block_start><return>commands.NodeFreeRequest(node_ids=[int(self)])<block_end><return>commands.NodeSetRequest(node_id=int(self) gate=0)<block_end><block_end>@dataclasses.dataclass(frozen=<true>)<class_stmt>ProviderMoment<block_start>provider:"Provider"<line_sep>seconds:float<line_sep>bus_settings:List[Tuple[BusProxy float]]=dataclasses.field(default_factory=list)<line_sep>buffer_additions:List[BufferProxy]=dataclasses.field(default_factory=list)<line_sep>buffer_removals:List[BufferProxy]=dataclasses.field(default_factory=list)<line_sep>node_reorderings:List[Tuple[NodeProxy AddAction NodeProxy]]=dataclasses.field(default_factory=list)<line_sep>node_additions:List[Tuple[NodeProxy AddAction NodeProxy]]=dataclasses.field(default_factory=list)<line_sep>node_removals:List[NodeProxy]=dataclasses.field(default_factory=list)<line_sep>node_settings:List[Tuple[NodeProxy Dict[str Union[float BusGroupProxy]]]]=dataclasses.field(default_factory=list)<line_sep>wait:bool=dataclasses.field(default=<false>)<line_sep>exit_stack:contextlib.ExitStack=dataclasses.field(init=<false> default_factory=contextlib.ExitStack compare=<false>)<def_stmt>__postinit__ self<block_start>self.exit_stack=contextlib.ExitStack()<block_end><async_keyword><def_stmt>__aenter__ self<block_start><if_stmt>self.provider.server<and><not>isinstance(self.provider.server AsyncServer)<block_start><raise>RuntimeError(repr(self.provider.server))<block_end><return>self._enter()<block_end><async_keyword><def_stmt>__aexit__ self *args<block_start>results=self._exit()<if_stmt><not>results<block_start><return><block_end>timestamp,request_bundle,synthdefs=results<line_sep>server=self.provider.server<line_sep># The underlying asyncio UDP transport will silently drop oversize packets
<if_stmt>len(request_bundle.to_datagram())<le>8192<block_start><if_stmt>self.wait# If waiting, the original ProviderMoment timestamp can be ignored
<block_start><await>request_bundle.communicate_async(server=server sync=<true>)<block_end><else_stmt><block_start>server.send(request_bundle.to_osc())<block_end><block_end><else_stmt># If over the UDP packet limit, partition the message
<block_start>requests=request_bundle.contents<line_sep># Always wait for SynthDefs to load.
<if_stmt>synthdefs<block_start>synthdef_request=requests[0]<line_sep>requests=synthdef_request.callback.contents<or>[]<line_sep>synthdef_request=new(synthdef_request callback=<none>)<line_sep><await>synthdef_request.communicate_async(sync=<true> server=server)<block_end><if_stmt>self.wait# If waiting, the original ProviderMoment timestamp can be ignored
<block_start><for_stmt>bundle commands.RequestBundle.partition(requests)<block_start><await>bundle.communicate_async(server=server sync=<true>)<block_end><block_end><else_stmt><block_start><for_stmt>bundle commands.RequestBundle.partition(requests timestamp=timestamp)<block_start>server.send(bundle.to_osc())<block_end><block_end><block_end><block_end><def_stmt>__enter__ self<block_start><if_stmt>self.provider.session<is><not><none><block_start>self.exit_stack.enter_context(self.provider.session.at(self.seconds<or>0))<block_end><if_stmt>self.provider.server<and><not>isinstance(self.provider.server Server)<block_start><raise>RuntimeError(repr(self.provider.server))<block_end><return>self._enter()<block_end><def_stmt>__exit__ self *args<block_start>results=self._exit()<if_stmt><not>results<block_start><return><block_end>timestamp,request_bundle,synthdefs=results<try_stmt><block_start>self.provider.server.send(request_bundle.to_osc())<block_end><except_stmt>OSError<block_start>requests=request_bundle.contents<if_stmt>synthdefs<block_start>synthdef_request=requests[0]<line_sep>requests=synthdef_request.callback.contents<or>[]<line_sep>synthdef_request=new(synthdef_request callback=<none>)<line_sep>synthdef_request.communicate(sync=<true> server=self.provider.server)<block_end><for_stmt>bundle commands.RequestBundle.partition(requests timestamp=timestamp)<block_start>self.provider.server.send(bundle.to_osc())<block_end><block_end><block_end><def_stmt>_enter self<block_start>self.provider._moments.append(self)<line_sep>self.provider._counter[self.seconds]<augadd>1<line_sep><return>self<block_end><def_stmt>_exit self<block_start>self.exit_stack.close()<line_sep>self.provider._moments.pop()<line_sep>self.provider._counter[self.seconds]<augsub>1<if_stmt><not>self.provider.server<block_start><return><block_end><elif_stmt>self.provider._counter[self.seconds]<block_start><return><block_end>requests=[]<line_sep>synthdefs=set()<line_sep>new_nodes=set()<for_stmt>buffer_proxy self.buffer_additions<block_start>requests.append(buffer_proxy.as_allocate_request())<block_end><for_stmt>node_proxy,add_action,target_node self.node_additions<block_start>request=node_proxy.as_add_request(add_action target_node)<if_stmt>isinstance(request commands.SynthNewRequest)<block_start><if_stmt>request.synthdef<not><in>self.provider.server<block_start>synthdefs.add(request.synthdef)<block_end><block_end>requests.append(request)<line_sep>new_nodes.add(node_proxy.identifier)<block_end><for_stmt>node_proxy,add_action,target_node self.node_reorderings<block_start>requests.append(node_proxy.as_move_request(add_action target_node))<block_end><for_stmt>node_proxy,settings self.node_settings<block_start>requests.append(node_proxy.as_set_request(**settings))<block_end><for_stmt>node_proxy self.node_removals<block_start>requests.append(node_proxy.as_free_request(force=node_proxy.identifier<in>new_nodes))<block_end><for_stmt>buffer_proxy self.buffer_removals<block_start>requests.append(buffer_proxy.as_free_request())<block_end><if_stmt>self.bus_settings<block_start>sorted_pairs=sorted(dict((int(bus_proxy.identifier) value)<for>bus_proxy,value 
self.bus_settings).items())<line_sep>request=commands.ControlBusSetRequest(index_value_pairs=sorted_pairs)<line_sep>requests.append(request)<block_end><if_stmt><not>requests<block_start><return><block_end>timestamp=self.seconds<if_stmt>timestamp<is><not><none><block_start>timestamp<augadd>self.provider._latency<block_end><if_stmt>synthdefs<block_start>request_bundle=commands.RequestBundle(timestamp=timestamp contents=[commands.SynthDefReceiveRequest(synthdefs=sorted(synthdefs key=<lambda>x:x.actual_name) callback=commands.RequestBundle(contents=requests) )] )<line_sep># check bundle size, write synthdefs to disk and do /d_load
<if_stmt>len(request_bundle.to_datagram(with_placeholders=<true>))<g>8192<block_start>directory_path=pathlib.Path(tempfile.mkdtemp())<line_sep># directory_path = pathlib.Path("~/Desktop").expanduser()
<for_stmt>synthdef synthdefs<block_start>name=synthdef.anonymous_name<if_stmt>synthdef.name<block_start>name<augadd>"-"+re.sub(r"[^\w]" "-" synthdef.name)<block_end>file_name="{}.scsyndef".format(name)<line_sep>synthdef_path=directory_path/file_name<line_sep>synthdef_path.write_bytes(synthdef.compile())<block_end>request_bundle=commands.RequestBundle(timestamp=timestamp contents=[supriya.commands.SynthDefLoadDirectoryRequest(directory_path=directory_path callback=commands.RequestBundle(contents=requests) )] )<block_end><block_end><else_stmt><block_start>request_bundle=commands.RequestBundle(timestamp=timestamp contents=requests)<block_end><for_stmt>synthdef synthdefs<block_start>synthdef._register_with_local_server(server=self.provider.server)<block_end><return>timestamp request_bundle synthdefs<block_end><block_end><class_stmt>Provider(metaclass=abc.ABCMeta)<block_start>"""
Provides limited realtime/non-realtime compatibility layer.
"""<line_sep>### INITIALIZER ###
<def_stmt>__init__ self latency=0.1<block_start>self._moments:List[ProviderMoment]=[]<line_sep>self._counter=collections.Counter()<line_sep>self._server=<none><line_sep>self._session=<none><line_sep>self._latency=latency<line_sep>self._annotation_map:Dict[Union["supriya.nonrealtime.Node" int] str]={}<block_end>### PUBLIC METHODS ###
@abc.abstractmethod<def_stmt>add_buffer self * channel_count:Optional[int]=<none> file_path:Optional[str]=<none> frame_count:Optional[int]=<none> starting_frame:Optional[int]=<none> <arrow>BufferProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>add_bus self calculation_rate=CalculationRate.CONTROL<arrow>BusProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>add_bus_group self channel_count=1 calculation_rate=CalculationRate.CONTROL<arrow>BusGroupProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>add_group self * target_node=<none> add_action=AddAction.ADD_TO_HEAD name:Optional[str]=<none> <arrow>GroupProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>add_synth self * synthdef:SynthDef=<none> target_node=<none> add_action=AddAction.ADD_TO_HEAD name:Optional[str]=<none> **settings <arrow>SynthProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>boot self **kwargs<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>dispose self node_proxy:NodeProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>free_buffer self buffer_proxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>free_bus self bus_proxy:BusProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>free_bus_group self bus_group_proxy:BusGroupProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>free_node self node_proxy:NodeProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>move_node self node_proxy:NodeProxy add_action:AddAction target_node:NodeProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>set_bus self bus_proxy:BusProxy value:float<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>set_node self node_proxy:NodeProxy **settings<block_start><raise>NotImplementedError<block_end><def_stmt>at self seconds=<none> wait=<false><block_start><if_stmt>self._moments<and>self._moments[-1].seconds<eq>seconds<block_start>provider_moment=self._moments[-1]<block_end><else_stmt><block_start>provider_moment=ProviderMoment(provider=self seconds=seconds wait=wait)<block_end><return>provider_moment<block_end>@classmethod<def_stmt>from_context cls context latency=0.1<arrow>"Provider"<block_start><if_stmt>isinstance(context Session)<block_start><return>NonrealtimeProvider(context latency=latency)<block_end><elif_stmt>isinstance(context BaseServer)<block_start><return>RealtimeProvider(context latency=latency)<block_end><raise>ValueError("Unknown context")<block_end>@classmethod<def_stmt>nonrealtime cls<arrow>"NonrealtimeProvider"<block_start>session=Session()<line_sep><return>cast("NonrealtimeProvider" cls.from_context(session))<block_end>@abc.abstractmethod<def_stmt>quit self<block_start><raise>NotImplementedError<block_end>@classmethod<def_stmt>realtime cls scsynth_path=<none> options=<none> port=<none> **kwargs<arrow>"RealtimeProvider"<block_start>server=Server()<line_sep>server.boot(port=port scsynth_path=scsynth_path options=options **kwargs)<line_sep><return>cast("RealtimeProvider" cls.from_context(server))<block_end>@classmethod<async_keyword><def_stmt>realtime_async cls scsynth_path=<none> options=<none> port=<none> **kwargs<arrow>"RealtimeProvider"<block_start>server=AsyncServer()<line_sep><await>server.boot(port=port scsynth_path=scsynth_path 
options=options **kwargs)<line_sep><return>cast("RealtimeProvider" cls.from_context(server))<block_end>@abc.abstractmethod<def_stmt>register_osc_callback self pattern:Tuple[Union[str float] <ellipsis>] procedure:Callable<arrow>OscCallbackProxy<block_start><raise>NotImplementedError<block_end>@abc.abstractmethod<def_stmt>unregister_osc_callback self proxy:OscCallbackProxy<block_start><raise>NotImplementedError<block_end>### PUBLIC PROPERTIES ###
@property<def_stmt>annotation_map self<arrow>Mapping[Union["supriya.nonrealtime.Node" int] str]<block_start><return>MappingProxyType(self._annotation_map)<block_end>@property<def_stmt>latency self<block_start><return>self._latency<block_end>@property<def_stmt>moment self<arrow>Optional[ProviderMoment]<block_start><if_stmt>self._moments<block_start><return>self._moments[-1]<block_end><return><none><block_end>@property<def_stmt>server self<arrow>Server<block_start><return>self._server<block_end>@property<def_stmt>session self<arrow>Session<block_start><return>self._session<block_end><block_end><class_stmt>NonrealtimeProvider(Provider)### INITIALIZER ###
<block_start><def_stmt>__init__ self session latency=0.1<block_start><if_stmt><not>isinstance(session Session)<block_start><raise>ValueError(f"Expected session, got {session}")<block_end>Provider.__init__(self latency=latency)<line_sep>self._session=session<block_end>### SPECIAL METHODS ###
<def_stmt>__str__ self<block_start><return>f"<{type(self).__name__} {self._session!r}>"<block_end>### PRIVATE METHODS ###
<def_stmt>_resolve_target_node self target_node<arrow>nonrealtime.Node<block_start><if_stmt>target_node<is><none><block_start>target_node=self.session.root_node<block_end><elif_stmt>isinstance(target_node NodeProxy)<block_start>target_node=target_node.identifier<block_end><return>target_node<block_end>### PUBLIC METHODS ###
<def_stmt>add_buffer self * channel_count:Optional[int]=<none> file_path:Optional[str]=<none> frame_count:Optional[int]=<none> starting_frame:Optional[int]=<none> <arrow>BufferProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>identifier=self.session.add_buffer(channel_count=channel_count file_path=file_path frame_count=frame_count starting_frame=starting_frame )<line_sep><return>BufferProxy(channel_count=channel_count file_path=file_path frame_count=frame_count identifier=identifier provider=self starting_frame=starting_frame )<block_end><def_stmt>add_bus self calculation_rate=CalculationRate.CONTROL<arrow>BusProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>calculation_rate=CalculationRate.from_expr(calculation_rate)<if_stmt>calculation_rate<not><in>(CalculationRate.AUDIO CalculationRate.CONTROL)<block_start><raise>ValueError(f"Invalid calculation rate: {calculation_rate!r}")<block_end>identifier=self.session.add_bus(calculation_rate=calculation_rate)<line_sep><return>BusProxy(calculation_rate=calculation_rate identifier=identifier provider=self)<block_end><def_stmt>add_bus_group self channel_count=1 calculation_rate=CalculationRate.CONTROL<arrow>BusGroupProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>calculation_rate=CalculationRate.from_expr(calculation_rate)<if_stmt>calculation_rate<not><in>(CalculationRate.AUDIO CalculationRate.CONTROL)<block_start><raise>ValueError(f"Invalid calculation rate: {calculation_rate!r}")<block_end><if_stmt>channel_count<l>1<block_start><raise>ValueError("Channel-count must be positive, non-zero integer")<block_end>identifier=self.session.add_bus_group(bus_count=channel_count calculation_rate=calculation_rate)<line_sep><return>BusGroupProxy(calculation_rate=calculation_rate channel_count=channel_count identifier=identifier provider=self )<block_end><def_stmt>add_group self * target_node=<none> add_action=AddAction.ADD_TO_HEAD name:Optional[str]=<none> <arrow>GroupProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>identifier=self._resolve_target_node(target_node).add_group(add_action=add_action)<line_sep>proxy=GroupProxy(identifier=identifier provider=self)<line_sep><return>proxy<block_end><def_stmt>add_synth self * synthdef:SynthDef=<none> target_node=<none> add_action=AddAction.ADD_TO_HEAD name:Optional[str]=<none> **settings <arrow>SynthProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>sanitized_settings={}<for_stmt>key,value settings.items()<block_start><if_stmt>isinstance(value (BusProxy BusGroupProxy))<block_start>value=value.identifier<block_end>sanitized_settings[key]=value<block_end>identifier=self._resolve_target_node(target_node).add_synth(add_action=add_action synthdef=synthdef **sanitized_settings)<line_sep>proxy=SynthProxy(identifier=identifier provider=self synthdef=synthdef<or>default settings=settings )<line_sep><return>proxy<block_end><def_stmt>free_buffer self buffer_:BufferProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end><return><block_end># This is currently a no-op
<def_stmt>boot self **kwargs<block_start><pass><block_end># no-op
<def_stmt>dispose self node_proxy:NodeProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end><return><block_end># This is currently a no-op
<def_stmt>free_bus self bus:BusProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end><return><block_end># This is currently a no-op
<def_stmt>free_bus_group self bus_group:BusGroupProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end><return><block_end># This is currently a no-op
<def_stmt>free_node self node_proxy:NodeProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>cast(nonrealtime.Node node_proxy.identifier).free()<block_end><def_stmt>move_node self node_proxy:NodeProxy add_action:AddAction target_node:Union[NodeProxy nonrealtime.Node] <block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>self._resolve_target_node(target_node).move_node(node_proxy.identifier add_action=add_action)<block_end><def_stmt>set_bus self bus_proxy:BusProxy value:float<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end><elif_stmt>bus_proxy.calculation_rate<ne>CalculationRate.CONTROL<block_start><raise>ValueError("Can only set control-rate buses")<block_end>cast(nonrealtime.Bus bus_proxy.identifier).set_(value)<block_end><def_stmt>set_node self node_proxy:NodeProxy **settings<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end><for_stmt>key,value settings.items()<block_start><if_stmt>isinstance(value (BusProxy BusGroupProxy))<block_start>value=value.identifier<block_end>cast(nonrealtime.Node node_proxy.identifier)[key]=value<block_end><block_end><def_stmt>quit self<block_start><pass><block_end># no-op
<def_stmt>register_osc_callback self pattern:Tuple[Union[str float] <ellipsis>] procedure:Callable<arrow>OscCallbackProxy<block_start><return>OscCallbackProxy(provider=self identifier=<none>)<block_end><def_stmt>unregister_osc_callback self proxy:OscCallbackProxy<block_start><pass><block_end><block_end># no-op
<class_stmt>RealtimeProvider(Provider)### INITIALIZER ###
<block_start><def_stmt>__init__ self server latency=0.1<block_start><if_stmt><not>isinstance(server BaseServer)<block_start><raise>ValueError(f"Expected Server, got {server}")<block_end>Provider.__init__(self latency=latency)<line_sep>self._server=server<block_end>### SPECIAL METHODS ###
<def_stmt>__str__ self<block_start><return>f"<{type(self).__name__} {self._server!r}>"<block_end>### PRIVATE METHODS ###
<def_stmt>_resolve_target_node self target_node<block_start><if_stmt>target_node<is><none># TODO: Will this work with AsyncServer?
<block_start>target_node=self.server.default_group<block_end><return>target_node<block_end>### PUBLIC METHODS ###
<def_stmt>add_buffer self * channel_count:Optional[int]=<none> file_path:Optional[str]=<none> frame_count:Optional[int]=<none> starting_frame:Optional[int]=<none> <arrow>BufferProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>identifier=self.server.buffer_allocator.allocate(1)<line_sep>proxy=BufferProxy(channel_count=channel_count file_path=file_path frame_count=frame_count identifier=identifier provider=self starting_frame=starting_frame )<line_sep>self.moment.buffer_additions.append(proxy)<line_sep><return>proxy<block_end><def_stmt>add_bus self calculation_rate=CalculationRate.CONTROL<arrow>BusProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>calculation_rate=CalculationRate.from_expr(calculation_rate)<if_stmt>calculation_rate<not><in>(CalculationRate.AUDIO CalculationRate.CONTROL)<block_start><raise>ValueError(f"Invalid calculation rate: {calculation_rate!r}")<block_end>allocator=realtime.Bus._get_allocator(calculation_rate server=self.server)<line_sep>identifier=allocator.allocate(1)<line_sep><return>BusProxy(calculation_rate=calculation_rate identifier=identifier provider=self)<block_end><def_stmt>add_bus_group self channel_count=1 calculation_rate=CalculationRate.CONTROL<arrow>BusGroupProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>calculation_rate=CalculationRate.from_expr(calculation_rate)<if_stmt>calculation_rate<not><in>(CalculationRate.AUDIO CalculationRate.CONTROL)<block_start><raise>ValueError(f"Invalid calculation rate: {calculation_rate!r}")<block_end><if_stmt>channel_count<l>1<block_start><raise>ValueError("Channel-count must be positive, non-zero integer")<block_end>allocator=realtime.Bus._get_allocator(calculation_rate server=self.server)<line_sep>identifier=allocator.allocate(channel_count)<if_stmt>identifier<is><none><block_start><raise>RuntimeError<block_end><return>BusGroupProxy(calculation_rate=calculation_rate channel_count=channel_count identifier=identifier provider=self )<block_end><def_stmt>add_group self * target_node=<none> add_action=AddAction.ADD_TO_HEAD name:Optional[str]=<none> <arrow>GroupProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>target_node=self._resolve_target_node(target_node)<line_sep>identifier=self.server.node_id_allocator.allocate_node_id(1)<line_sep>proxy=GroupProxy(identifier=identifier provider=self)<line_sep>self.moment.node_additions.append((proxy add_action target_node))<if_stmt>name<block_start>self._annotation_map[identifier]=name<block_end><return>proxy<block_end><def_stmt>add_synth self * synthdef:SynthDef=<none> target_node=<none> add_action=AddAction.ADD_TO_HEAD name:Optional[str]=<none> **settings <arrow>SynthProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>target_node=self._resolve_target_node(target_node)<line_sep>identifier=self.server.node_id_allocator.allocate_node_id(1)<line_sep>proxy=SynthProxy(identifier=identifier provider=self synthdef=synthdef<or>default settings=settings )<line_sep>self.moment.node_additions.append((proxy add_action target_node))<if_stmt>name<block_start>self._annotation_map[identifier]=name<block_end><return>proxy<block_end><def_stmt>boot self **kwargs<block_start>self.server.boot(**kwargs)<block_end><def_stmt>dispose self node_proxy:NodeProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current 
moment")<block_end><return><block_end># This is currently a no-op
<def_stmt>free_buffer self buffer_:BufferProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>self.moment.buffer_removals.append(buffer_)<block_end><def_stmt>free_bus self bus_proxy:BusProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>allocator=realtime.Bus._get_allocator(bus_proxy.calculation_rate server=self.server)<line_sep>allocator.free(cast(int bus_proxy.identifier))<block_end><def_stmt>free_bus_group self bus_group_proxy:BusGroupProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>allocator=realtime.Bus._get_allocator(bus_group_proxy.calculation_rate server=self.server)<line_sep>allocator.free(cast(int bus_group_proxy.identifier))<block_end><def_stmt>free_node self node_proxy:NodeProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>self.moment.node_removals.append(node_proxy)<line_sep>self._annotation_map.pop(node_proxy.identifier <none>)<block_end><def_stmt>move_node self node_proxy:NodeProxy add_action:AddAction target_node:NodeProxy<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>target_node=self._resolve_target_node(target_node)<line_sep>self.moment.node_reorderings.append((node_proxy add_action target_node))<block_end><def_stmt>quit self<block_start>self.server.quit()<block_end><def_stmt>set_bus self bus_proxy:BusProxy value:float<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end><elif_stmt>bus_proxy.calculation_rate<ne>CalculationRate.CONTROL<block_start><raise>ValueError("Can only set control-rate buses")<block_end>self.moment.bus_settings.append((bus_proxy value))<block_end><def_stmt>set_node self node_proxy:NodeProxy **settings<block_start><if_stmt><not>self.moment<block_start><raise>ValueError("No current moment")<block_end>self.moment.node_settings.append((node_proxy settings))<block_end><def_stmt>register_osc_callback self pattern:Tuple[Union[str float] <ellipsis>] procedure:Callable<arrow>OscCallbackProxy<block_start>identifier=self.server.osc_protocol.register(pattern=pattern procedure=procedure)<line_sep><return>OscCallbackProxy(provider=self identifier=identifier)<block_end><def_stmt>unregister_osc_callback self proxy:OscCallbackProxy<block_start>self.server.osc_protocol.unregister(proxy.identifier)<block_end><block_end> |
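# Illustrative usage sketch (the "some_synthdef" and "amplitude" names are placeholders, not part of this module):
# provider = Provider.from_context(server)   # or Provider.from_context(session) for non-realtime
# with provider.at(0.5):
#     group = provider.add_group()
#     synth = group.add_synth(synthdef=some_synthdef, amplitude=0.1)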
<import_from_stmt>enum Enum IntEnum<class_stmt>LivenessClient(Enum)<block_start>"""Liveness client type."""<line_sep>WEB="web"<line_sep>TASK="task"<block_end><class_stmt>ReadTriesValues(IntEnum)<block_start>"""Enum of allowed number of tries to read secrets."""<line_sep>THREE=3<line_sep>FIVE=5<line_sep>TEN=10<line_sep>@classmethod<def_stmt>default cls# pylint: disable=missing-function-docstring
<block_start><return>cls.FIVE.value<block_end><block_end># Needs .value as it's being passed to Jinja
<class_stmt>SecretExpirationValues(Enum)<block_start>"""Enum of allowed expiration values."""<line_sep>_10_MINUTES="10m"<line_sep>_30_MINUTES="30m"<line_sep>_AN_HOUR="1h"<line_sep>_3_HOURS="3h"<line_sep>_6_HOURS="6h"<line_sep>_A_DAY="1d"<line_sep>_2_DAYS="2d"<line_sep>_3_DAYS="3d"<line_sep>_5_DAYS="5d"<line_sep>_A_WEEK="7d"<line_sep>@classmethod<def_stmt>default cls# pylint: disable=missing-function-docstring
<block_start><return>cls._3_DAYS.value<block_end>@classmethod<def_stmt>dict cls<arrow>dict<block_start>"""Return a dict of human friendly data."""<line_sep><return>{i.name[1:].replace("_" " ").capitalize():i.value<for>i cls}<block_end><block_end><class_stmt>EnvConfig(Enum)<block_start>"""Environment config values."""<line_sep>TESTING="testing"<line_sep>DEV_LOCAL="dev-local"<line_sep>DEV_DOCKER="dev-docker"<line_sep>HEROKU="heroku"<line_sep>PRODUCTION="production"<block_end> |
<import_stmt>re<import_from_stmt>functools reduce<import_from_stmt>itertools chain<import_from_stmt>typing Union Dict List<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<import_from_stmt>.common *<line_sep>DataFrameType=Union[pd.DataFrame Dict[str pd.DataFrame] List[Dict[str pd.DataFrame]]]<line_sep># Serialization helper functions
# -------------------------------
<def_stmt>_serializer series<arrow>pd.DataFrame<block_start>df=pd.DataFrame(series.get('values' []) columns=series['columns'])<if_stmt>'time'<not><in>df.columns<block_start><return>df<block_end>df:pd.DataFrame=df.set_index(pd.to_datetime(df['time'])).drop('time' axis=1)<line_sep>df.index=df.index.tz_localize('UTC')<line_sep>df.index.name=<none><if_stmt>'tags'<in>series<block_start><for_stmt>k,v series['tags'].items()<block_start>df[k]=v<block_end><block_end><if_stmt>'name'<in>series<block_start>df.name=series['name']<block_end><return>df<block_end><def_stmt>_get_name series<block_start>tags=[f'{k}={v}'<for>k,v series.get('tags' {}).items()]<line_sep><return>','.join(filter(<none> [series.get('name') *tags]))<or><none><block_end><def_stmt>_drop_zero_index df<block_start><if_stmt>isinstance(df.index pd.DatetimeIndex)<block_start><if_stmt>all(i.value<eq>0<for>i df.index)<block_start><return>df.reset_index(drop=<true>)<block_end><block_end><return>df<block_end><def_stmt>parse resp<arrow>DataFrameType<block_start>"""Makes a dictionary of DataFrames from a response object"""<line_sep>statements=[]<for_stmt>statement resp['results']<block_start>series={}<for_stmt>s statement.get('series' [])<block_start>series[_get_name(s)]=_drop_zero_index(_serializer(s))<block_end>statements.append(series)<block_end><if_stmt>len(statements)<eq>1<block_start>series:dict=statements[0]<if_stmt>len(series)<eq>1<block_start><return>list(series.values())[0]# DataFrame
<block_end><else_stmt><block_start><return>series# dict
<block_end><block_end><return>statements<block_end># list
# Parsing helper functions
# -------------------------
<def_stmt>_itertuples df<block_start>"""Custom implementation of ``DataFrame.itertuples`` that
returns plain tuples instead of namedtuples. About 50% faster.
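For example (illustrative), a frame with a DatetimeIndex and columns ``a`` and ``b`` yields
``(timestamp, a_value, b_value)`` tuples.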
"""<line_sep>cols=[df.iloc[: k]<for>k range(len(df.columns))]<line_sep><return>zip(df.index *cols)<block_end><def_stmt>_replace df<block_start>obj_cols={k<for>k,v dict(df.dtypes).items()<if>v<is>np.dtype('O')}<line_sep>other_cols=set(df.columns)-obj_cols<line_sep>obj_nans=(f'{k}="nan"'<for>k obj_cols)<line_sep>other_nans=(f'{k}=nani?'<for>k other_cols)<line_sep>replacements=[('|'.join(chain(obj_nans other_nans)) '') (',{2,}' ',') ('|'.join([', ,' ', ' ' ,']) ' ') ]<line_sep><return>replacements<block_end><def_stmt>serialize df measurement tag_columns=<none> **extra_tags<arrow>bytes<block_start>"""Converts a Pandas DataFrame into line protocol format"""<line_sep># Pre-processing
<if_stmt>measurement<is><none><block_start><raise>ValueError("Missing 'measurement'")<block_end><if_stmt><not>isinstance(df.index pd.DatetimeIndex)<block_start><raise>ValueError('DataFrame index is not DatetimeIndex')<block_end>tag_columns=set(tag_columns<or>[])<line_sep>isnull=df.isnull().any(axis=1)<line_sep># Make parser function
tags=[]<line_sep>fields=[]<for_stmt>k,v extra_tags.items()<block_start>tags.append(f"{k}={escape(v key_escape)}")<block_end><for_stmt>i,(k v) enumerate(df.dtypes.items())<block_start>k=k.translate(key_escape)<if_stmt>k<in>tag_columns<block_start>tags.append(f"{k}={{p[{i+1}]}}")<block_end><elif_stmt>issubclass(v.type np.integer)<block_start>fields.append(f"{k}={{p[{i+1}]}}i")<block_end><elif_stmt>issubclass(v.type (float np.bool_ np.floating))<block_start>fields.append(f"{k}={{p[{i+1}]}}")<block_end><else_stmt># String escaping is skipped for performance reasons
# Strings containing double-quotes can cause strange write errors
# and should be sanitized by the user.
# e.g., df[k] = df[k].astype('str').str.translate(str_escape)
<block_start>fields.append(f"{k}=\"{{p[{i+1}]}}\"")<block_end><block_end>fmt=(f'{measurement}' f'{","<if>tags<else>""}' ','.join(tags) ' ' ','.join(fields) ' {p[0].value}')<line_sep>f=eval("lambda p: f'{}'".format(''.join(fmt)))<line_sep># Map/concat
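# Illustrative example of the generated line protocol (hypothetical 'host' tag column and 'load' float field):
#   my_measurement,host=server01 load=0.64 1465839830100400200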
<if_stmt>isnull.any()<block_start>lp=map(f _itertuples(df[~isnull]))<line_sep>rep=_replace(df)<line_sep>lp_nan=(reduce(<lambda>a b:re.sub(*b a) rep f(p))<for>p _itertuples(df[isnull]))<line_sep><return>'\n'.join(chain(lp lp_nan)).encode('utf-8')<block_end><else_stmt><block_start><return>'\n'.join(map(f _itertuples(df))).encode('utf-8')<block_end><block_end> |
<import_from_stmt>django.db.models Q QuerySet<import_from_stmt>utilities.permissions permission_is_exempt<class_stmt>RestrictedQuerySet(QuerySet)<block_start><def_stmt>restrict self user action='view'<block_start>"""
Filter the QuerySet to return only objects on which the specified user has been granted the specified
permission.
:param user: User instance
:param action: The action which must be permitted (e.g. "view" for "dcim.view_site"); default is 'view'
"""<line_sep># Resolve the full name of the required permission
app_label=self.model._meta.app_label<line_sep>model_name=self.model._meta.model_name<line_sep>permission_required=f'{app_label}.{action}_{model_name}'<line_sep># Bypass restriction for superusers and exempt views
<if_stmt>user.is_superuser<or>permission_is_exempt(permission_required)<block_start>qs=self<block_end># User is anonymous or has not been granted the requisite permission
<elif_stmt><not>user.is_authenticated<or>permission_required<not><in>user.get_all_permissions()<block_start>qs=self.none()<block_end># Filter the queryset to include only objects with allowed attributes
<else_stmt><block_start>attrs=Q()<for_stmt>perm_attrs user._object_perm_cache[permission_required]<block_start><if_stmt>type(perm_attrs)<is>list<block_start><for_stmt>p perm_attrs<block_start>attrs<augor>Q(**p)<block_end><block_end><elif_stmt>perm_attrs<block_start>attrs<augor>Q(**perm_attrs)<block_end><else_stmt># Any permission with null constraints grants access to _all_ instances
<block_start>attrs=Q()<line_sep><break><block_end><block_end>qs=self.filter(attrs)<block_end><return>qs<block_end><block_end> |
# Helper for testing.
#
# <NAME> [http://eli.thegreenplace.net]
# This code is in the public domain.
<import_stmt>sys<import_stmt>time<def_stmt>main <block_start>count=1<while_stmt><true><block_start>sys.stdout.write(f'{count} ')<if_stmt>count%20<eq>0<block_start>sys.stdout.write('\n')<block_end>time.sleep(0.05)<line_sep>count<augadd>1<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
<import_stmt>unittest<try_stmt><block_start><import_from_stmt>unittest mock<block_end><except_stmt>ImportError<block_start><import_stmt>mock<block_end><import_from_stmt>azure_devtools.scenario_tests.utilities create_random_name get_sha1_hash is_text_payload is_json_payload<class_stmt>TestUtilityFunctions(unittest.TestCase)<block_start><def_stmt>test_create_random_name_default_value self<block_start>default_generated_name=create_random_name()<line_sep>self.assertTrue(default_generated_name.startswith("aztest"))<line_sep>self.assertEqual(24 len(default_generated_name))<line_sep>self.assertTrue(isinstance(default_generated_name str))<block_end><def_stmt>test_create_random_name_randomness self<block_start>self.assertEqual(100 len(set([create_random_name()<for>_ range(100)])))<block_end><def_stmt>test_create_random_name_customization self<block_start>customized_name=create_random_name(prefix="pauline" length=61)<line_sep>self.assertTrue(customized_name.startswith("pauline"))<line_sep>self.assertEqual(61 len(customized_name))<line_sep>self.assertTrue(isinstance(customized_name str))<block_end><def_stmt>test_create_random_name_exception_long_prefix self<block_start>prefix="prefix-too-long"<with_stmt>self.assertRaises(ValueError)<as>cm<block_start>create_random_name(prefix length=len(prefix)-1)<block_end>self.assertEqual(str(cm.exception) "The length of the prefix must not be longer than random name length")<line_sep>self.assertTrue(create_random_name(prefix length=len(prefix)+4).startswith(prefix))<block_end><def_stmt>test_create_random_name_exception_not_enough_space_for_randomness self<block_start>prefix="prefix-too-long"<for_stmt>i range(4)<block_start><with_stmt>self.assertRaises(ValueError)<as>cm<block_start>create_random_name(prefix length=len(prefix)+i)<block_end>self.assertEqual(str(cm.exception) "The randomized part of the name is shorter than 4, which may not be "<concat>"able to offer enough randomness" )<block_end><block_end><def_stmt>test_get_sha1_hash self<block_start><import_stmt>tempfile<with_stmt>tempfile.NamedTemporaryFile()<as>f<block_start>content=b"""
All the world's a stage,
And all the men and women merely players;
They have their exits and their entrances,
And one man in his time plays many parts,
His acts being seven ages. At first, the infant,
Mewling and puking in the nurse's arms.
Then the whining schoolboy, with his satchel
And shining morning face, creeping like snail
Unwillingly to school. And then the lover,
Sighing like furnace, with a woeful ballad
Made to his mistress' eyebrow. Then a soldier,
Full of strange oaths and bearded like the pard,
Jealous in honor, sudden and quick in quarrel,
Seeking the bubble reputation
Even in the cannon's mouth. And then the justice,
In fair round belly with good capon lined,
With eyes severe and beard of formal cut,
Full of wise saws and modern instances;
And so he plays his part. The sixth age shifts
Into the lean and slippered pantaloon,
With spectacles on nose and pouch on side;
His youthful hose, well saved, a world too wide
For his shrunk shank, and his big manly voice,
Turning again toward childish treble, pipes
And whistles in his sound. Last scene of all,
That ends this strange eventful history,
Is second childishness and mere oblivion,
Sans teeth, sans eyes, sans taste, sans everything.
<NAME>
"""<line_sep>f.write(content)<line_sep>f.seek(0)<line_sep>hash_value=get_sha1_hash(f.name)<line_sep>self.assertEqual("6487bbdbd848686338d729e6076da1a795d1ae747642bf906469c6ccd9e642f9" hash_value)<block_end><block_end><def_stmt>test_text_payload self<block_start>http_entity=mock.MagicMock()<line_sep>headers={}<line_sep>http_entity.headers=headers<line_sep>headers["content-type"]="foo/"<line_sep>self.assertFalse(is_text_payload(http_entity))<line_sep>headers["content-type"]="text/html; charset=utf-8"<line_sep>self.assertTrue(is_text_payload(http_entity))<line_sep>headers["content-type"]="APPLICATION/JSON; charset=utf-8"<line_sep>self.assertTrue(is_text_payload(http_entity))<line_sep>headers["content-type"]="APPLICATION/xml"<line_sep>self.assertTrue(is_text_payload(http_entity))<line_sep>http_entity.headers=<none># default to text mode if there is no header
self.assertTrue(is_text_payload(http_entity))<block_end><def_stmt>test_json_payload self<block_start>http_entity=mock.MagicMock()<line_sep>headers={}<line_sep>http_entity.headers=headers<line_sep>headers["content-type"]="APPLICATION/JSON; charset=utf-8"<line_sep>self.assertTrue(is_json_payload(http_entity))<line_sep>headers["content-type"]="application/json; charset=utf-8"<line_sep>self.assertTrue(is_json_payload(http_entity))<line_sep>headers["content-type"]="application/xml; charset=utf-8"<line_sep>self.assertFalse(is_json_payload(http_entity))<block_end><block_end> |
<import_from_stmt>. anova hotelling<line_sep> |
<import_stmt>numpy<as>np<import_from_stmt>..utils GeneticAlgorithm<as>GA<import_from_stmt>..utils round_vars<import_from_stmt>.lcb_merit lcb_merit<def_stmt>lcb_ga num_pts opt_prob surrogate X fX Xpend=<none> kappa=2.0 dtol=1e-3 lcb_target=<none><block_start>"""Minimize the LCB using a genetic algorithm.
:param num_pts: Number of points to generate
:type num_pts: int
:param opt_prob: Optimization problem
:type opt_prob: object
:param surrogate: Surrogate model object
:type surrogate: object
:param X: Previously evaluated points, of size n x dim
:type X: numpy.array
:param fX: Values at previously evaluated points, of size n x 1
:type fX: numpy.array
:param Xpend: Pending evaluations
:type Xpend: numpy.array
:param dtol: Minimum distance between evaluated and pending points
:type dtol: float
:param lcb_target: Return None if we don't find an LCB value <= lcb_target
:type lcb_target: float
:return: num_pts new points to evaluate
:rtype: numpy.array of size num_pts x dim
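Example (illustrative; assumes ``opt_prob``, ``surrogate``, ``X`` and ``fX`` follow the conventions above):
``new_points = lcb_ga(num_pts=1, opt_prob=opt_prob, surrogate=surrogate, X=X, fX=fX, kappa=2.0)``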
"""<if_stmt>Xpend<is><none># cdist can't handle None arguments
<block_start>Xpend=np.empty([0 opt_prob.dim])<block_end>XX=np.vstack((X Xpend))<line_sep>new_points=np.zeros((num_pts opt_prob.dim))<for_stmt>i range(num_pts)<block_start><def_stmt>obj Y<block_start>"""Round integer variables and compute LCB."""<line_sep>Y=round_vars(Y.copy() opt_prob.int_var opt_prob.lb opt_prob.ub)<line_sep><return>lcb_merit(X=Y surrogate=surrogate fX=fX XX=XX dtol=dtol kappa=kappa)<block_end>ga=GA(function=obj dim=opt_prob.dim lb=opt_prob.lb ub=opt_prob.ub int_var=opt_prob.int_var pop_size=max([2<times>opt_prob.dim 100]) num_gen=100 )<line_sep>x_best,f_min=ga.optimize()<if_stmt>lcb_target<is><not><none><and>f_min<g>lcb_target<block_start><return><none><block_end># Give up
new_points[i :]=x_best<line_sep>XX=np.vstack((XX x_best))<block_end><return>new_points<block_end> |
<import_stmt>os<import_from_stmt>random random sample<import_stmt>numpy<as>np<import_from_stmt>PIL Image ImageDraw<import_from_stmt>skimage.segmentation felzenszwalb<import_from_stmt>skimage.morphology skeletonize remove_small_objects<import_from_stmt>skimage.util invert<import_from_stmt>tqdm tqdm<import_stmt>cv2<def_stmt>cv2pil cv2_img<block_start><if_stmt>len(cv2_img.shape)<eq>2<or>cv2_img.shape[2]<eq>1<block_start>cv2_img=cv2.cvtColor(cv2_img cv2.COLOR_GRAY2RGB)<block_end><else_stmt><block_start>cv2_img=cv2.cvtColor(cv2_img cv2.COLOR_BGR2RGB)<block_end>pil_img=Image.fromarray(cv2_img.astype('uint8'))<line_sep><return>pil_img<block_end><def_stmt>pil2cv pil_img<block_start>pil_img=pil_img.convert('RGB')<line_sep>cv2_img=np.array(pil_img)<line_sep>cv2_img=cv2.cvtColor(cv2_img cv2.COLOR_RGB2BGR)<line_sep>cv2_img=cv2_img[: : ::-1].copy()<line_sep><return>cv2_img<block_end><def_stmt>posterize im n<block_start>indices=np.arange(0 256)# List of all colors
divider=np.linspace(0 255 n+1)[1]# we get a divider
quantiz=np.int0(np.linspace(0 255 n))# we get quantization colors
color_levels=np.clip(np.int0(indices/divider) 0 n-1)# color levels 0,1,2..
palette=quantiz[color_levels]# Creating the palette
im2=palette[im]# Applying palette on image
im2=cv2.convertScaleAbs(im2)# Converting image back to uint8
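# e.g. (illustrative) with n=4 the palette is [0, 85, 170, 255]: input values 0-63 map to 0,
# 64-127 to 85, 128-191 to 170 and 192-255 to 255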
<return>im2<block_end><def_stmt>canny im1<block_start>im1=pil2cv(im1)<line_sep>im2=cv2.GaussianBlur(im1 (5 5) 0)<line_sep>im2=cv2.Canny(im2 100 150)<line_sep>im2=cv2.cvtColor(im2 cv2.COLOR_GRAY2RGB)<line_sep>im2=cv2pil(im2)<line_sep><return>im2<block_end><def_stmt>image2colorlabels img colors<block_start>h,w=img.height img.width<line_sep>pixels=np.array(list(img.getdata()))<line_sep>dists=np.array([np.sum(np.abs(pixels-c) axis=1)<for>c colors])<line_sep>classes=np.argmin(dists axis=0)<block_end><def_stmt>colorize_labels img colors<block_start>h,w=img.height img.width<line_sep>classes=image2colorlabels(img colors)<line_sep>img=Image.fromarray(np.uint8(classes.reshape((h w 3))))<line_sep><return>img<block_end><def_stmt>quantize_colors img colors<block_start>h,w=img.height img.width<line_sep>classes=image2colorlabels(img colors)<line_sep>pixels_clr=np.array([colors[p]<for>p classes]).reshape((h w 3))<line_sep>img=Image.fromarray(np.uint8(pixels_clr))<line_sep><return>img<block_end><def_stmt>segment img<block_start>img=pil2cv(img)<line_sep>h,w=img.shape[0:2]<line_sep>img=cv2.bilateralFilter(img 9 100 100)<line_sep>scale=int(h<times>w/1000)<line_sep>segments=felzenszwalb(img scale=scale sigma=0.5 min_size=150)<line_sep>out_image=np.zeros((h w 3))<line_sep>num_segments=len(np.unique(segments))<for_stmt>s tqdm(range(num_segments))<block_start>label_map=segments<eq>s<line_sep>label_map3=np.dstack([label_map]<times>3)<line_sep>masked_img=np.multiply(label_map3 img)<line_sep>#avg_color = np.sum(np.sum(masked_img, axis=0), axis=0) / np.count_nonzero(label_map) # maybe median is better
nonzeros=[masked_img[: : c].reshape((h<times>w))<for>c range(3)]<line_sep>median_color=[np.median(np.take(nonzeros[c] nonzeros[c].nonzero()))<for>c range(3)]<line_sep>smooth_segment=(label_map3<times>median_color).astype('uint8')<line_sep>out_image<augadd>smooth_segment<block_end>out_image=Image.fromarray(out_image.astype('uint8'))<line_sep><return>out_image<block_end><def_stmt>trace img<block_start>img=pil2cv(img)<line_sep>im2=cv2.GaussianBlur(img (5 5) 0)<line_sep>im3=cv2.cvtColor(im2 cv2.COLOR_RGB2GRAY)<line_sep>ret,im4=cv2.threshold(im3 127 255 0)<line_sep>ret,img=cv2.threshold(im3 255 255 0)<line_sep>im5,contours,hierarchy=cv2.findContours(im4 cv2.RETR_TREE cv2.CHAIN_APPROX_SIMPLE)<line_sep>contours=[c<for>c contours<if>cv2.arcLength(c <true>)<g>8]#and cv2.contourArea(c) > 10]
<for_stmt>contour contours<block_start>cv2.drawContours(img [contour] 0 (255) 2)<block_end>img=cv2pil(img)<line_sep><return>img<block_end><def_stmt>simplify img hed_model_path<block_start><import_stmt>hed_processing<line_sep>w,h=img.width img.height<line_sep>size_thresh=0.001<times>w<times>h<line_sep>img=pil2cv(img)<line_sep>img=cv2.GaussianBlur(img (3 3) 0)<line_sep>img=cv2.GaussianBlur(img (3 3) 0)<line_sep>img=hed_processing.run_hed(cv2pil(img) hed_model_path)<line_sep>ret,img=cv2.threshold(pil2cv(img) 50 255 0)<line_sep>img=cv2.cvtColor(img cv2.COLOR_BGR2GRAY)<line_sep>img=remove_small_objects(img.astype('bool') size_thresh)<line_sep>img=255<times>skeletonize(img).astype('uint8')<line_sep>img=cv2pil(img)<line_sep><return>img<block_end><def_stmt>upsample img w2 h2<block_start>h1,w1=img.height img.width<line_sep>r=max(float(w2)/w1 float(h2)/h1)<line_sep>img=img.resize((int(r<times>w1) int(r<times>h1)) resample=Image.BICUBIC)<line_sep><return>img<block_end><def_stmt>crop_rot_resize img frac w2 h2 ang stretch centered<block_start><if_stmt>w2<is><none><block_start>w2=img.width<block_end><if_stmt>h2<is><none><block_start>h2=img.height<block_end><if_stmt>img.height<l>h2<or>img.width<l>w2<block_start>img=upsample(img w2 h2)<block_end><if_stmt>stretch<ne>0<block_start>v=random()<l>0.5<line_sep>h=1.0<if><not>v<else>(1.0+stretch)<line_sep>w=1.0<if>v<else>(1.0+stretch)<line_sep>img=img.resize((int(img.width<times>w) int(img.height<times>h)) resample=Image.BICUBIC)<block_end><if_stmt>ang<g>0<block_start>img=img.rotate(ang resample=Image.BICUBIC expand=<false>)<block_end>ar=float(w2/h2)<line_sep>h1,w1=img.height img.width<if_stmt>float(w1)/h1<g>ar<block_start>h1_crop=max(h2 h1<times>frac)<line_sep>w1_crop=h1_crop<times>ar<block_end><else_stmt><block_start>w1_crop=max(w2 w1<times>frac)<line_sep>h1_crop=w1_crop/ar<block_end>xr,yr=(0.5 0.5)<if>centered<else>(random() random())<line_sep>x_crop,y_crop=(w1-w1_crop-1)<times>xr (h1-h1_crop-1)<times>yr<line_sep>h1_crop,w1_crop,y_crop,x_crop=int(h1_crop) int(w1_crop) int(y_crop) int(x_crop)<line_sep>img_crop=img.crop((x_crop y_crop x_crop+w1_crop y_crop+h1_crop))<line_sep>img_resize=img_crop.resize((w2 h2) resample=Image.BICUBIC)<line_sep><return>img_resize<block_end> |
<import_from_stmt>itertools chain<class_stmt>Var<block_start><def_stmt>__init__ self name var vartype='lineedit' pulldown_objs=<none> pulldown_type_limit=<none> enabled=<true> pulldown_allow_zero=<false> required=<true><block_start>self.name=name<line_sep>self.var=var<line_sep>self.vartype=vartype<line_sep>self.pulldown_objs=pulldown_objs<line_sep>self.pulldown_type_limit=pulldown_type_limit<line_sep>self.pulldown_allow_zero=pulldown_allow_zero<line_sep>self.enabled=enabled<line_sep>self.required=required<assert_stmt>vartype<in>['lineedit' 'lineedit_table' 'pulldown' 'spinner'] vartype<block_end><def_stmt>__repr__ self<block_start><return>(f'Var(name={self.name}, var={self.var}, vartype={self.vartype}, '<concat>f'pulldown_objs={self.pulldown_objs}, enabled={self.enabled}, required={self.required})')<block_end><block_end><class_stmt>TransposedVars<block_start><def_stmt>__init__ self variables<block_start>self.variables=variables<block_end><block_end>shell_ptypes=['PSHELL' 'PCOMP']<line_sep>bar_ptypes=['PBAR' 'PBARL']<line_sep>beam_ptypes=['PBEAM' 'PBEAML' 'PBCOMP']<line_sep>ELEMENTS_MAP={'CQUAD4':[Var('Element ID' 'eid' enabled=<false>) Var('Property ID' 'pid' vartype='pulldown' pulldown_objs='properties' pulldown_type_limit=shell_ptypes) Var('Nodes' 'nodes' vartype='pulldown' pulldown_objs='nodes') #Var('Node 1', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
#Var('Node 2', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
#Var('Node 3', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
#Var('Node 4', 'nodes', vartype='pulldown', pulldown_objs='nodes'),
Var('Theta/Material Coord' 'theta_mcid' vartype='lineedit') Var('Z Offset' 'zoffset' vartype='lineedit') ] 'CTRIA3':[Var('Element ID' 'eid' enabled=<false>) Var('Property ID' 'pid' vartype='pulldown' pulldown_objs='properties' pulldown_type_limit=shell_ptypes) Var('Nodes' 'nodes' vartype='pulldown' pulldown_objs='nodes') Var('Theta/Material Coord' 'theta_mcid' vartype='lineedit') Var('Z Offset' 'zoffset' vartype='lineedit') ] 'CBAR':[Var('Element ID' 'eid' enabled=<false>) Var('Property ID' 'pid' vartype='pulldown' pulldown_objs='properties' pulldown_type_limit=bar_ptypes ) Var('Node 1' 'ga' vartype='pulldown' pulldown_objs='nodes') Var('Node 2' 'gb' vartype='pulldown' pulldown_objs='nodes') Var('OFFT' 'offt' vartype='pulldown' pulldown_objs=['GGG' 'GOO' 'BOO']) Var('g0' 'g0' vartype='pulldown' pulldown_objs='nodes' required=<false>) Var('x' 'x' vartype='lineedit') Var('wa' 'wa' vartype='lineedit') Var('wb' 'wa' vartype='lineedit') ] 'CBEAM':[#bit : None
#is_bit : False
#is_offt : True
Var('Element ID' 'eid' enabled=<false>) Var('Property ID' 'pid' vartype='pulldown' pulldown_objs='properties' pulldown_type_limit=beam_ptypes ) Var('Node 1' 'ga' vartype='pulldown' pulldown_objs='nodes') Var('Node 2' 'gb' vartype='pulldown' pulldown_objs='nodes') Var('OFFT' 'offt' vartype='pulldown' pulldown_objs=['GGG' 'GOO' 'BOO']) Var('g0' 'g0' vartype='pulldown' pulldown_objs='nodes' required=<false>) Var('x' 'x' vartype='lineedit') Var('Pin A, pa' 'pa' vartype='lineedit') Var('Pin B, pa' 'pa' vartype='lineedit') Var('Warping A, sa' 'sa' vartype='lineedit') Var('Warping B, sb' 'sb' vartype='lineedit') Var('wa' 'wa' vartype='lineedit') Var('wb' 'wa' vartype='lineedit') ] }<line_sep>MASSES_MAP={'CONM2':[Var('Element ID' 'eid' enabled=<false>) Var('Node' 'nid' vartype='pulldown' pulldown_objs='nodes') Var('Coord' 'cid' vartype='pulldown' pulldown_objs='coords') Var('Offset' 'X' vartype='lineedit') Var('Mass' 'mass' vartype='lineedit') Var('I, Inertia' 'I' vartype='lineedit') ] }<line_sep>bar_types=['ROD' 'TUBE' 'TUBE2' 'I' 'CHAN' 'T' 'BOX' 'BAR' 'CROSS' 'H' 'T1' 'I1' 'CHAN1' 'Z' 'CHAN2' 'T2' 'BOX1' 'HEXA' 'HAT' 'HAT1' 'DBOX' 'L']<line_sep>#shell_mtypes = ['MAT1', 'MAT2', 'MAT8']
PROPERTIES_MAP={'PSHELL':[Var('Property ID' 'pid' enabled=<false>) [Var('Material ID 1' 'mid1' pulldown_objs='materials' required=<false>) Var('Thickness' 't' vartype='lineedit' required=<false>) ] [Var('Material ID 2' 'mid2' pulldown_objs='materials' required=<false>) Var('12I/t^3' 'twelveIt3' vartype='lineedit' required=<false>) ] [Var('Material ID 3' 'mid3' pulldown_objs='materials' required=<false>) Var('ts/t' 'tst' vartype='lineedit' required=<false>) ] Var('Material ID 4' 'mid4' pulldown_objs='materials' required=<false>) Var('z1' 'z1' vartype='lineedit' required=<false>) #Var('z2', 'z2', vartype='lineedit', required=False),
Var('nsm' 'nsm' vartype='lineedit' required=<false>) ] 'PCOMP':[Var('Property ID' 'pid' enabled=<false>) TransposedVars([Var('Material ID' 'mids' vartype='pulldown' pulldown_objs='materials') Var('Material Angle, Theta' 'thetas') Var('Thickness' 'thicknesses') Var('SOUT' 'souts' vartype='pulldown' pulldown_objs=['YES' 'NO']) ]) Var('TRef' 'tref' vartype='lineedit') Var('lam' 'lam' vartype='lineedit' required=<false>) Var('ft' 'ft' vartype='lineedit' required=<false>) Var('sb' 'sb' vartype='lineedit' required=<false>) Var('z0' 'z0' vartype='lineedit') Var('Damping' 'ge' vartype='lineedit') Var('nsm' 'nsm' vartype='lineedit' required=<false>) ] 'PBARL':[Var('Property ID' 'pid' enabled=<false>) Var('Material ID 1' 'mid' vartype='pulldown' pulldown_objs='materials' pulldown_type_limit=['MAT1'] ) Var('Bar Type' 'beam_type' vartype='pulldown' pulldown_objs=bar_types enabled=<false>) Var('Dimensions' 'dim' vartype='lineedit' required=<false>) Var('Group' 'group' vartype='lineedit' required=<false>) Var('nsm' 'nsm' vartype='lineedit' required=<false>) ] 'PBAR':[Var('Property ID' 'pid' enabled=<false>) Var('Material ID' 'mid' vartype='pulldown' pulldown_objs='materials' pulldown_type_limit=['MAT1'] ) [Var('Area' 'A' vartype='lineedit') Var('I1' 'i1' vartype='lineedit') Var('I2' 'i2' vartype='lineedit') Var('I12' 'i12' vartype='lineedit') Var('J' 'j' vartype='lineedit') ] [Var('C1' 'c1' vartype='lineedit') Var('D1' 'd1' vartype='lineedit') Var('E1' 'e1' vartype='lineedit') Var('F1' 'f1' vartype='lineedit') ] [Var('C2' 'c2' vartype='lineedit') Var('D2' 'd2' vartype='lineedit') Var('E2' 'e2' vartype='lineedit') Var('F2' 'f2' vartype='lineedit') ] [Var('K1' 'k1' vartype='lineedit') Var('K2' 'k2' vartype='lineedit') ] Var('nsm' 'nsm' vartype='lineedit' required=<false>) ] 'PBEAML':[Var('Property ID' 'pid' enabled=<false>) Var('Material ID' 'mid' vartype='pulldown' pulldown_objs='materials' pulldown_type_limit=['MAT1'] ) Var('Beam Type' 'beam_type' vartype='pulldown' pulldown_objs=bar_types enabled=<false>) Var('Group' 'group' vartype='lineedit' required=<false>) #TransposedVars([
Var('SOUT' 'so' vartype='pulldown' pulldown_objs=['YES' 'NO']) Var('x/xb' 'xxb' vartype='lineedit') Var('nsm' 'nsm' vartype='lineedit' required=<false>) Var('Dimensions' 'dim' vartype='lineedit_table') ] }<line_sep>MATERIALS_MAP={'MAT1':[Var('Material ID' 'mid' enabled=<false>) Var("E, Young's Modulus" 'e' vartype='lineedit' required=<false>) Var("G, Shear Modulus" 'g' vartype='lineedit' required=<false>) Var("nu, Poisson's Ratio" 'nu' vartype='lineedit' required=<false>) Var('TRef' 'tref' vartype='lineedit') Var('Damping' 'ge' vartype='lineedit') Var('Density' 'rho' vartype='lineedit') Var('Material Coord' 'mcsid' vartype='lineedit') ] 'MAT8':[Var('Material ID' 'mid' enabled=<false>) Var("E11" 'e11' vartype='lineedit') Var("E22" 'e22' vartype='lineedit') [Var("G12" 'g12' vartype='lineedit') Var("G1z" 'g1z' vartype='lineedit') Var("G2z" 'g2z' vartype='lineedit') ] Var("nu12" 'nu12' vartype='lineedit') Var('Density' 'rho' vartype='lineedit') Var('Damping' 'ge' vartype='lineedit') Var('TRef' 'tref' vartype='lineedit') Var('A1' 'a1' vartype='lineedit') Var('A2' 'a2' vartype='lineedit') [Var('Xt' 'Xt' vartype='lineedit') Var('Xc' 'Xc' vartype='lineedit') Var('Yt' 'Yt' vartype='lineedit') Var('Yc' 'Yc' vartype='lineedit') Var('S' 'S' vartype='lineedit') Var('F12' 'F12' vartype='lineedit') ] Var('strn' 'strn' vartype='lineedit') ] }<line_sep>CAERO_MAP={'CAERO1':[Var('Element ID' 'eid' enabled=<false>) Var('Property ID' 'pid' vartype='pulldown' pulldown_objs='paeros') Var('iGroup' 'igroup') [Var('nSpan Boxes' 'nspan' vartype='spinner') Var('AEFACT Span' 'lspan' vartype='pulldown' pulldown_objs='aefacts' pulldown_allow_zero=<true>) ] [Var('nChord Boxes' 'nchord' vartype='spinner') Var('AEFACT Chord' 'lchord' vartype='pulldown' pulldown_objs='aefacts' pulldown_allow_zero=<true>) ] Var('Point 1' 'p1') Var('Distance 12' 'x12') Var('Point 4' 'p4') Var('Distance 43' 'x43') ] }<line_sep>MODIFY_MAP=dict(chain(ELEMENTS_MAP.items() MASSES_MAP.items() PROPERTIES_MAP.items() MATERIALS_MAP.items() CAERO_MAP.items() ))<line_sep>UPDATE_MAP={#'GRID' : 'update_grid',
#'CONROD' : 'update_element',
'CAERO1':'update_caeros' }<line_sep> |
<import_stmt>logging<import_stmt>os<if_stmt>os.getcwd().split('/')[-1]<eq>'API'<block_start>path='../logs/scan.log'<block_end><else_stmt><block_start>path='logs/scan.log'<block_end>logger=logging.getLogger()<line_sep>fh=logging.FileHandler(path)<line_sep>logger.addHandler(fh)<line_sep>logger.setLevel(logging.INFO)<line_sep>#logging.basicConfig(filename=path, level=logging.INFO)
|
<class_stmt>HumanReadableError(Exception)<block_start><pass><block_end> |
# ------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License (MIT). See LICENSE in the repo root for license information.
# ------------------------------------------------------------------------------------------
<import_stmt>logging<import_from_stmt>dataclasses dataclass<import_from_stmt>enum Enum<import_from_stmt>typing Tuple List Dict Any<import_stmt>numpy<as>np<import_from_stmt>InnerEyeDataQuality.evaluation.metrics compute_accuracy compute_label_entropy total_variation<line_sep>STAT_FIELDS=["relabelling_score" "ambiguity" "label_correctness"]<line_sep>@dataclass(frozen=<true>)<class_stmt>SelectionType(Enum)<block_start>"""
Defines the 5 possible types of selections that can be made in an iteration
"""<line_sep>MISLABELLED_CASE_SELECTED_CORRECTED=1<line_sep>MISLABELLED_CASE_SELECTED_NOT_CORRECTED=2<line_sep>AMBIGUOUS_CASE_SELECTED_CORRECTED=3<line_sep>AMBIGUOUS_CASE_SELECTED_NOT_CORRECTED=4<line_sep>CLEAN_CASE_SELECTED=5<block_end><def_stmt>compute_selection_type_of_current_iter sample_id:int true_ambiguous_cases:np.ndarray true_label_counts:np.ndarray mislabelled_ids_current:np.ndarray ambiguous_case_ids_current:np.ndarray mislabelled_ids_prev:np.ndarray ambiguous_case_ids_prev:np.ndarray<arrow>SelectionType<block_start>"""
Compute the type of selection that occurred between the previous and current iteration.
:param sample_id: The sample id.
:param true_ambiguous_cases: The ids for the true ambiguous samples.
:param true_label_counts: The label counts for the true label distribution.
:param mislabelled_ids_current: The ids for the current iteration remaining not ambiguous mislabelled samples.
:param ambiguous_case_ids_current: The ids for the current iteration remaining ambiguous mislabelled samples.
:param mislabelled_ids_prev: The ids for the previous iteration remaining not ambiguous mislabelled samples.
:param ambiguous_case_ids_prev: The ids for the previous iteration remaining ambiguous mislabelled samples.
:return: An enum representing the selection type that occurred between the previous and current iteration.
"""<if_stmt>sample_id<in>true_ambiguous_cases<block_start><if_stmt>len(set(ambiguous_case_ids_prev)-set(ambiguous_case_ids_current))<g>0<block_start><return>SelectionType.AMBIGUOUS_CASE_SELECTED_CORRECTED<block_end><else_stmt><block_start><return>SelectionType.AMBIGUOUS_CASE_SELECTED_NOT_CORRECTED<block_end><block_end><else_stmt><block_start><if_stmt>len(set(mislabelled_ids_prev)-set(mislabelled_ids_current))<g>0<block_start><return>SelectionType.MISLABELLED_CASE_SELECTED_CORRECTED<block_end><elif_stmt>len(np.unique(np.where(true_label_counts[sample_id])[0]))<eq>1<block_start><return>SelectionType.CLEAN_CASE_SELECTED<block_end><else_stmt><block_start><return>SelectionType.MISLABELLED_CASE_SELECTED_NOT_CORRECTED<block_end><block_end><block_end><def_stmt>get_mislabelled_sample_ids true_label_counts:np.ndarray current_label_counts:np.ndarray<arrow>np.ndarray<block_start>"""
Compute which samples are mislabelled.
:param true_label_counts: The label counts for the true label distribution.
:param current_label_counts: The label counts for the current distribution.
:return: An array with the ids of the mislabelled samples (majority voting)
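Example (illustrative): for true counts ``[[3, 0], [0, 3]]`` and current counts ``[[3, 0], [2, 1]]``
only sample 1 is flagged, because its majority-vote label differs from the true majority.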
"""<line_sep>true_class=np.argmax(true_label_counts axis=1)<line_sep>current_class=np.argmax(current_label_counts axis=1)<line_sep><return>np.where(true_class<ne>current_class)<block_end><def_stmt>get_ambiguous_sample_ids true_label_counts:np.ndarray threshold:float=0.30<arrow>np.ndarray<block_start>"""
Compute which samples are ambiguous.
:param true_label_counts: The label counts for the true label distribution.
:param threshold: The label entropy threshold above which a sample is considered ambiguous
:return: An array with the ids of the ambiguous samples
"""<line_sep>label_entropy=compute_label_entropy(true_label_counts)<line_sep><return>np.where(label_entropy<g>threshold)[0]<block_end><class_stmt>SimulationStats<block_start>"""
A class that keeps track of statistics/metrics during the simulation
"""<def_stmt>__init__ self name:str true_label_counts:np.ndarray initial_labels:np.ndarray<block_start>"""
:param name: The name of the simulation
:param true_label_counts: The label counts for the true label distribution
np.ndarray [num_samples x num_classes]
:param initial_labels: The initial label counts, np.ndarray [num_samples x num_classes]
"""<line_sep>self.name=name<line_sep>self.initial_labels=np.copy(initial_labels)<line_sep>self.true_label_counts=true_label_counts<line_sep>self.true_ambiguous_cases=get_ambiguous_sample_ids(true_label_counts)<line_sep>self.true_distribution=true_label_counts/np.sum(true_label_counts axis=-1 keepdims=<true>)<line_sep>self.selected_sample_id:List[int]=list()<line_sep>self.num_fetches:List[int]=list()<line_sep>self.accuracy:List[float]=list()<line_sep>self.avg_total_variation:List[float]=list()<line_sep>self.selection_type:List[SelectionType]=list()<line_sep>self.selector_stats:Dict[str Any]={key:list()<for>key STAT_FIELDS}<line_sep>mislabelled_ids_current,ambiguous_case_ids_current=self.get_noisy_and_ambiguous_cases(initial_labels)<line_sep>self.mislabelled_not_ambiguous_sample_ids=[mislabelled_ids_current]<line_sep>self.mislabelled_ambiguous_sample_ids=[ambiguous_case_ids_current]<line_sep>self.num_initial_mislabelled_not_ambiguous=self.mislabelled_not_ambiguous_sample_ids[0].size<line_sep>self.num_initial_mislabelled_ambiguous=self.mislabelled_ambiguous_sample_ids[0].size<line_sep>self.num_remaining_mislabelled_not_ambiguous:List[int]=list()<line_sep>self.num_remaining_mislabelled_ambiguous:List[int]=list()<block_end><def_stmt>get_noisy_and_ambiguous_cases self current_label_counts:np.ndarray<arrow>Tuple[np.ndarray np.ndarray]<block_start>"""
Compute which of the current labels are still mislabelled, and split them into ambiguous and
non-ambiguous samples.
:param current_label_counts: The label counts of the current iteration
:return: A tuple containing an array with the current mislabelled not ambiguous sample ids and an array with
the current mislabelled ambiguous sample ids.
"""<line_sep># Find the potential label noise and ambiguous cases
label_mismatch_ids_current=get_mislabelled_sample_ids(self.true_label_counts current_label_counts)<line_sep># Split the label mismatch cases into ambiguous and clear label noise types
mislabelled_ids_current=np.setdiff1d(label_mismatch_ids_current self.true_ambiguous_cases)<line_sep>ambiguous_case_ids_current=np.array(np.intersect1d(label_mismatch_ids_current self.true_ambiguous_cases))<line_sep><return>mislabelled_ids_current ambiguous_case_ids_current<block_end><def_stmt>record_selector_stats self selector_stats:Dict[str Any]<arrow><none><block_start>"""
"""<if_stmt>len(selector_stats)<eq>0<block_start><return><block_end><for_stmt>key STAT_FIELDS<block_start><if_stmt>key<in>selector_stats<block_start>self.selector_stats[key].append(selector_stats[key])<block_end><block_end><block_end><def_stmt>record_iteration self selected_sample_id:int num_fetches:int current_label_counts:np.ndarray<arrow><none><block_start>"""
:param selected_sample_id: The sample id that was selected at this iteration
:param num_fetches: The number of fetches (relabels) it took to achieve a majority
:param current_label_counts: The labels counts for the current iteration
:return:
"""<line_sep>self.selected_sample_id.append(selected_sample_id)<line_sep>self.num_fetches.append(num_fetches)<line_sep>self.accuracy.append(compute_accuracy(current_label_counts self.true_label_counts))<line_sep>current_distribution=current_label_counts/np.sum(current_label_counts axis=-1 keepdims=<true>)<line_sep>self.avg_total_variation.append(np.nanmean(total_variation(self.true_distribution current_distribution)))<line_sep>mislabelled_ids_current,ambiguous_case_ids_current=self.get_noisy_and_ambiguous_cases(current_label_counts)<line_sep>mislabelled_ids_prev=self.mislabelled_not_ambiguous_sample_ids[-1]<line_sep>ambiguous_case_ids_prev=self.mislabelled_ambiguous_sample_ids[-1]<line_sep>selection_type=compute_selection_type_of_current_iter(selected_sample_id self.true_ambiguous_cases self.true_label_counts mislabelled_ids_current ambiguous_case_ids_current mislabelled_ids_prev ambiguous_case_ids_prev)<line_sep>self.selection_type.append(selection_type)<line_sep>self.num_remaining_mislabelled_not_ambiguous.append(len(mislabelled_ids_current))<line_sep>self.num_remaining_mislabelled_ambiguous.append(len(ambiguous_case_ids_current))<line_sep>self.mislabelled_not_ambiguous_sample_ids.append(mislabelled_ids_current)<line_sep>self.mislabelled_ambiguous_sample_ids.append(ambiguous_case_ids_current)<block_end><def_stmt>log_last_iter self<arrow><none><block_start>"""
Log the statistics of the last iteration
:return: None
"""<line_sep>logging.info(f"Method: {self.name}, selected_id: {self.selected_sample_id[-1]} "<concat>f"accuracy: {self.accuracy[-1]}")<line_sep>logging.info(f"Remaining label clear noise cases: {self.num_remaining_mislabelled_not_ambiguous[-1]} "<concat>f"and ambiguous noise cases: {self.num_remaining_mislabelled_ambiguous[-1]}")<block_end><block_end><class_stmt>SimulationStatsDistribution(object)<block_start>"""
A class that takes a list of simulation statistics and creates a distribution over them.
"""<def_stmt>__init__ self simulation_stats_list:List[SimulationStats]<block_start>"""
:param simulation_stats_list: A list of SimulationStats objects
"""<line_sep>self.simulation_stats=simulation_stats_list<line_sep>end_point=max([np.max(np.cumsum(sim_stats.num_fetches))<for>sim_stats simulation_stats_list])<line_sep>start_point=min([np.min(np.cumsum(sim_stats.num_fetches))<for>sim_stats simulation_stats_list])<line_sep>self.num_initial_mislabelled_not_ambiguous=simulation_stats_list[0].num_initial_mislabelled_not_ambiguous<line_sep>self.num_initial_mislabelled_ambiguous=simulation_stats_list[0].num_initial_mislabelled_ambiguous<line_sep>self.name=simulation_stats_list[0].name<line_sep>self.num_fetches=np.arange(start_point end_point)<line_sep>self.accuracy=self._interpolate_and_make_dist_array(self.num_fetches simulation_stats_list 'accuracy')<line_sep>self.avg_total_variation=self._interpolate_and_make_dist_array(self.num_fetches simulation_stats_list 'avg_total_variation')<line_sep>self.num_remaining_mislabelled_not_ambiguous=self._interpolate_and_make_dist_array(self.num_fetches simulation_stats_list 'num_remaining_mislabelled_not_ambiguous')<line_sep>self.num_remaining_mislabelled_ambiguous=self._interpolate_and_make_dist_array(self.num_fetches simulation_stats_list 'num_remaining_mislabelled_ambiguous')<block_end>@staticmethod<def_stmt>_interpolate_and_make_dist_array num_fetches:np.ndarray simulation_stats_list:List[SimulationStats] fp_attr_name:str<arrow>np.ndarray<block_start><return>np.array([np.interp(num_fetches np.cumsum(sim_stats.num_fetches) sim_stats.__getattribute__(fp_attr_name))<for>sim_stats simulation_stats_list])<block_end><block_end> |
<import_stmt>datetime<import_stmt>logging<import_stmt>os<import_stmt>time<import_stmt>threading<import_stmt>requests<import_stmt>urllib3<import_stmt>utils<line_sep>urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)<class_stmt>BiliVideoChecker(threading.Thread)<block_start><def_stmt>__init__ self bvid:str path:str config:dict<block_start>threading.Thread.__init__(self)<line_sep>default_headers={'Accept':'application/json, text/javascript, */*; q=0.01' 'Accept-Encoding':'gzip, deflate' 'Accept-Language':'zh-CN,zh;q=0.8,en-US;q=0.6,en;q=0.4,zh-TW;q=0.2' 'Connection':'keep-alive' 'User-Agent':'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_12_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/59.0.3071.115 Safari/537.36 '}<line_sep>self.headers={**default_headers ** config.get('root' {}).get('request_header' {})}<line_sep>self.session=requests.session()<line_sep>self.bvid=bvid<line_sep>self.path=path<line_sep>self.config=config<line_sep>self.check_url="https://api.bilibili.com/x/web-interface/view"<line_sep>self.check_interval=config['root']['check_interval']<block_end><def_stmt>common_request self method:str url:str params:dict=<none> data:dict=<none><arrow>requests.Response<block_start>connection=<none><if_stmt>method<eq>'GET'<block_start>connection=self.session.get(url headers=self.headers params=params verify=<false>)<block_end><if_stmt>method<eq>'POST'<block_start>connection=self.session.post(url headers=self.headers params=params data=data verify=<false>)<block_end><return>connection<block_end><def_stmt>run self<arrow><none><block_start>logging.basicConfig(level=utils.get_log_level(self.config) format='%(asctime)s %(thread)d %(threadName)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s' datefmt='%a, %d %b %Y %H:%M:%S' handlers=[logging.FileHandler(os.path.join(self.config.get('root' {}).get('logger' {}).get('log_path' "./log") "VideoChecker_"+datetime.datetime.now().strftime('%Y-%m-%d_%H-%M-%S')+'.log') "a" encoding="utf-8")])<while_stmt><true><block_start>video_info=self.common_request("GET" self.check_url {'bvid':self.bvid}).json()<try_stmt><block_start><if_stmt>video_info['code']<eq>0<and>video_info['data']['state']<eq>0<block_start>logging.info("稿件%s 已开放浏览,准备删除 %s" self.bvid self.path)<line_sep>utils.del_files_and_dir(self.path)<line_sep><return><block_end><else_stmt><block_start>logging.info("稿件%s 未开放浏览" self.bvid)<line_sep>time.sleep(self.check_interval)<block_end><block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><block_end><block_end> |
__all__=["utils" "logclass"]<import_from_stmt>.preprocess *<import_from_stmt>.feature_engineering *<import_from_stmt>.models *<import_from_stmt>.reporting *<line_sep> |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
<import_from_stmt>.delete_kb_contents_dto DeleteKbContentsDTO<class_stmt>UpdateKbOperationDTODelete(DeleteKbContentsDTO)<block_start>"""An instance of DeleteKbContentsDTO for delete Operation.
:param ids: List of Qna Ids to be deleted
:type ids: list[int]
:param sources: List of sources to be deleted from knowledgebase.
:type sources: list[str]
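Example (illustrative values): ``UpdateKbOperationDTODelete(ids=[1, 2], sources=['faq.tsv'])``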
"""<line_sep>_attribute_map={'ids':{'key':'ids' 'type':'[int]'} 'sources':{'key':'sources' 'type':'[str]'} }<def_stmt>__init__ self **kwargs<block_start>super(UpdateKbOperationDTODelete self).__init__(**kwargs)<block_end><block_end> |
<import_stmt>hashlib<import_from_stmt>functools reduce<def_stmt>to_number bytes<block_start><return>reduce(<lambda>a b:a<times>256+b bytes)<block_end><def_stmt>to_base n b<block_start><if_stmt><not>n<block_start><return>[]<block_end><else_stmt><block_start>rest,digit=divmod(n b)<line_sep>answer=to_base(rest b)<line_sep>answer.append(digit)<line_sep><return>answer<block_end><block_end><class_stmt>DeidGenerator(object)<block_start><def_stmt>__init__ self seed salt bytes=8<block_start><assert_stmt>(bytes<l>20)<line_sep>self.seed="%s:%s"%(seed salt)<line_sep>self.bytes=bytes<line_sep>self.number=self._get_number()<block_end><def_stmt>_get_number self<block_start><return>to_number(self._sha1_bytes())<block_end><def_stmt>_sha1_bytes self<block_start>byte_list=hashlib.sha1(self.seed.encode('utf-8')).digest()<line_sep><yield><from>byte_list[:self.bytes]<block_end><def_stmt>digest self alphabet="0123456789"<block_start>b=len(alphabet)<line_sep>answer=[alphabet[i]<for>i to_base(self.number b)]<if_stmt>isinstance(alphabet str)<block_start>answer=''.join(answer)<block_end><return>answer<block_end><def_stmt>random_hash self<block_start>"""Generate a 'random' hash of 10 alphanumerics (ALL CAPS)"""<line_sep>id=self.digest("0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ")[-10:]<while_stmt>len(id)<l>10<block_start>id="0"+id<block_end><return>id<block_end><def_stmt>random_number self low high<block_start>"""Generate a 'random' number such that low <= n < high"""<line_sep><return>self.digest(list(range(low high)))[-1]<block_end><block_end> |
<import_from_stmt>test_hltrigreport_base_cfg process<line_sep>process.hlTrigReport.resetBy="run"<line_sep>process.hlTrigReport.reportBy="lumi"<line_sep> |
# vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2014 Intel Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
<import_from_stmt>vsm.api common<import_stmt>logging<line_sep>LOG=logging.getLogger(__name__)<class_stmt>ViewBuilder(common.ViewBuilder)<block_start>_collection_name="poolusages"<def_stmt>basic self poolusage<block_start><if_stmt><not>poolusage.get('id' "")<block_start>poolusage['id']=""<line_sep><return>poolusage<block_end><return>{"poolusage":{"id":poolusage.get("id" 0) "pool_id":poolusage.get("pool_id" "") "vsmapp_id":poolusage.get("vsmapp_id" "") "cinder_volume_host":poolusage.get("cinder_volume_host" "") "as_glance_store_pool":poolusage.get("as_glance_store_pool" "") "attach_status":poolusage.get("attach_status" "") "attach_at":poolusage.get("attach_at" "")}}<block_end><def_stmt>index self poolusages<block_start>"""Show a list of poolusages without many details."""<line_sep><return>self._list_view(self.basic poolusages)<block_end><def_stmt>_list_view self func poolusages<block_start>"""Provide a view for a list of poolusages."""<line_sep>node_list=[func(poolusage)["poolusage"]<for>poolusage poolusages]<line_sep>nodes_dict=dict(poolusages=node_list)<line_sep><return>nodes_dict<block_end><block_end> |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
<import_from_stmt>msrest.serialization Model<class_stmt>IpTag(Model)<block_start>"""Contains the IpTag associated with the public IP address.
:param ip_tag_type: Gets or sets the ipTag type: Example FirstPartyUsage.
:type ip_tag_type: str
:param tag: Gets or sets value of the IpTag associated with the public IP.
Example SQL, Storage etc
:type tag: str
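Example (illustrative values): ``IpTag(ip_tag_type='FirstPartyUsage', tag='SQL')``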
"""<line_sep>_attribute_map={'ip_tag_type':{'key':'ipTagType' 'type':'str'} 'tag':{'key':'tag' 'type':'str'} }<def_stmt>__init__ self * ip_tag_type:str=<none> tag:str=<none> **kwargs<arrow><none><block_start>super(IpTag self).__init__(**kwargs)<line_sep>self.ip_tag_type=ip_tag_type<line_sep>self.tag=tag<block_end><block_end> |
<import_from_future_stmt> annotations<import_stmt>requests<def_stmt>get_hackernews_story story_id:str<arrow>dict<block_start>url=f"https://hacker-news.firebaseio.com/v0/item/{story_id}.json?print=pretty"<line_sep><return>requests.get(url).json()<block_end><def_stmt>hackernews_top_stories max_stories:int=10<arrow>list[dict]<block_start>"""
Get the top max_stories posts from HackerNews - https://news.ycombinator.com/
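Example (illustrative): ``hackernews_top_stories(5)`` returns a list of five story dicts fetched
from the public Firebase API.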
"""<line_sep>url="https://hacker-news.firebaseio.com/v0/topstories.json?print=pretty"<line_sep>story_ids=requests.get(url).json()[:max_stories]<line_sep><return>[get_hackernews_story(story_id)<for>story_id story_ids]<block_end><def_stmt>hackernews_top_stories_as_markdown max_stories:int=10<arrow>str<block_start>stories=hackernews_top_stories(max_stories)<line_sep><return>"\n".join("* [{title}]({url})".format(**story)<for>story stories)<block_end><if_stmt>__name__<eq>"__main__"<block_start>print(hackernews_top_stories_as_markdown())<block_end> |
<import_stmt>pytest<import_from_stmt>dbt.tests.util run_dbt get_manifest<line_sep>my_model_sql="""
select 1 as fun
"""<line_sep>@pytest.fixture(scope="class")<def_stmt>models <block_start><return>{"my_model.sql":my_model_sql}<block_end><def_stmt>test_basic project# Tests that a project with a single model works
<block_start>results=run_dbt(["run"])<assert_stmt>len(results)<eq>1<line_sep>manifest=get_manifest(project.project_root)<assert_stmt>"model.test.my_model"<in>manifest.nodes<block_end> |
<import_stmt>torch<import_from_stmt>torch_geometric.nn.reshape Reshape<def_stmt>test_reshape <block_start>x=torch.randn(10 4)<line_sep>op=Reshape(5 2 4)<assert_stmt>op.__repr__()<eq>'Reshape(5, 2, 4)'<assert_stmt>op(x).size()<eq>(5 2 4)<assert_stmt>op(x).view(10 4).tolist()<eq>x.tolist()<block_end> |
"""show_system.py
NXOS parsers for the following show commands:
* 'show system internal sysmgr service name <WORD>'
* 'show system internal l2fwder Mac'
* 'show system internal processes memory'
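Example (illustrative sketch; assumes a connected Genie ``device`` object):
parsed = ShowSystemInternalL2fwderMac(device=device).parse()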
"""<line_sep># Python
<import_stmt>re<line_sep># Metaparser
<import_from_stmt>genie.metaparser MetaParser<import_from_stmt>genie.metaparser.util.schemaengine Schema Any Optional Or And Default Use<line_sep># ====================================================================
# Schema for 'show system internal sysmgr service name <process>'
# ====================================================================
<class_stmt>ShowSystemInternalSysmgrServiceNameSchema(MetaParser)<block_start>"""Schema for show system internal sysmgr service name <process>"""<line_sep>schema={'instance':{Any():{'tag':{Any():{'process_name':str 'internal_id':int 'uuid':str 'state':str 'plugin_id':str 'state_start_date':str Optional('last_restart_date'):str Optional('pid'):int Optional('previous_pid'):int Optional('sap'):int Optional('restart_count'):int Optional('reboot_state'):str Optional('last_terminate_reason'):str}} }} }<block_end><class_stmt>ShowSystemInternalSysmgrServiceName(ShowSystemInternalSysmgrServiceNameSchema)<block_start>"""Parser for show system internal sysmgr service name <process>"""<line_sep>cli_command='show system internal sysmgr service name {process}'<def_stmt>cli self process output=<none><block_start><if_stmt>process<block_start>cmd=self.cli_command.format(process=process)<block_end><else_stmt><block_start>cmd=""<block_end><if_stmt>output<is><none><block_start>out=self.device.execute(cmd)<block_end><else_stmt><block_start>out=output<block_end>ret_dict={}<for_stmt>line out.splitlines()<block_start>line=line.strip()<line_sep># Service "bfdc" ("bfdc", 3):
# Service "__inst_012__isis" ("isis", 61):
# Service "feature-mgr" ("feature-mgr", 135):
p1=re.compile(r'^Service +\"(?P<inst>[\w\-]+)\" *'<concat>'\(\"(?P<process_name>[\w\-]+)\", *'<concat>'(?P<internal_id>\d+)\):$')<line_sep>m=p1.match(line)<if_stmt>m<block_start><if_stmt>'instance'<not><in>ret_dict<block_start>ret_dict['instance']={}<block_end>inst=m.groupdict()['inst']<if_stmt>inst<not><in>ret_dict['instance']<block_start>ret_dict['instance'][inst]={}<block_end>process_name=m.groupdict()['process_name']<line_sep>internal_id=int(m.groupdict()['internal_id'])<line_sep># initial for each process
pid=sap=restart_count=previous_pid=<none><line_sep>last_restart_date=reboot_state=last_terminate_reason=<none><line_sep><continue><block_end># UUID = 0x2C7, PID = 6547, SAP = 1008
# UUID = 0x42000118, -- Currently not running --
p2=re.compile(r'^UUID *= *(?P<uuid>\w+), *'<concat>'((PID *= *(?P<pid>\d+), *'<concat>'SAP *= *(?P<sap>\d+))'<concat>'|(-- Currently not running --))$')<line_sep>m=p2.match(line)<if_stmt>m<block_start>uuid=m.groupdict()['uuid']<if_stmt>m.groupdict()['pid']<block_start>pid=int(m.groupdict()['pid'])<block_end><else_stmt><block_start>pid=<none><block_end><if_stmt>m.groupdict()['sap']<block_start>sap=int(m.groupdict()['sap'])<block_end><else_stmt><block_start>sap=<none><block_end><continue><block_end># State: SRV_STATE_WAIT_SPAWN_CONDITION (entered at time Tue Mar 26 17:31:06 2013).
# State: SRV_STATE_HAP_FAILED [unstable] (entered at time Thu Oct 26 13:46:32 2017).
p3=re.compile(r'^State: *(?P<state>[\w\s\[\]]+) *'<concat>'\(entered +at +time +'<concat>'(?P<state_start_date>[\w\s\:]+)\).$')<line_sep>m=p3.match(line)<if_stmt>m<block_start>state=m.groupdict()['state'].strip()<line_sep>state_start_date=m.groupdict()['state_start_date']<line_sep><continue><block_end># Restart count: 1
p4=re.compile(r'^Restart +count: +(?P<restart_count>\d+)$')<line_sep>m=p4.match(line)<if_stmt>m<block_start><if_stmt>m.groupdict()['restart_count']<block_start>restart_count=int(m.groupdict()['restart_count'])<block_end><else_stmt><block_start>restart_count=<none><block_end><continue><block_end># Time of last restart: Sat Jul 1 14:49:10 2017.
p5=re.compile(r'^Time +of +last +restart: +'<concat>'(?P<last_restart_date>[\w\s\:]+).$')<line_sep>m=p5.match(line)<if_stmt>m<block_start>last_restart_date=m.groupdict()['last_restart_date']<line_sep><continue><block_end># The service never crashed since the last reboot.
# The service has never been started since the last reboot.
p6=re.compile(r'The service never crashed since the last reboot.')<line_sep>m=p6.match(line)<if_stmt>m<block_start>reboot_state='never_crashed'<line_sep><continue><block_end>p6_1=re.compile(r'The service has never been started since the last reboot.')<line_sep>m=p6_1.match(line)<if_stmt>m<block_start>reboot_state='never_started'<line_sep><continue><block_end># Previous PID: 2176
p7=re.compile(r'^Previous +PID: +(?P<previous_pid>\d+)$')<line_sep>m=p7.match(line)<if_stmt>m<block_start>previous_pid=int(m.groupdict()['previous_pid'])<line_sep><continue><block_end># Reason of last termination: SYSMGR_DEATH_REASON_FAILURE_SIGNAL
p8=re.compile(r'^Reason +of +last +termination: +'<concat>'(?P<last_terminate_reason>\w+)$')<line_sep>m=p8.match(line)<if_stmt>m<block_start>last_terminate_reason=m.groupdict()['last_terminate_reason']<line_sep><continue><block_end># Plugin ID: 0
p9=re.compile(r'^Plugin +ID: +(?P<plugin_id>\d+)$')<line_sep>m=p9.match(line)<if_stmt>m<block_start>plugin_id=m.groupdict()['plugin_id']<line_sep>ret_dict['instance'][inst]['tag'][tag]['plugin_id']=plugin_id<line_sep><continue><block_end># Tag = N/A
# Tag = 100
# Tag = l3vpn
p10=re.compile(r'^Tag *= *(?P<tag>(N\/A)|(\S+))$')<line_sep>m=p10.match(line)<if_stmt>m<block_start>tag=m.groupdict()['tag']<if_stmt>'tag'<not><in>ret_dict['instance'][inst]<block_start>ret_dict['instance'][inst]['tag']={}<block_end><if_stmt>tag<not><in>ret_dict['instance'][inst]['tag']<block_start>ret_dict['instance'][inst]['tag'][tag]={}<block_end><if_stmt>'process_name'<block_start>ret_dict['instance'][inst]['tag'][tag]['process_name']=process_name<block_end><if_stmt>'internal_id'<block_start>ret_dict['instance'][inst]['tag'][tag]['internal_id']=internal_id<block_end><if_stmt>'uuid'<block_start>ret_dict['instance'][inst]['tag'][tag]['uuid']=uuid<block_end><if_stmt>'state'<block_start>ret_dict['instance'][inst]['tag'][tag]['state']=state<block_end><if_stmt>'state_start_date'<block_start>ret_dict['instance'][inst]['tag'][tag]['state_start_date']=state_start_date<block_end><if_stmt>last_restart_date<block_start>ret_dict['instance'][inst]['tag'][tag]['last_restart_date']=last_restart_date<block_end><if_stmt>pid<block_start>ret_dict['instance'][inst]['tag'][tag]['pid']=pid<block_end><if_stmt>previous_pid<block_start>ret_dict['instance'][inst]['tag'][tag]['previous_pid']=previous_pid<block_end><if_stmt>sap<block_start>ret_dict['instance'][inst]['tag'][tag]['sap']=sap<block_end><if_stmt>restart_count<block_start>ret_dict['instance'][inst]['tag'][tag]['restart_count']=restart_count<block_end><if_stmt>reboot_state<block_start>ret_dict['instance'][inst]['tag'][tag]['reboot_state']=reboot_state<block_end><if_stmt>last_terminate_reason<block_start>ret_dict['instance'][inst]['tag'][tag]['last_terminate_reason']=last_terminate_reason<block_end><continue><block_end><block_end><return>ret_dict<block_end><block_end># ====================================================================
# Schema for 'show system internal l2fwder Mac'
# ====================================================================
<class_stmt>ShowSystemInternalL2fwderMacSchema(MetaParser)<block_start>"""Schema for show system internal l2fwder Mac"""<line_sep>schema={'vlans':{Any():{'mac_addresses':{Any():{'mac_type':str 'mac_aging_time':str 'entry':str 'secure':str 'ntfy':str 'ports':str }} }} }<block_end># ====================================================================
# Parser for 'show system internal l2fwder Mac'
# ====================================================================
<class_stmt>ShowSystemInternalL2fwderMac(ShowSystemInternalL2fwderMacSchema)<block_start>"""Parser for show system internal l2fwder Mac"""<line_sep>cli_command='show system internal l2fwder Mac'<def_stmt>cli self output=<none><block_start><if_stmt>output<is><none><block_start>out=self.device.execute(self.cli_command)<block_end><else_stmt><block_start>out=output<block_end>ret_dict={}<for_stmt>line out.splitlines()<block_start>line=line.strip()<line_sep># G 1008 5e01.80ff.0007 static - F F sup-eth1(R)
p1=re.compile(r'^\s*(?P<entry>[A-Z\*\(\+\)]+) +(?P<vlan>[0-9]+) '<concat>'+(?P<mac_address>[0-9a-z\.]+) +(?P<mac_type>[a-z]+) '<concat>'+(?P<age>[0-9\-\:]+) +(?P<secure>[A-Z]+) +(?P<ntfy>[A-Z]+) '<concat>'+(?P<ports>[a-zA-Z0-9\-\(\)\s\.\/]+)$')<line_sep>m=p1.match(line)<if_stmt>m<block_start>vlan=str(m.groupdict()['vlan'])<line_sep>mac_address=str(m.groupdict()['mac_address'])<if_stmt>'vlans'<not><in>ret_dict<block_start>ret_dict['vlans']={}<block_end><if_stmt>vlan<not><in>ret_dict['vlans']<block_start>ret_dict['vlans'][vlan]={}<block_end><if_stmt>'mac_addresses'<not><in>ret_dict['vlans'][vlan]<block_start>ret_dict['vlans'][vlan]['mac_addresses']={}<block_end>ret_dict['vlans'][vlan]['mac_addresses'][mac_address]={}<line_sep>ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['mac_type']=str(m.groupdict()['mac_type'])<line_sep>ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['mac_aging_time']=str(m.groupdict()['age'])<line_sep>ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['entry']=str(m.groupdict()['entry'])<line_sep>ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['secure']=str(m.groupdict()['secure'])<line_sep>ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['ntfy']=str(m.groupdict()['ntfy'])<line_sep>ret_dict['vlans'][vlan]['mac_addresses'][mac_address]['ports']=str(m.groupdict()['ports'])<line_sep><continue><block_end><block_end><return>ret_dict<block_end><block_end><class_stmt>ShowSystemInternalKernelMeminfoSchema(MetaParser)<block_start>"""
Schema for show system internal kernel meminfo
"""<line_sep>schema={'mem':{'memtotal_kb':int 'memfree_kb':int 'memavailable_kb':int } 'buffers_kb':int 'cached_kb':int 'active':{'active_kb':int 'inactive_kb':int 'active(anon)_kb':int 'inactive(anon)_kb':int 'active(file)_kb':int 'inactive(file)_kb':int } 'unevictable_kb':int 'mlocked_kb':int 'swap':{'swapcached_kb':int 'swaptotal_kb':int 'swapfree_kb':int } 'dirty_kb':int 'writeback_kb':int 'anonpages_kb':int 'mapped_kb':int 'shmem_kb':int 'slab_kb':int 'sreclaimable_kb':int 'sunreclaim_kb':int 'kernelstack_kb':int 'pagetables_kb':int 'nfs_unstable_kb':int 'bounce_kb':int 'writebacktmp_kb':int 'commitlimit_kb':int 'committed_as_kb':int 'vmalloc':{'vmalloctotal_kb':int 'vmallocused_kb':int 'vmallocchunk_kb':int } 'hardwarecorrupted_kb':int 'hugepages':{'hugepages_total':int 'hugepages_free':int 'hugepages_rsvd':int 'hugepages_surp':int 'hugepagesize_kb':int } 'directmap4k_kb':int 'directmap2m_kb':int }<block_end><class_stmt>ShowSystemInternalKernelMeminfo(ShowSystemInternalKernelMeminfoSchema)<block_start>"""
Parser for show system internal kernel meminfo
"""<line_sep>cli_command='show system internal system internal kernel meminfo'<def_stmt>cli self output=<none><block_start><if_stmt><not>output<block_start>out=self.device.execute(self.cli_command)<block_end><else_stmt><block_start>out=output<block_end># MemTotal: 5873172 kB
p1=re.compile(r'(?P<mem_type>Mem.+):\s+(?P<amount>\d+)\skB$')<line_sep># Active(file): 236740 kB
p2=re.compile(r'(?i)(?P<active_state>[in]*active.*):\s+(?P<amount>\d+)\skB$')<line_sep># SwapTotal: 0 kB
p3=re.compile(r'(?P<swap_type>Swap.+):\s+(?P<amount>\d+)\skB$')<line_sep># VmallocChunk: 34359477316 kB
p4=re.compile(r'(?P<vmalloc_type>Vmalloc.+):\s+(?P<amount>\d+)\skB$')<line_sep># HugePages_Surp: 0
p5=re.compile(r'(?P<hugepages_type>Huge.+):\s+(?P<amount>\d+)$')<line_sep># Hugepagesize: 2048 kB
p6=re.compile(r'(?P<hugepages_type>Huge.+):\s+(?P<amount>\d+)\s+kB$')<line_sep># Buffers: 38212 kB
p7=re.compile(r'(?P<key>.+):\s+(?P<amount>\d+)(\skB)?$')<line_sep>ret_dict={}<for_stmt>line out.splitlines()<block_start>line=line.strip()<line_sep># MemTotal: 5873172 kB
m=p1.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>mem_dict=ret_dict.setdefault('mem' {})<line_sep>key=group['mem_type'].lower()+'_kb'<line_sep>mem_dict[key]=int(group['amount'])<line_sep><continue><block_end># Active(file): 236740 kB
m=p2.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>active_dict=ret_dict.setdefault('active' {})<line_sep>key=group['active_state'].lower()+'_kb'<line_sep>active_dict[key]=int(group['amount'])<line_sep><continue><block_end># SwapTotal: 0 kB
m=p3.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>swap_dict=ret_dict.setdefault('swap' {})<line_sep>key=group['swap_type'].lower()+'_kb'<line_sep>swap_dict[key]=int(group['amount'])<line_sep><continue><block_end># VmallocChunk: 34359477316 kB
m=p4.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>vmalloc_dict=ret_dict.setdefault('vmalloc' {})<line_sep>key=group['vmalloc_type'].lower()+'_kb'<line_sep>vmalloc_dict[key]=int(group['amount'])<line_sep><continue><block_end># HugePages_Surp: 0
m=p5.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>hugepages_dict=ret_dict.setdefault('hugepages' {})<line_sep>key=group['hugepages_type'].lower()<line_sep>hugepages_dict[key]=int(group['amount'])<line_sep><continue><block_end># Hugepagesize: 2048 kB
m=p6.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>hugepages_dict=ret_dict.setdefault('hugepages' {})<line_sep>key=group['hugepages_type'].lower()+'_kb'<line_sep>hugepages_dict[key]=int(group['amount'])<line_sep><continue><block_end># Buffers: 38212 kB
m=p7.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>key=group['key'].lower()+'_kb'<line_sep>ret_dict[key]=int(group['amount'])<line_sep><continue><block_end><block_end><return>ret_dict<block_end><block_end><class_stmt>ShowSystemResourcesSchema(MetaParser)<block_start>"""
Schema for show system resources
"""<line_sep>schema={'load_avg':{'load_avg_1min':float 'load_avg_5min':float 'load_avg_15min':float } 'processes':{'processes_total':int 'processes_running':int } 'cpu_state':{'cpu_state_user':float 'cpu_state_kernel':float 'cpu_state_idle':float 'cpus':{Any():{'cpu_state_user':float 'cpu_state_kernel':float 'cpu_state_idle':float }}} 'memory_usage':{'memory_usage_total_kb':int 'memory_usage_used_kb':int 'memory_usage_free_kb':int } 'kernel':{'kernel_vmalloc_total_kb':int 'kernel_vmalloc_free_kb':int 'kernel_buffers_kb':int 'kernel_cached_kb':int } 'current_memory_status':str }<block_end><class_stmt>ShowSystemResources(ShowSystemResourcesSchema)<block_start>"""
Parser for show system resources
"""<line_sep>cli_command='show system resources'<def_stmt>cli self output=<none># execute command to get output
<block_start><if_stmt>output<is><none><block_start>out=self.device.execute(self.cli_command)<block_end><else_stmt><block_start>out=output<block_end># Load average: 1 minute: 0.34 5 minutes: 0.40 15 minutes: 0.66
p1=re.compile(r'^Load average\s*:\s+1 minute:\s+(?P<minute_one>[\d\.]+)\s+5 minutes:\s+'<concat>r'(?P<minute_five>\d+\.\d+)\s+15 minutes:\s+(?P<minute_fifteen>\d+\.\d+)$')<line_sep># Processes : 901 total, 2 running
p2=re.compile(r'^Processes\s*:\s+(?P<processes_total>\d+)\s+total,\s+'<concat>r'(?P<processes_running>\d+)\s+running$')<line_sep># CPU states : 2.11% user, 11.64% kernel, 86.24% idle
# CPU0 states : 3.33% user, 12.22% kernel, 84.44% idle
p3=re.compile(r'^CPU(?P<cpu_num>\d*)\s+states\s+:\s+(?P<user>\d+\.\d+)%\s+user,\s+'<concat>r'(?P<kernel>[\d+\.]+)%\s+kernel,\s+(?P<idle>\d+\.\d+)%\s+idle$')<line_sep># Memory usage: 5873172K total, 4189652K used, 1683520K free
p4=re.compile(r'^Memory usage\s*:\s+(?P<total>\d+)K total,\s+'<concat>r'(?P<used>\d+)K used,\s+(?P<free>\d+)K free$')<line_sep># Kernel vmalloc: 0K total, 0K free
p5=re.compile(r'^Kernel vmalloc\s*:\s+(?P<total>\d+)'<concat>r'K total,\s+(?P<free>\d+)K free$')<line_sep># Kernel buffers: 144876K Used
p6=re.compile(r'^Kernel buffers\s*:\s+(?P<buffers>\d+)K Used$')<line_sep># Kernel cached : 2296916K Used
p7=re.compile(r'^Kernel cached\s*:\s+(?P<cached>\d+)K Used$')<line_sep># Current memory status: OK
p8=re.compile(r'^Current memory status\s*:\s+(?P<status>\w+)$')<line_sep>ret_dict={}<for_stmt>line out.splitlines()<block_start><if_stmt>line<block_start>line=line.strip()<block_end><else_stmt><block_start><continue><block_end># Load average: 1 minute: 0.34 5 minutes: 0.40 15 minutes: 0.66
m=p1.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>load_avg_dict=ret_dict.setdefault('load_avg' {})<line_sep>load_avg_dict["load_avg_1min"]=float(group['minute_one'])<line_sep>load_avg_dict["load_avg_5min"]=float(group['minute_five'])<line_sep>load_avg_dict["load_avg_15min"]=float(group['minute_fifteen'])<line_sep><continue><block_end># Processes : 901 total, 2 running
m=p2.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>processes_dict=ret_dict.setdefault('processes' {})<line_sep>processes_dict["processes_total"]=int(group['processes_total'])<line_sep>processes_dict["processes_running"]=int(group['processes_running'])<line_sep><continue><block_end># CPU states : 2.11% user, 11.64% kernel, 86.24% idle
# CPU0 states : 3.33% user, 12.22% kernel, 84.44% idle
m=p3.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>cpu_state_dict=ret_dict.setdefault('cpu_state' {})<if_stmt>group['cpu_num']<block_start>cpu_id_dict=cpu_state_dict.setdefault('cpus' {}).setdefault(int(group['cpu_num']) {})<line_sep>cpu_id_dict['cpu_state_user']=float(group['user'])<line_sep>cpu_id_dict['cpu_state_kernel']=float(group['kernel'])<line_sep>cpu_id_dict['cpu_state_idle']=float(group['idle'])<line_sep><continue><block_end>cpu_state_dict['cpu_state_user']=float(group['user'])<line_sep>cpu_state_dict['cpu_state_kernel']=float(group['kernel'])<line_sep>cpu_state_dict['cpu_state_idle']=float(group['idle'])<line_sep><continue><block_end># Memory usage: 5873172K total, 4189652K used, 1683520K free
m=p4.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>memory_usage_dict=ret_dict.setdefault('memory_usage' {})<line_sep>memory_usage_dict['memory_usage_total_kb']=int(group['total'])<line_sep>memory_usage_dict['memory_usage_used_kb']=int(group['used'])<line_sep>memory_usage_dict['memory_usage_free_kb']=int(group['free'])<line_sep><continue><block_end># Kernel vmalloc: 0K total, 0K free
m=p5.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>kernel_dict=ret_dict.setdefault('kernel' {})<line_sep>kernel_dict['kernel_vmalloc_total_kb']=int(group['total'])<line_sep>kernel_dict['kernel_vmalloc_free_kb']=int(group['free'])<line_sep><continue><block_end># Kernel buffers: 144876K Used
m=p6.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>kernel_dict=ret_dict.setdefault('kernel' {})<line_sep>kernel_dict['kernel_buffers_kb']=int(group['buffers'])<line_sep><continue><block_end># Kernel cached : 2296916K Used
m=p7.match(line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>kernel_dict=ret_dict.setdefault('kernel' {})<line_sep>kernel_dict['kernel_cached_kb']=int(group['cached'])<line_sep><continue><block_end># Current memory status: OK
m=p8.match(line)<if_stmt>m<block_start>ret_dict["current_memory_status"]=m.groupdict()['status']<line_sep><continue><block_end><block_end><return>ret_dict<block_end><block_end><class_stmt>ShowSystemInternalProcessesMemorySchema(MetaParser)<block_start>"""
Schema for show system internal processes memory
"""<line_sep>schema={'pid':{Any():{'stat':str 'time':str 'majflt':int 'trs':int 'rss':int 'vsz':int 'mem_percent':float 'command':str 'tty':str}}}<block_end><class_stmt>ShowSystemInternalProcessesMemory(ShowSystemInternalProcessesMemorySchema)<block_start>"""
Parser for show system internal processes memory
"""<line_sep>cli_command="show system internal processes memory"<def_stmt>cli self output=<none><block_start><if_stmt><not>output<block_start>out=self.device.execute(self.cli_command)<block_end><else_stmt><block_start>out=output<block_end># 7482 ? Ssl 00:05:05 158 0 219576 1053628 3.7 /opt/mtx/bin/grpc -i 2626 -I
# 27344 pts/0 Sl+ 00:00:20 0 63 117180 709928 1.9 /isan/bin/vsh.bin
p1=re.compile(r'^(?P<pid>\d+)\s+(?P<tty>\S+)\s+(?P<stat>\S+)\s+(?P<time>[\d:]+)\s+(?P<majflt>\d+)\s+(?P<trs>\d+)\s+'<concat>r'(?P<rss>\d+)\s+(?P<vsz>\d+)\s+(?P<mem_percent>[\d.]+)\s+(?P<command>.+$)')<line_sep>ret_dict={}<for_stmt>line out.splitlines()<block_start>stripped_line=line.strip()<line_sep># 27344 pts/0 Sl+ 00:00:20 0 63 117180 709928 1.9 /isan/bin/vsh.bin
# 7482 ? Ssl 00:05:05 158 0 219576 1053628 3.7 /opt/mtx/bin/grpc -i 2626 -I
m=p1.match(stripped_line)<if_stmt>m<block_start>group=m.groupdict()<line_sep>pid=int(group['pid'])<line_sep>pid_dict=ret_dict.setdefault('pid' {}).setdefault(pid {})<line_sep>pid_dict['stat']=group['stat']<line_sep>pid_dict['majflt']=int(group['majflt'])<line_sep>pid_dict['trs']=int(group['trs'])<line_sep>pid_dict['rss']=int(group['rss'])<line_sep>pid_dict['vsz']=int(group['vsz'])<line_sep>pid_dict['mem_percent']=float(group['mem_percent'])<line_sep>pid_dict['command']=group['command']<line_sep>pid_dict['tty']=group['tty']<line_sep>pid_dict['time']=group['time']<block_end><block_end><return>ret_dict<block_end><block_end> |
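# A minimal standalone sketch of the parsing pattern the MetaParser classes above
# rely on: precompile one regex per expected line of CLI output, match each
# stripped line, and grow the schema-shaped dict with setdefault(). The sample
# line and key names are taken from the ShowSystemResources comments above; the
# helper name parse_load_avg is illustrative and not part of genieparser.
import re

def parse_load_avg(output):
    p1 = re.compile(r'^Load average\s*:\s+1 minute:\s+(?P<minute_one>[\d\.]+)\s+'
                    r'5 minutes:\s+(?P<minute_five>[\d\.]+)\s+'
                    r'15 minutes:\s+(?P<minute_fifteen>[\d\.]+)$')
    ret_dict = {}
    for line in output.splitlines():
        m = p1.match(line.strip())
        if m:
            group = m.groupdict()
            load_avg = ret_dict.setdefault('load_avg', {})
            load_avg['load_avg_1min'] = float(group['minute_one'])
            load_avg['load_avg_5min'] = float(group['minute_five'])
            load_avg['load_avg_15min'] = float(group['minute_fifteen'])
    return ret_dict

# parse_load_avg("Load average:   1 minute: 0.34   5 minutes: 0.40   15 minutes: 0.66")
# returns {'load_avg': {'load_avg_1min': 0.34, 'load_avg_5min': 0.4, 'load_avg_15min': 0.66}}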
# Copyright (C) 2018-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
<import_from_stmt>extensions.ops.Cast Cast<import_from_stmt>mo.front.extractor FrontExtractorOp<import_from_stmt>mo.front.onnx.extractors.utils get_onnx_datatype_as_numpy onnx_attr<class_stmt>CastFrontExtractor(FrontExtractorOp)<block_start>op='Cast'<line_sep>enabled=<true><line_sep>@classmethod<def_stmt>extract cls node<block_start>to=onnx_attr(node 'to' 'i' default=<none>)<line_sep>Cast.update_node_stat(node {'dst_type':get_onnx_datatype_as_numpy(to)})<line_sep><return>cls.enabled<block_end><block_end> |
"""show_prefix_list.py
IOS parsers for the following show commands:
* show ip prefix-list detail
* show ipv6 prefix-list detail
"""<import_from_stmt>genie.libs.parser.iosxe.show_prefix_list ShowIpPrefixListDetail<as>ShowIpPrefixListDetail_iosxe ShowIpv6PrefixListDetail<as>ShowIpv6PrefixListDetail_iosxe<class_stmt>ShowIpPrefixListDetail(ShowIpPrefixListDetail_iosxe)<block_start>"""Parser for:
show ip prefix-list detail
show ipv6 prefix-list detail"""<line_sep><pass><block_end><class_stmt>ShowIpv6PrefixListDetail(ShowIpv6PrefixListDetail_iosxe)<block_start>"""Parser for show ipv6 prefix-list detail"""<line_sep><pass><block_end> |
# This example is based on the following examples
# https://www.tensorflow.org/tutorials/quickstart/beginner
<import_stmt>tensorflow<as>tf<import_from_stmt>labml experiment<import_from_stmt>labml.utils.keras LabMLKerasCallback<def_stmt>main <block_start>experiment.create(name='MNIST Keras')<line_sep>(x_train y_train),(x_test y_test)=tf.keras.datasets.mnist.load_data()<line_sep>x_train,x_test=x_train/255.0 x_test/255.0<line_sep>model=tf.keras.models.Sequential([tf.keras.layers.Flatten(input_shape=(28 28)) tf.keras.layers.Dense(128 activation='relu') tf.keras.layers.Dropout(0.2) tf.keras.layers.Dense(10)])<line_sep>loss_fn=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=<true>)<line_sep>model.compile(optimizer='adam' loss=loss_fn metrics=['accuracy'])<with_stmt>experiment.start()<block_start>model.fit(x_train y_train epochs=5 validation_data=(x_test y_test) callbacks=[LabMLKerasCallback()] verbose=<none>)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# --------------------------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# --------------------------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-statements
<import_from_stmt>azure.cli.core.commands CliCommandType<import_from_stmt>._client_factory topics_factory domains_factory domain_topics_factory system_topics_factory system_topic_event_subscriptions_factory event_subscriptions_factory topic_types_factory extension_topics_factory partner_registrations_factory partner_namespaces_factory event_channels_factory partner_topics_factory partner_topic_event_subscriptions_factory <def_stmt>load_command_table self _<block_start>topics_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#TopicsOperations.{}' client_factory=topics_factory client_arg_name='self')<line_sep>extension_topics_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#ExtensionTopicsOperations.{}' client_factory=extension_topics_factory client_arg_name='self')<line_sep>domains_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#DomainsOperations.{}' client_factory=domains_factory client_arg_name='self')<line_sep>domain_topics_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#DomainTopicsOperations.{}' client_factory=domain_topics_factory client_arg_name='self')<line_sep>system_topics_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#SystemTopicsOperations.{}' client_factory=system_topics_factory client_arg_name='self')<line_sep>system_topic_event_subscriptions_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#SystemTopicEventSubscriptionsOperations.{}' client_factory=system_topic_event_subscriptions_factory client_arg_name='self')<line_sep>partner_registrations_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#PartnerRegistrationsOperations.{}' client_factory=partner_registrations_factory client_arg_name='self')<line_sep>partner_namespaces_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#PartnerNamespacesOperations.{}' client_factory=partner_namespaces_factory client_arg_name='self')<line_sep>event_channels_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#EventChannelsOperations.{}' client_factory=event_channels_factory client_arg_name='self')<line_sep>partner_topics_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#PartnerTopicsOperations.{}' client_factory=partner_topics_factory client_arg_name='self')<line_sep>partner_topic_event_subscriptions_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#PartnerTopicEventSubscriptionsOperations.{}' client_factory=partner_topic_event_subscriptions_factory client_arg_name='self')<line_sep>topic_type_mgmt_util=CliCommandType(operations_tmpl='azure.mgmt.eventgrid.operations#TopicTypesOperations.{}' client_factory=topic_types_factory client_arg_name='self')<with_stmt>self.command_group('eventgrid topic' topics_mgmt_util client_factory=topics_factory)<as>g<block_start>g.show_command('show' 'get')<line_sep>g.command('key list' 'list_shared_access_keys')<line_sep>g.command('delete' 'begin_delete')<line_sep>g.custom_command('key regenerate' 'cli_topic_regenerate_key')<line_sep>g.custom_command('list' 'cli_topic_list')<line_sep>g.custom_command('create' 'cli_topic_create_or_update')<line_sep>g.custom_command('update' 'cli_topic_update')<block_end><with_stmt>self.command_group('eventgrid extension-topic' extension_topics_mgmt_util client_factory=extension_topics_factory)<as>g<block_start>g.show_command('show' 'get')<block_end><with_stmt>self.command_group('eventgrid domain topic' 
domain_topics_mgmt_util client_factory=domain_topics_factory)<as>g<block_start>g.show_command('show' 'get')<line_sep>g.custom_command('list' 'cli_domain_topic_list')<line_sep>g.custom_command('delete' 'cli_domain_topic_delete')<line_sep>g.custom_command('create' 'cli_domain_topic_create_or_update')<block_end><with_stmt>self.command_group('eventgrid domain' domains_mgmt_util client_factory=domains_factory)<as>g<block_start>g.show_command('show' 'get')<line_sep>g.command('key list' 'list_shared_access_keys')<line_sep>g.custom_command('key regenerate' 'cli_domain_regenerate_key')<line_sep>g.custom_command('list' 'cli_domain_list')<line_sep>g.custom_command('create' 'cli_domain_create_or_update')<line_sep>g.command('delete' 'begin_delete')<line_sep>g.custom_command('update' 'cli_domain_update')<block_end><with_stmt>self.command_group('eventgrid system-topic' system_topics_mgmt_util client_factory=system_topics_factory)<as>g<block_start>g.show_command('show' 'get')<line_sep>g.command('delete' 'begin_delete' confirmation=<true>)<line_sep>g.custom_command('list' 'cli_system_topic_list')<line_sep>g.custom_command('create' 'cli_system_topic_create_or_update')<line_sep>g.custom_command('update' 'cli_system_topic_update')<block_end><with_stmt>self.command_group('eventgrid system-topic event-subscription' system_topic_event_subscriptions_mgmt_util client_factory=system_topic_event_subscriptions_factory)<as>g<block_start>g.custom_show_command('show' 'cli_system_topic_event_subscription_get')<line_sep>g.command('delete' 'begin_delete' confirmation=<true>)<line_sep>g.custom_command('list' 'cli_system_topic_event_subscription_list')<line_sep>g.custom_command('create' 'cli_system_topic_event_subscription_create_or_update')<line_sep>g.custom_command('update' 'cli_system_topic_event_subscription_update')<block_end><with_stmt>self.command_group('eventgrid partner registration' partner_registrations_mgmt_util client_factory=partner_registrations_factory is_preview=<true>)<as>g<block_start>g.show_command('show' 'get')<line_sep>g.command('delete' 'delete' confirmation=<true>)<line_sep>g.custom_command('list' 'cli_partner_registration_list')<line_sep>g.custom_command('create' 'cli_partner_registration_create_or_update')<line_sep># g.custom_command('update', 'cli_partner_registration_update')
<block_end><with_stmt>self.command_group('eventgrid partner namespace' partner_namespaces_mgmt_util client_factory=partner_namespaces_factory is_preview=<true>)<as>g<block_start>g.show_command('show' 'get')<line_sep>g.command('delete' 'begin_delete' confirmation=<true>)<line_sep>g.custom_command('list' 'cli_partner_namespace_list')<line_sep>g.custom_command('create' 'cli_partner_namespace_create_or_update')<line_sep>g.command('key list' 'list_shared_access_keys')<line_sep>g.custom_command('key regenerate' 'cli_partner_namespace_regenerate_key')<line_sep># g.custom_command('update', 'cli_partner_namespace_update')
<block_end><with_stmt>self.command_group('eventgrid partner namespace event-channel' event_channels_mgmt_util client_factory=event_channels_factory is_preview=<true>)<as>g<block_start>g.show_command('show' 'get')<line_sep>g.command('delete' 'begin_delete' confirmation=<true>)<line_sep>g.custom_command('list' 'cli_event_channel_list')<line_sep># g.custom_command('update', 'cli_event_channel_update')
g.custom_command('create' 'cli_event_channel_create_or_update')<block_end><with_stmt>self.command_group('eventgrid partner topic' partner_topics_mgmt_util client_factory=partner_topics_factory is_preview=<true>)<as>g<block_start>g.show_command('show' 'get')<line_sep>g.command('delete' 'begin_delete' confirmation=<true>)<line_sep>g.command('activate' 'activate')<line_sep>g.command('deactivate' 'deactivate')<line_sep>g.custom_command('list' 'cli_partner_topic_list')<line_sep># g.custom_command('create', 'cli_partner_topic_create_or_update')
# g.custom_command('update', 'cli_partner_topic_update')
<block_end><with_stmt>self.command_group('eventgrid partner topic event-subscription' partner_topic_event_subscriptions_mgmt_util client_factory=partner_topic_event_subscriptions_factory is_preview=<true>)<as>g<block_start>g.custom_show_command('show' 'cli_partner_topic_event_subscription_get')<line_sep>g.command('delete' 'begin_delete' confirmation=<true>)<line_sep>g.custom_command('list' 'cli_partner_topic_event_subscription_list')<line_sep>g.custom_command('create' 'cli_partner_topic_event_subscription_create_or_update')<line_sep>g.custom_command('update' 'cli_partner_topic_event_subscription_update')<block_end>custom_tmpl='azure.cli.command_modules.eventgrid.custom#{}'<line_sep>eventgrid_custom=CliCommandType(operations_tmpl=custom_tmpl)<with_stmt>self.command_group('eventgrid event-subscription' client_factory=event_subscriptions_factory)<as>g<block_start>g.custom_command('create' 'cli_eventgrid_event_subscription_create')<line_sep>g.custom_show_command('show' 'cli_eventgrid_event_subscription_get')<line_sep>g.custom_command('delete' 'cli_eventgrid_event_subscription_delete')<line_sep>g.custom_command('list' 'cli_event_subscription_list')<line_sep>g.generic_update_command('update' getter_type=eventgrid_custom setter_type=eventgrid_custom getter_name='event_subscription_getter' setter_name='event_subscription_setter' custom_func_name='update_event_subscription')<block_end><with_stmt>self.command_group('eventgrid topic-type' topic_type_mgmt_util)<as>g<block_start>g.command('list' 'list')<line_sep>g.show_command('show' 'get')<line_sep>g.command('list-event-types' 'list_event_types')<block_end><block_end> |
<import_stmt>httpx<import_stmt>pytest<import_stmt>rubrix<import_stmt>rubrix<as>rb<import_from_stmt>rubrix.metrics.token_classification Annotations entity_capitalness entity_consistency entity_density entity_labels f1 mention_length tokens_length token_length token_frequency token_capitalness <import_from_stmt>tests.server.test_helpers client<def_stmt>mocking_client monkeypatch<block_start>monkeypatch.setattr(httpx "post" client.post)<line_sep>monkeypatch.setattr(httpx "get" client.get)<line_sep>monkeypatch.setattr(httpx "delete" client.delete)<line_sep>monkeypatch.setattr(httpx "put" client.put)<line_sep>monkeypatch.setattr(httpx "stream" client.stream)<block_end><def_stmt>log_some_data dataset:str<block_start>rubrix.delete(dataset)<line_sep>text="My first rubrix example"<line_sep>tokens=text.split(" ")<line_sep>rb.log([rb.TokenClassificationRecord(id=1 text=text tokens=tokens prediction=[("CARDINAL" 3 8)] annotation=[("CARDINAL" 3 8)] ) rb.TokenClassificationRecord(id=2 text=text tokens=tokens prediction=[("CARDINAL" 3 8)] annotation=[("CARDINAL" 3 8)] ) rb.TokenClassificationRecord(id=3 text=text tokens=tokens prediction=[("NUMBER" 3 8)] annotation=[("NUMBER" 3 8)] ) rb.TokenClassificationRecord(id=4 text=text tokens=tokens prediction=[("PERSON" 3 8)] annotation=[("PERSON" 3 8)] ) ] name=dataset )<block_end><def_stmt>test_search_by_nested_metric monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_search_by_nested_metric"<line_sep>rb.delete(dataset)<line_sep>log_some_data(dataset)<line_sep>df=rb.load(dataset query="metrics.predicted.mentions.capitalness: LOWER")<assert_stmt>len(df)<g>0<block_end><def_stmt>test_tokens_length monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_tokens_length"<line_sep>log_some_data(dataset)<line_sep>results=tokens_length(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"4.0":4}<line_sep>results.visualize()<block_end><def_stmt>test_token_length monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_token_length"<line_sep>log_some_data(dataset)<line_sep>results=token_length(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"2.0":4 "3.0":0 "4.0":0 "5.0":4 "6.0":4 "7.0":4}<line_sep>results.visualize()<block_end><def_stmt>test_token_frequency monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_token_frequency"<line_sep>log_some_data(dataset)<line_sep>results=token_frequency(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"My":4 "example":4 "first":4 "rubrix":4}<line_sep>results.visualize()<block_end><def_stmt>test_token_capitalness monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_token_capitalness"<line_sep>log_some_data(dataset)<line_sep>results=token_capitalness(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"LOWER":12 "FIRST":4}<line_sep>results.visualize()<block_end><def_stmt>test_mentions_length monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_mentions_length"<line_sep>log_some_data(dataset)<line_sep>results=mention_length(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"1.0":4}<line_sep>results.visualize()<line_sep>results=mention_length(dataset level="char")<assert_stmt>results<assert_stmt>results.data<eq>{"5.0":4}<line_sep>results.visualize()<line_sep>results=mention_length(dataset compute_for=Annotations)<assert_stmt>results<assert_stmt>results.data<eq>{"1.0":4}<line_sep>results.visualize()<line_sep>results=mention_length(dataset compute_for=Annotations 
level="char")<assert_stmt>results<assert_stmt>results.data<eq>{"5.0":4}<line_sep>results.visualize()<block_end><def_stmt>test_compute_for_as_string monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_compute_for_as_string"<line_sep>log_some_data(dataset)<line_sep>results=entity_density(dataset compute_for="Predictions")<assert_stmt>results<assert_stmt>results.data<eq>{"0.25":4}<line_sep>results.visualize()<with_stmt>pytest.raises(ValueError match="not-found is not a valid ComputeFor, please select one of \['annotations', 'predictions'\]" )<block_start>entity_density(dataset compute_for="not-found")<block_end><block_end><def_stmt>test_entity_density monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_entity_density"<line_sep>log_some_data(dataset)<line_sep>results=entity_density(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"0.25":4}<line_sep>results.visualize()<line_sep>results=entity_density(dataset compute_for=Annotations)<assert_stmt>results<assert_stmt>results.data<eq>{"0.25":4}<line_sep>results.visualize()<block_end><def_stmt>test_entity_labels monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_entity_labels"<line_sep>log_some_data(dataset)<line_sep>results=entity_labels(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"CARDINAL":2 "NUMBER":1 "PERSON":1}<line_sep>results.visualize()<line_sep>results=entity_labels(dataset compute_for=Annotations)<assert_stmt>results<assert_stmt>results.data<eq>{"CARDINAL":2 "NUMBER":1 "PERSON":1}<line_sep>results.visualize()<block_end><def_stmt>test_entity_capitalness monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_entity_capitalness"<line_sep>rubrix.delete(dataset)<line_sep>log_some_data(dataset)<line_sep>results=entity_capitalness(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"LOWER":4}<line_sep>results.visualize()<line_sep>results=entity_capitalness(dataset compute_for=Annotations)<assert_stmt>results<assert_stmt>results.data<eq>{"LOWER":4}<line_sep>results.visualize()<block_end><def_stmt>test_entity_consistency monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_entity_consistency"<line_sep>rubrix.delete(dataset)<line_sep>log_some_data(dataset)<line_sep>results=entity_consistency(dataset threshold=2)<assert_stmt>results<assert_stmt>results.data<eq>{"mentions":[{"mention":"first" "entities":[{"count":2 "label":"CARDINAL"} {"count":1 "label":"NUMBER"} {"count":1 "label":"PERSON"} ] }]}<line_sep>results.visualize()<line_sep>results=entity_consistency(dataset compute_for=Annotations threshold=2)<assert_stmt>results<assert_stmt>results.data<eq>{"mentions":[{"mention":"first" "entities":[{"count":2 "label":"CARDINAL"} {"count":1 "label":"NUMBER"} {"count":1 "label":"PERSON"} ] }]}<line_sep>results.visualize()<block_end>@pytest.mark.parametrize(("metric" "expected_results") [(entity_consistency {"mentions":[]}) (mention_length {}) (entity_density {}) (entity_capitalness {}) (entity_labels {}) ] )<def_stmt>test_metrics_without_data metric expected_results monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_metrics_without_data"<line_sep>rb.delete(dataset)<line_sep>text="M"<line_sep>tokens=text.split(" ")<line_sep>rb.log(rb.TokenClassificationRecord(id=1 text=text tokens=tokens ) name=dataset )<line_sep>results=metric(dataset)<assert_stmt>results<assert_stmt>results.data<eq>expected_results<line_sep>results.visualize()<block_end><def_stmt>test_metrics_for_text_classification 
monkeypatch<block_start>mocking_client(monkeypatch)<line_sep>dataset="test_metrics_for_token_classification"<line_sep>text="test the f1 metric of the token classification task"<line_sep>rb.log(rb.TokenClassificationRecord(id=1 text=text tokens=text.split() prediction=[("a" 0 4) ("b" 5 8) ("b" 9 11)] annotation=[("a" 0 4) ("b" 5 8) ("a" 9 11)] ) name=dataset )<line_sep>results=f1(dataset)<assert_stmt>results<assert_stmt>results.data<eq>{"f1_macro":pytest.approx(0.75) "f1_micro":pytest.approx(0.6666666666666666) "a_f1":pytest.approx(0.6666666666666666) "a_precision":pytest.approx(1.0) "a_recall":pytest.approx(0.5) "b_f1":pytest.approx(0.6666666666666666) "b_precision":pytest.approx(0.5) "b_recall":pytest.approx(1.0) "precision_macro":pytest.approx(0.75) "precision_micro":pytest.approx(0.6666666666666666) "recall_macro":pytest.approx(0.75) "recall_micro":pytest.approx(0.6666666666666666) }<line_sep>results.visualize()<block_end> |
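# A small standalone sketch of how the expected numbers in the f1 test above can
# be reproduced by hand from the predicted and annotated spans. It uses exact-span
# matching per label; the helper name span_prf is not part of rubrix, and the
# macro F1 here is derived from the macro-averaged precision and recall, which is
# what reproduces the expected 0.75.
def span_prf(predicted, annotated):
    labels = {label for label, _, _ in predicted} | {label for label, _, _ in annotated}
    tp = fp = fn = 0
    per_label = {}
    for label in labels:
        pred = {span for span in predicted if span[0] == label}
        gold = {span for span in annotated if span[0] == label}
        tp_l, fp_l, fn_l = len(pred & gold), len(pred - gold), len(gold - pred)
        p = tp_l / (tp_l + fp_l) if pred else 0.0
        r = tp_l / (tp_l + fn_l) if gold else 0.0
        f = 2 * p * r / (p + r) if p + r else 0.0
        per_label[label] = (p, r, f)
        tp, fp, fn = tp + tp_l, fp + fp_l, fn + fn_l
    p_micro = tp / (tp + fp)
    r_micro = tp / (tp + fn)
    p_macro = sum(p for p, _, _ in per_label.values()) / len(per_label)
    r_macro = sum(r for _, r, _ in per_label.values()) / len(per_label)
    return {
        "per_label": per_label,
        "precision_micro": p_micro, "recall_micro": r_micro,
        "f1_micro": 2 * p_micro * r_micro / (p_micro + r_micro),
        "precision_macro": p_macro, "recall_macro": r_macro,
        "f1_macro": 2 * p_macro * r_macro / (p_macro + r_macro),
    }

# span_prf([("a", 0, 4), ("b", 5, 8), ("b", 9, 11)], [("a", 0, 4), ("b", 5, 8), ("a", 9, 11)])
# gives precision_macro = recall_macro = f1_macro = 0.75 and micro scores of 2/3,
# matching the pytest.approx values asserted above.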
"""Test playing selectors."""<import_from_stmt>.. util<class_stmt>TestPlaying(util.TestCase)<block_start>"""Test playing selectors."""<line_sep>MARKUP="""
<!DOCTYPE html>
<html>
<body>
<video id="vid" width="320" height="240" controls>
<source src="movie.mp4" type="video/mp4">
<source src="movie.ogg" type="video/ogg">
Your browser does not support the video tag.
</video>
</body>
</html>
"""<def_stmt>test_playing self<block_start>"""Test playing (matches nothing)."""<line_sep># Not actually sure how this is used, but it won't match anything anyways
self.assert_selector(self.MARKUP "video:playing" [] flags=util.HTML)<block_end><def_stmt>test_not_playing self<block_start>"""Test not playing."""<line_sep>self.assert_selector(self.MARKUP "video:not(:playing)" ["vid"] flags=util.HTML)<block_end><block_end> |
"""
Making LimeTabularExplainer Accessible
"""<import_from_stmt>lime.lime_tabular LimeTabularExplainer<line_sep> |
# -*- coding: utf-8 -*-
# !/usr/bin/env python3 -u
# copyright: sktime developers, BSD-3-Clause License (see LICENSE file)
"""Croston's Forecasting Method."""<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>sktime.forecasting.base BaseForecaster<import_from_stmt>sktime.forecasting.base._base DEFAULT_ALPHA<class_stmt>Croston(BaseForecaster)<block_start>r"""Croston's method for forecasting intermittent time series.
Implements the method proposed by Croston in [1]_ and described in [2]_.
Croston's method is a modification of (vanilla) exponential smoothing to handle
intermittent time series. A time series is considered intermittent if many
of its values are zero and the gaps between non-zero entries are not periodic.
Croston's method will predict a constant value for all future times, so
Croston's method essentially provides another notion for the average value
of a time series.
The method is (equivalent to) the following:
- Let :math:`v_0,\ldots,v_n` be the non-zero values of the time series
- Let :math:`v` be the exponentially smoothed average of :math:`v_0,\ldots,v_n`
- Let :math:`z_0,\ldots,z_n` be the number of consecutive zeros plus 1 between
the :math:`v_i` in the original time series.
- Let :math:`z` be the exponentially smoothed average of :math:`z_0,\ldots,z_n`
- Then the forecast is :math:`\frac{v}{z}`
The intuition is that :math:`v` is a weighted average of the non-zero time
series values and :math:`\frac{1}{z}` estimates the probability of getting a
non-zero value.
Example to illustrate the :math:`v` and :math:`z` notation.
- If the original time series is :math:`0,0,2,7,0,0,0,-5` then:
- The :math:`v`'s are :math:`2,7,-5`
- The :math:`z`'s are :math:`3,1,4`
Parameters
----------
smoothing : float, default = 0.1
Smoothing parameter in exponential smoothing
Examples
--------
>>> from sktime.forecasting.croston import Croston
>>> from sktime.datasets import load_PBS_dataset
>>> y = load_PBS_dataset()
>>> forecaster = Croston(smoothing=0.1)
>>> forecaster.fit(y)
Croston(...)
>>> y_pred = forecaster.predict(fh=[1,2,3])
See Also
--------
ExponentialSmoothing
References
----------
.. [1] <NAME>. Forecasting and stock control for intermittent demands.
Operational Research Quarterly (1970-1977), 23(3):pp. 289–303, 1972.
.. [2] <NAME>. Forecasting Intermittent Demand with the Croston Model.
https://towardsdatascience.com/croston-forecast-model-for-intermittent-demand-360287a17f5f
"""<line_sep>_tags={"requires-fh-in-fit":<false> # is forecasting horizon already required in fit?
}<def_stmt>__init__ self smoothing=0.1# hyperparameter
<block_start>self.smoothing=smoothing<line_sep>self._f=<none><line_sep>super(Croston self).__init__()<block_end><def_stmt>_fit self y X=<none> fh=<none><block_start>"""Fit to training data.
Parameters
----------
y : pd.Series
Target time series to which to fit the forecaster.
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
Returns
-------
self : returns an instance of self.
"""<line_sep>n_timepoints=len(y)# Historical period: i.e the input array's length
smoothing=self.smoothing<line_sep>y=y.to_numpy()# Transform the input into a numpy array
# Fit the parameters: level(q), periodicity(a) and forecast(f)
q,a,f=np.full((3 n_timepoints+1) np.nan)<line_sep>p=1# periods since last demand observation
# Initialization:
first_occurrence=np.argmax(y[:n_timepoints]<g>0)<line_sep>q[0]=y[first_occurrence]<line_sep>a[0]=1+first_occurrence<line_sep>f[0]=q[0]/a[0]<line_sep># Create t+1 forecasts:
<for_stmt>t range(0 n_timepoints)<block_start><if_stmt>y[t]<g>0<block_start>q[t+1]=smoothing<times>y[t]+(1-smoothing)<times>q[t]<line_sep>a[t+1]=smoothing<times>p+(1-smoothing)<times>a[t]<line_sep>f[t+1]=q[t+1]/a[t+1]<line_sep>p=1<block_end><else_stmt><block_start>q[t+1]=q[t]<line_sep>a[t+1]=a[t]<line_sep>f[t+1]=f[t]<line_sep>p<augadd>1<block_end><block_end>self._f=f<line_sep><return>self<block_end><def_stmt>_predict self fh=<none> X=<none> return_pred_int=<false> alpha=DEFAULT_ALPHA <block_start>"""Predict forecast.
Parameters
----------
fh : int, list or np.array, optional (default=None)
The forecaster's horizon with the steps ahead to predict.
X : pd.DataFrame, optional (default=None)
Exogenous variables are ignored.
Returns
-------
forecast : pd.series
Predicted forecasts.
"""<line_sep>len_fh=len(self.fh)<line_sep>f=self._f<line_sep># Predicting future forecasts:to_numpy()
y_pred=np.full(len_fh f[-1])<line_sep>index=self.fh.to_absolute(self.cutoff)<line_sep><return>pd.Series(y_pred index=index)<block_end><block_end> |
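# A minimal numerical sketch of the method described in the Croston docstring
# above, using its own example series 0, 0, 2, 7, 0, 0, 0, -5. The names below
# (croston_sketch, alpha) are illustrative, and the initialization differs
# slightly from the sktime implementation, so treat this as an approximation of
# the idea (smoothed demand size divided by smoothed demand interval), not as
# the exact forecaster output.
def croston_sketch(y, alpha=0.1):
    demands = [v for v in y if v != 0]             # the v_i in the docstring: 2, 7, -5
    intervals, gap = [], 1
    for v in y:
        if v != 0:
            intervals.append(gap)                  # the z_i in the docstring: 3, 1, 4
            gap = 1
        else:
            gap += 1
    def smooth(xs):
        s = xs[0]
        for x in xs[1:]:
            s = alpha * x + (1 - alpha) * s
        return s
    v = smooth(demands)
    z = smooth(intervals)
    return v / z                                   # constant forecast for all horizons

# croston_sketch([0, 0, 2, 7, 0, 0, 0, -5]) smooths the demands 2, 7, -5 and the
# intervals 3, 1, 4 and returns their ratio as the flat forecast value.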
""" Cisco_IOS_XR_shellutil_cfg
This module contains a collection of YANG definitions
for Cisco IOS\-XR shellutil package configuration.
This module contains definitions
for the following management objects\:
host\-names\: Container Schema for hostname configuration
Copyright (c) 2013\-2018 by Cisco Systems, Inc.
All rights reserved.
"""<import_stmt>sys<import_from_stmt>collections OrderedDict<import_from_stmt>ydk.types Entity<as>_Entity_<import_from_stmt>ydk.types EntityPath Identity Enum YType YLeaf YLeafList YList LeafDataList Bits Empty Decimal64<import_from_stmt>ydk.types Entity EntityPath Identity Enum YType YLeaf YLeafList YList LeafDataList Bits Empty Decimal64<import_from_stmt>ydk.filters YFilter<import_from_stmt>ydk.errors YError YModelError<import_from_stmt>ydk.errors.error_handler handle_type_error<as>_handle_type_error<class_stmt>HostNames(_Entity_)<block_start>"""
Container Schema for hostname configuration
.. attribute:: host_name
Configure system's hostname
**type**\: str
"""<line_sep>_prefix='shellutil-cfg'<line_sep>_revision='2015-10-12'<def_stmt>__init__ self<block_start><if_stmt>sys.version_info<g>(3 )<block_start>super().__init__()<block_end><else_stmt><block_start>super(HostNames self).__init__()<block_end>self._top_entity=<none><line_sep>self.yang_name="host-names"<line_sep>self.yang_parent_name="Cisco-IOS-XR-shellutil-cfg"<line_sep>self.is_top_level_class=<true><line_sep>self.has_list_ancestor=<false><line_sep>self.ylist_key_names=[]<line_sep>self._child_classes=OrderedDict([])<line_sep>self._leafs=OrderedDict([('host_name' (YLeaf(YType.str 'host-name') ['str'])) ])<line_sep>self.host_name=<none><line_sep>self._segment_path=<lambda>:"Cisco-IOS-XR-shellutil-cfg:host-names"<line_sep>self._is_frozen=<true><block_end><def_stmt>__setattr__ self name value<block_start>self._perform_setattr(HostNames ['host_name'] name value)<block_end><def_stmt>clone_ptr self<block_start>self._top_entity=HostNames()<line_sep><return>self._top_entity<block_end>@staticmethod<def_stmt>_meta_info <block_start><import_from_stmt>ydk.models.cisco_ios_xr._meta _Cisco_IOS_XR_shellutil_cfg<as>meta<line_sep><return>meta._meta_table['HostNames']['meta_info']<block_end><block_end> |
# Py3 compat layer
<import_from_future_stmt> unicode_literals<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_stmt>arcpy<import_stmt>glob<import_stmt>os<import_stmt>shutil<import_stmt>sys<line_sep># create a handle to the windows kernel; want to make Win API calls
<try_stmt><block_start><import_stmt>ctypes<import_from_stmt>ctypes wintypes<line_sep># pass str() to avoid bpo29082 in Python 2.7.13
kdll=ctypes.windll.LoadLibrary(str("kernel32.dll"))<block_end><except_stmt>(ImportError TypeError)<block_start>kdll=<none><block_end><import_from_stmt>.bootstrap_r execute_r<import_from_stmt>.github_release save_url release_info<import_from_stmt>.rpath r_lib_path r_path r_pkg_path r_pkg_version r_user_lib_path r_version arcmap_exists arcmap_path fnf_exception handle_fnf <import_from_stmt>.utils mkdtemp set_env_tmpdir<import_from_stmt>.fs getvolumeinfo hardlinks_supported junctions_supported<try_stmt><block_start><import_stmt>winreg<block_end><except_stmt>ImportError# py 2
<block_start><import_stmt>_winreg<as>winreg<block_end>PACKAGE_NAME='arcgisbinding'<line_sep>PACKAGE_VERSION=r_pkg_version()<def_stmt>bridge_running product<block_start>""" Check if the R ArcGIS bridge is running. Installation will fail
if the DLL is currently loaded."""<line_sep>running=<false><line_sep># check for the correct DLL
<if_stmt>product<eq>'Pro'<block_start>proxy_name="rarcproxy_pro.dll"<block_end><else_stmt><block_start>proxy_name="rarcproxy.dll"<block_end>kdll.GetModuleHandleW.restype=wintypes.HMODULE<line_sep>kdll.GetModuleHandleW.argtypes=[wintypes.LPCWSTR]<line_sep>dll_handle=kdll.GetModuleHandleW(proxy_name)# memory address of DLL
<if_stmt>dll_handle<is><not><none><block_start>running=<true><block_end><return>running<block_end><def_stmt>arcgis_platform <block_start>""" ArcGIS platform details used internally."""<line_sep>info=arcpy.GetInstallInfo()<line_sep>install_dir=info['InstallDir']<line_sep>arc_version=info['Version']<if_stmt>info['ProductName']<eq>'ArcGISPro'<block_start>product='Pro'<block_end><else_stmt># there are other levels, but this is a PYT run from toolbox,
# so unlikely to be a non-ArcMap context
<block_start>product='ArcMap'<block_end><return>(install_dir arc_version product)<block_end><def_stmt>validate_environment overwrite=<none><block_start>"""Make sure we have a version of the product that works, and that
the library isn't already loaded."""<line_sep>(install_dir arc_version product)=arcgis_platform()<line_sep># earlier versions excluded by virtue of not having Python toolbox support
no_hook_versions=('10.1' '10.2' '10.2.1' '10.2.2' '10.3')<line_sep>valid_env=<true><line_sep>msg=[]<if_stmt>arc_version<in>no_hook_versions<and>product<ne>'Pro'<block_start>msg.append("The ArcGIS R bridge requires ArcGIS 10.3.1 or later.")<line_sep>valid_env=<false><block_end><if_stmt>arc_version<in>('1.0' '1.0.2')<and>product<eq>'Pro'<block_start>msg.append("The ArcGIS R bridge requires ArcGIS Pro 1.1 or later.")<line_sep>valid_env=<false><block_end><if_stmt><not>overwrite<and>PACKAGE_VERSION<block_start>msg.append("The ArcGIS R bridge is already installed, and "<concat>"overwrite is disabled.")<line_sep>valid_env=<false><block_end><if_stmt>kdll<is><none><block_start>msg.append("Unable to connect to your Windows configuration, "<concat>"this is likely due to an incorrect Python installation. "<concat>"Try repairing your ArcGIS installation.")<line_sep>valid_env=<false><block_end># check the library isn't loaded
<if_stmt>kdll<is><not><none><and>bridge_running(product)<block_start>msg.append("The ArcGIS R bridge is currently in-use, restart the "<concat>"application and try again.")<line_sep>valid_env=<false><block_end><if_stmt>r_version()<is><none><block_start>msg.append("It doesn't look like R is installed. Install R prior "<concat>"to running this tool.")<line_sep>valid_env=<false><block_end><if_stmt><not>valid_env<block_start>arcpy.AddError("\n\n".join(msg))<line_sep>sys.exit()<block_end><block_end><def_stmt>create_registry_entry product arc_version<block_start>"""Create a registry link back to the arcgisbinding package."""<line_sep>root_key=winreg.HKEY_CURRENT_USER<if_stmt>product<eq>'Pro'<block_start>product_name="ArcGISPro"<block_end><else_stmt><block_start>product_name="Desktop{}".format(arc_version)<block_end>reg_path="SOFTWARE\\Esri\\{}".format(product_name)<line_sep>package_key='RintegrationProPackagePath'<line_sep>link_key=<none><try_stmt><block_start>full_access=(winreg.KEY_WOW64_64KEY+winreg.KEY_ALL_ACCESS)<line_sep># find the key, 64- or 32-bit we want it all
link_key=winreg.OpenKey(root_key reg_path 0 full_access)<block_end><except_stmt>fnf_exception<as>error<block_start>handle_fnf(error)<block_end><if_stmt>link_key<block_start><try_stmt><block_start>arcpy.AddMessage("Using registry key to link install.")<line_sep>binding_path="{}\\{}".format(r_lib_path() "arcgisbinding")<line_sep>winreg.SetValueEx(link_key package_key 0 winreg.REG_SZ binding_path)<block_end><except_stmt>fnf_exception<as>error<block_start>handle_fnf(error)<block_end><block_end><block_end><def_stmt>install_package overwrite=<false> r_library_path=r_lib_path()<block_start>"""Install ArcGIS R bindings onto this machine."""<if_stmt>overwrite<is><true><block_start>overwrite=<true><block_end><else_stmt><block_start>overwrite=<false><block_end>(install_dir arc_version product)=arcgis_platform()<line_sep>arcmap_needs_link=<false><line_sep># check that we're in a sane installation environment
validate_environment(overwrite)<line_sep># detect if we we have a 10.3.1 install that needs linking
<if_stmt>product<eq>'Pro'<and>arcmap_exists("10.3")<block_start>arcmap_needs_link=<true><line_sep>msg_base="Pro side by side with 10.3 detected,"<if_stmt>arcmap_path()<is><not><none><block_start>msg="{} installing bridge for both environments.".format(msg_base)<line_sep>arcpy.AddMessage(msg)<block_end><else_stmt><block_start>msg="{} but unable to find install path.".format(msg_base)+"ArcGIS bridge must be manually installed in ArcGIS 10.3."<line_sep>arcpy.AddWarning(msg)<block_end><block_end># if we're going to install the bridge in 10.3.1, create the appropriate
# directory before trying to install.
<if_stmt>arc_version<eq>'10.3.1'<and>product<eq>'ArcMap'<or>arcmap_needs_link<block_start>r_integration_dir=os.path.join(arcmap_path() "Rintegration")<line_sep># TODO escalate privs here? test on non-admin user
<if_stmt><not>os.path.exists(r_integration_dir)<block_start><try_stmt><block_start>write_test=os.path.join(install_dir 'test.txt')<with_stmt>open(write_test 'w')<as>f<block_start>f.write('test')<block_end>os.remove(write_test)<line_sep>os.makedirs(r_integration_dir)<block_end><except_stmt>IOError<block_start>arcpy.AddError("Insufficient privileges to create 10.3.1 bridge directory."<concat>" Please start {} as an administrator, by right clicking"<concat>" the icon, selecting \"Run as Administrator\", then run this"<concat>" script again.".format(product))<line_sep><return><block_end><block_end><block_end># set an R-compatible temporary folder, if needed.
orig_tmpdir=os.getenv("TMPDIR")<if_stmt><not>orig_tmpdir<block_start>set_env_tmpdir()<block_end>download_url=release_info()[0]<if_stmt>download_url<is><none><block_start>arcpy.AddWarning("Unable to get current release information."<concat>" Trying offline installation.")<block_end>local_install=<false><line_sep>base_path=os.path.join(os.path.abspath(os.path.dirname(__file__)) '..')<line_sep>zip_glob=glob.glob(os.path.join(base_path "arcgisbinding*.zip"))<line_sep># see if we have a local copy of the binding
<if_stmt>zip_glob<and>os.path.exists(zip_glob[0])<block_start>local_install=<true><line_sep>zip_path=zip_glob[0]<line_sep>zip_name=os.path.basename(zip_path)<block_end><elif_stmt><not>download_url<and><not>local_install<block_start>arcpy.AddError("Unable to access online package, and no "<concat>"local copy of package found.")<line_sep><return><block_end><else_stmt><block_start>local_install=<false><line_sep>zip_name=os.path.basename(download_url)<block_end># check for a network-based R installation
<if_stmt>r_path()<and>r_path()[0:2]<eq>r'\\'<block_start>arcpy.AddMessage("R installed on a network path, using fallback installation method.")<line_sep>r_local_install=<false><block_end><else_stmt><block_start>r_local_install=<true><block_end># we have a release, write it to disk for installation
<with_stmt>mkdtemp()<as>temp_dir# For R 4.0+, check version from GitHub but install via repo
<block_start><if_stmt>r_version()<and>r_version().split(".")[0]<eq>'4'<block_start>cmd="install.packages(\"arcgisbinding\", repos=\"https://r.esri.com\", type=\"win.binary\")"<line_sep>install_script=os.path.join(temp_dir 'install.R')<with_stmt>open(install_script 'w')<as>f<block_start>f.write(cmd)<block_end>rcmd_return=execute_r("Rscript" install_script)<if_stmt>rcmd_return<ne>0<block_start>arcpy.AddError(f"Failed to install bridge with `install.packages`, try manually running the command `{cmd}` from an R session or RStudio.")<block_end><block_end><else_stmt><block_start>package_path=os.path.join(temp_dir zip_name)<if_stmt>local_install<block_start>arcpy.AddMessage("Found local copy of binding, installing from zip")<line_sep>shutil.copyfile(zip_path package_path)<block_end><else_stmt><block_start>save_url(download_url package_path)<block_end><if_stmt>os.path.exists(package_path)# TODO -- need to do UAC escalation here?
# call the R installation script
<block_start>rcmd_return=0<if_stmt>r_local_install<block_start>rcmd_return=execute_r('Rcmd' 'INSTALL' package_path)<block_end><if_stmt><not>r_local_install<or>rcmd_return<ne>0# if we don't have a per-user library, create one
<block_start>r_user_lib=r_user_lib_path()<if_stmt><not>os.path.exists(r_user_lib)<block_start><try_stmt><block_start>arcpy.AddMessage("Creating per-user library directory")<line_sep>os.makedirs(r_user_lib)<block_end><except_stmt>OSError<block_start>arcpy.AddWarning("Failed to create per-user library.")<block_end><block_end># Can't execute Rcmd in this context, write out a temporary
# script and run install.packages() from within an R session.
install_script=os.path.join(temp_dir 'install.R')<with_stmt>open(install_script 'w')<as>f<block_start>f.write("install.packages(\"{}\", repos=NULL)".format(package_path.replace("\\" "/")))<block_end>rcmd_return=execute_r("Rscript" install_script)<if_stmt>rcmd_return<ne>0<block_start>arcpy.AddWarning("Fallback installation method failed.")<block_end><block_end><block_end><else_stmt><block_start>arcpy.AddError("No package found at {}".format(package_path))<line_sep><return><block_end><block_end><block_end># return TMPDIR to its original value; only need it for Rcmd INSTALL
set_env_tmpdir(orig_tmpdir)<line_sep># at 10.4 and Pro <=1.2, if the user has installed a version with a non-
# numeric patch level (e.g. 3.2.4revised), and the bridge is installed
# into Program Files, the link will fail. In this case, set the
# appropriate registry key so that the bridge will still work. Note that
# this isn't ideal, because it will persist after updates, but it is
# better than the bridge failing to work at all.
<if_stmt>(arc_version<eq>'10.4'<and>product<eq>'Desktop')<or>(arc_version<in>('1.1' '1.1.1' '1.2')<and>product<eq>'Pro')<block_start><if_stmt>r_version()<block_start>(r_major r_minor r_patchlevel)=r_version().split(".")<line_sep># if we have a patchlevel like '4revised' or '3alpha', and
# the global library path is used, then use the registry key.
<if_stmt>len(r_patchlevel)<g>1<and>'Program Files'<in>r_library_path# create_registry_entry(product, arc_version)
<block_start>msg=("Currently, the bridge doesn't support patched releases"<concat>" (e.g. 3.2.4 Revised) in a global install. Please use"<concat>" another version of R.")<line_sep>arcpy.AddError(msg)<line_sep><return><block_end><block_end><block_end># at 10.3.1, we _must_ have the bridge installed at the correct location.
# create a symlink that connects back to the correct location on disk.
<if_stmt>arc_version<eq>'10.3.1'<and>product<eq>'ArcMap'<or>arcmap_needs_link<block_start>link_dir=os.path.join(r_integration_dir PACKAGE_NAME)<if_stmt>os.path.exists(link_dir)<block_start><if_stmt>junctions_supported(link_dir)<or>hardlinks_supported(link_dir)# os.rmdir uses RemoveDirectoryW, and can delete a junction
<block_start>os.rmdir(link_dir)<block_end><else_stmt><block_start>shutil.rmtree(link_dir)<block_end><block_end># set up the link
r_package_path=r_pkg_path()<if_stmt>r_package_path<block_start>arcpy.AddMessage("R package path: {}.".format(r_package_path))<block_end><else_stmt><block_start>arcpy.AddError("Unable to locate R package library. Link failed.")<line_sep><return><block_end>detect_msg="ArcGIS 10.3.1 detected."<if_stmt>junctions_supported(link_dir)<or>hardlinks_supported(link_dir)<block_start>arcpy.AddMessage("{} Creating link to package.".format(detect_msg))<line_sep>kdll.CreateSymbolicLinkW(link_dir r_package_path 1)<block_end><else_stmt># working on a non-NTFS volume, copy instead
<block_start>vol_info=getvolumeinfo(link_dir)<line_sep>arcpy.AddMessage("{} Drive type: {}. Copying package files.".format(detect_msg vol_info[0]))<line_sep># NOTE: this will need to be resynced when the package is updated,
# if installed from the R side.
shutil.copytree(r_package_path link_dir)<block_end><block_end><block_end># execute as standalone script, get parameters from sys.argv
<if_stmt>__name__<eq>'__main__'<block_start><if_stmt>len(sys.argv)<eq>2<block_start>overwrite=sys.argv[1]<block_end><else_stmt><block_start>overwrite=<none><block_end>print("library path: {}".format(r_lib_path()))<line_sep>install_package(overwrite=overwrite r_library_path=r_lib_path())<block_end> |
# Copyright FuseSoC contributors
# Licensed under the 2-Clause BSD License, see LICENSE for details.
# SPDX-License-Identifier: BSD-2-Clause
<import_stmt>sys<import_stmt>yaml<line_sep>template="""CAPI=2:
name : {}
targets:
default:
parameters: [p]
parameters:
p:
datatype : str
paramtype : vlogparam
"""<with_stmt>open(sys.argv[1])<as>fin<block_start>data=yaml.safe_load(fin)<line_sep>config=data.get("parameters")<line_sep>files_root=data.get("files_root")<line_sep>vlnv=data.get("vlnv")<block_end><with_stmt>open("generated.core" "w")<as>fout<block_start>fout.write(template.format(vlnv))<block_end> |
# Copyright (c) Facebook, Inc. and its affiliates.
<import_stmt>os<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>torch<import_stmt>torchvision.transforms<as>transforms<import_from_stmt>PIL Image<line_sep>##Yolo related
yolo_path='./PyTorch-YOLOv3'<line_sep>sys.path.append(yolo_path)<try_stmt><block_start><import_from_stmt>models Darknet<import_from_stmt>utils.utils non_max_suppression rescale_boxes<import_from_stmt>utils.datasets pad_to_square resize<block_end><except_stmt>ImportError<block_start>print("Cannot find PyTorch-YOLOv3")<block_end>##lightweight human pose
# pose2d_estimator_path = '/home/hjoo/codes_test/lightweight-human-pose-estimation.pytorch/'
# pose2d_checkpoint = "/home/hjoo/codes_test/lightweight-human-pose-estimation.pytorch/pretrain/checkpoint_iter_370000.pth"
pose2d_checkpoint="./lightweight-human-pose-estimation.pytorch/checkpoint_iter_370000.pth"<line_sep>pose2d_estimator_path='./lightweight-human-pose-estimation.pytorch/'<line_sep>sys.path.append(pose2d_estimator_path)<try_stmt><block_start><import_from_stmt>pose2d_models.with_mobilenet PoseEstimationWithMobileNet<import_from_stmt>modules.load_state load_state<import_from_stmt>val normalize pad_width<import_from_stmt>modules.pose Pose track_poses<import_from_stmt>modules.keypoints extract_keypoints group_keypoints<block_end><except_stmt>ImportError<block_start>print("Cannot find lightweight-human-pose-estimation.pytorch")<block_end><def_stmt>Load_Yolo device#Load Darknet
<block_start>yolo_model_def=os.path.join(yolo_path 'config/yolov3-tiny.cfg')<line_sep>yolo_img_size=416<line_sep>yolo_weights_path=os.path.join(yolo_path 'weights/yolov3-tiny.weights')<line_sep>model=Darknet(yolo_model_def img_size=yolo_img_size).to(device)<if_stmt>yolo_weights_path.endswith(".weights")# Load darknet weights
<block_start>model.load_darknet_weights(yolo_weights_path)<block_end><else_stmt># Load checkpoint weights
<block_start>model.load_state_dict(torch.load(yolo_weights_path))<block_end>model.eval()# Set in evaluation mode
<return>model<block_end><def_stmt>Yolo_detect model camInputFrame img_size=416 conf_thres=0.8 nms_thres=0.4<block_start>img=transforms.ToTensor()(Image.fromarray(camInputFrame))<line_sep># Pad to square resolution
img,_=pad_to_square(img 0)<line_sep># Resize
img=resize(img img_size)<line_sep>img=img.unsqueeze(0)#(1,3,416.419)
input_imgs=img.cuda()<with_stmt>torch.no_grad()<block_start>detections=model(input_imgs)<line_sep>detections=non_max_suppression(detections conf_thres nms_thres)<block_end><if_stmt>detections<is><not><none><block_start>detections=detections[0]<if_stmt>detections<is><not><none><block_start>detections=rescale_boxes(detections img_size camInputFrame.shape[:2])<block_end><block_end><return>detections<block_end><def_stmt>Yolo_detectHuman model camInputFrame<block_start>detections=Yolo_detect(model camInputFrame conf_thres=0.1 nms_thres=0.3)#Modified to be better with yolo tiny
bbr_list=[]#minX, minY, width, height
<if_stmt>detections<is><not><none><block_start><for_stmt>x1,y1,x2,y2,conf,cls_conf,cls_pred detections<block_start><if_stmt>cls_pred<ne>0<block_start><continue><block_end>box_w=x2-x1<line_sep>box_h=y2-y1<line_sep># camInputFrame = viewer2D.Vis_Bbox_minmaxPt(camInputFrame,[x1,y1], [x2,y2])
bbr_list.append(np.array([x1 y1 box_w box_h]))<block_end><block_end><return>bbr_list<block_end>#Code from https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch/demo.py
<def_stmt>infer_fast net img net_input_height_size stride upsample_ratio cpu pad_value=(0 0 0) img_mean=(128 128 128) img_scale=1/256<block_start>height,width,_=img.shape<line_sep>scale=net_input_height_size/height<line_sep>scaled_img=cv2.resize(img (0 0) fx=scale fy=scale interpolation=cv2.INTER_CUBIC)<line_sep>scaled_img=normalize(scaled_img img_mean img_scale)<line_sep>min_dims=[net_input_height_size max(scaled_img.shape[1] net_input_height_size)]<line_sep>padded_img,pad=pad_width(scaled_img stride pad_value min_dims)<line_sep>tensor_img=torch.from_numpy(padded_img).permute(2 0 1).unsqueeze(0).float()<if_stmt><not>cpu<block_start>tensor_img=tensor_img.cuda()<block_end>stages_output=net(tensor_img)<line_sep>stage2_heatmaps=stages_output[-2]<line_sep>heatmaps=np.transpose(stage2_heatmaps.squeeze().cpu().data.numpy() (1 2 0))<line_sep>heatmaps=cv2.resize(heatmaps (0 0) fx=upsample_ratio fy=upsample_ratio interpolation=cv2.INTER_CUBIC)<line_sep>stage2_pafs=stages_output[-1]<line_sep>pafs=np.transpose(stage2_pafs.squeeze().cpu().data.numpy() (1 2 0))<line_sep>pafs=cv2.resize(pafs (0 0) fx=upsample_ratio fy=upsample_ratio interpolation=cv2.INTER_CUBIC)<line_sep><return>heatmaps pafs scale pad<block_end>#Code from https://github.com/Daniil-Osokin/lightweight-human-pose-estimation.pytorch/demo.py
<def_stmt>pose2d_detectHuman net img height_size=256 track=1 smooth=1 bVis=<true><block_start>stride=8<line_sep>upsample_ratio=4<line_sep>num_keypoints=Pose.num_kpts<line_sep>previous_poses=[]<line_sep>delay=33<if_stmt><true># for img in image_provider:
<block_start>orig_img=img.copy()<line_sep>heatmaps,pafs,scale,pad=infer_fast(net img height_size stride upsample_ratio cpu=<not>torch.cuda.is_available())<line_sep>total_keypoints_num=0<line_sep>all_keypoints_by_type=[]<for_stmt>kpt_idx range(num_keypoints)# 19th for bg
<block_start>total_keypoints_num<augadd>extract_keypoints(heatmaps[: : kpt_idx] all_keypoints_by_type total_keypoints_num)<block_end>pose_entries,all_keypoints=group_keypoints(all_keypoints_by_type pafs demo=<true>)<for_stmt>kpt_id range(all_keypoints.shape[0])<block_start>all_keypoints[kpt_id 0]=(all_keypoints[kpt_id 0]<times>stride/upsample_ratio-pad[1])/scale<line_sep>all_keypoints[kpt_id 1]=(all_keypoints[kpt_id 1]<times>stride/upsample_ratio-pad[0])/scale<block_end>current_poses=[]<for_stmt>n range(len(pose_entries))<block_start><if_stmt>len(pose_entries[n])<eq>0<block_start><continue><block_end>pose_keypoints=np.ones((num_keypoints 2) dtype=np.int32)<times>-1<for_stmt>kpt_id range(num_keypoints)<block_start><if_stmt>pose_entries[n][kpt_id]<ne>-1.0# keypoint was found
<block_start>pose_keypoints[kpt_id 0]=int(all_keypoints[int(pose_entries[n][kpt_id]) 0])<line_sep>pose_keypoints[kpt_id 1]=int(all_keypoints[int(pose_entries[n][kpt_id]) 1])<block_end><block_end>pose=Pose(pose_keypoints pose_entries[n][18])<line_sep>current_poses.append(pose)<block_end><if_stmt>bVis<block_start><if_stmt>track<block_start>track_poses(previous_poses current_poses smooth=smooth)<line_sep>previous_poses=current_poses<block_end><for_stmt>pose current_poses<block_start>pose.draw(img)<block_end>img=cv2.addWeighted(orig_img 0.6 img 0.4 0)<for_stmt>pose current_poses<block_start>cv2.rectangle(img (pose.bbox[0] pose.bbox[1]) (pose.bbox[0]+pose.bbox[2] pose.bbox[1]+pose.bbox[3]) (0 255 0))<if_stmt>track<block_start>cv2.putText(img 'id: {}'.format(pose.id) (pose.bbox[0] pose.bbox[1]-16) cv2.FONT_HERSHEY_COMPLEX 0.5 (0 0 255))<block_end><block_end>cv2.imshow('Lightweight Human Pose Estimation Python Demo' img)<line_sep>key=cv2.waitKey(delay)<if_stmt>key<eq>27# esc
<block_start><return><block_end><elif_stmt>key<eq>112# 'p'
<block_start><if_stmt>delay<eq>33<block_start>delay=0<block_end><else_stmt><block_start>delay=33<block_end><block_end><block_end><block_end><return>current_poses<block_end><def_stmt>Load_pose2d device<block_start>"""
Loads the lightweight 2D pose estimation network; the checkpoint is loaded on CPU and the network is then moved to the given device.
"""<line_sep>net=PoseEstimationWithMobileNet()<line_sep>checkpoint=torch.load(pose2d_checkpoint map_location='cpu')<line_sep>load_state(net checkpoint)<line_sep>net=net.eval()<line_sep>net=net.to(device)<line_sep><return>net<block_end><class_stmt>BodyBboxDetector<block_start><def_stmt>__init__ self method="2dpose" device=torch.device('cuda')<block_start>"""
args:
method: "yolo" or "2dpose"
"""<line_sep>self.method=method<if_stmt>method<eq>"yolo"<block_start>print("Loading Yolo Model...")<line_sep>self.model=Load_Yolo(device)<line_sep>print("Done")<block_end><elif_stmt>method<eq>"2dpose"<block_start>print("Loading Pose Estimation Model...")<line_sep>self.model=Load_pose2d(device)<line_sep>print("Done")<block_end><else_stmt><block_start>print("invalid method")<assert_stmt><false><block_end>self.bboxXYWH_list=<none><block_end><def_stmt>detectBbox self img_bgr<block_start>"""
args:
img_bgr: Raw image with BGR order (cv2 default). Currently assumes BGR #TODO: make sure the input type of each method
output:
bboxXYWH_list: list of bboxes. Each bbox has XYWH form (minX,minY,width,height)
"""<if_stmt>self.method<eq>"yolo"<block_start>bboxXYWH_list=Yolo_detectHuman(self.model img_bgr)<block_end><elif_stmt>self.method<eq>"2dpose"<block_start>poses_from2dPoseEst=pose2d_detectHuman(self.model img_bgr bVis=<false>)<line_sep>bboxXYWH_list=[]<for_stmt>poseEst poses_from2dPoseEst<block_start>bboxXYWH_list.append(np.array(poseEst.bbox))<block_end><block_end><else_stmt><block_start>print("Unknown bbox extimation method")<assert_stmt><false><block_end>self.bboxXYWH_list=bboxXYWH_list#Save this as member function
<return>bboxXYWH_list<block_end><block_end> |
<import_from_stmt>.builder build_data_loader<line_sep> |
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>gym<import_stmt>numpy<as>np<class_stmt>Squeeze(gym.ObservationWrapper)<block_start>'''Assume wrap_deepmind with scale=True'''<def_stmt>__init__ self env<block_start><import_from_stmt>gym spaces<line_sep>gym.ObservationWrapper.__init__(self env)<line_sep>self.observation_space=spaces.Box(low=0 high=1.0 shape=(84 84) dtype=np.float32)<block_end><def_stmt>observation self observation<block_start><return>np.squeeze(observation)<block_end><block_end><def_stmt>make_atari_deepmind rom_name valid=<false><block_start><import_from_stmt>external.atari_wrappers make_atari wrap_deepmind<line_sep>env=make_atari(rom_name)<line_sep># framestack is handled by sampler.py
env=wrap_deepmind(env episode_life=<not>valid frame_stack=<false> scale=<true>)<line_sep>env=Squeeze(env)<line_sep><return>env<block_end> |
<import_from_stmt>django.apps AppConfig<import_from_stmt>django.utils.translation ugettext_lazy<as>_<class_stmt>DjangoCovid19Config(AppConfig)<block_start>name='django_covid19'<line_sep>verbose_name=_('django_covid19')<def_stmt>ready self<block_start><import_stmt>django_covid19.signals<block_end><block_end> |
"""SentencePiece based word tokenizer module"""<import_from_stmt>pathlib Path<import_from_stmt>typing List<import_stmt>sentencepiece<as>spm<import_from_stmt>urduhack.stop_words STOP_WORDS<def_stmt>_is_token pieces:list special_symbol:str="▁"<arrow>List[str]<block_start>"""
Check for stopwords and actual words in word pieces
Args:
pieces (list): word pieces returned by sentencepiece model
special_symbol (str): spm prefix special symbol for space
Returns:
List of decoded words
"""<line_sep>decoded=[]<for_stmt>piece pieces<block_start><if_stmt>special_symbol<not><in>piece<block_start><if_stmt>piece<in>STOP_WORDS<or>len(piece)<g>3<block_start>piece=special_symbol+piece<line_sep>decoded.append(piece)<block_end><else_stmt><block_start>decoded.append(piece)<block_end><block_end><else_stmt><block_start>decoded.append(piece)<block_end><block_end><return>decoded<block_end><def_stmt>_load_model model_path:str<arrow>spm.SentencePieceProcessor<block_start>"""
Loads the pre-trained SentencePiece model file
Args:
model_path (str): Path to the spm model file
Returns:
spm model class instance
"""<line_sep>spm_model=spm.SentencePieceProcessor()<line_sep>spm_model.Load(model_file=model_path)<line_sep><return>spm_model<block_end><def_stmt>_is_model_available model_path:str<arrow><none><block_start>"""
Check if the model file exists.
Args:
model_path (str): path to the tokenizer model file
Raises:
FileNotFoundError: If model_path does not exist
Returns: None
"""<if_stmt><not>Path(model_path).exists()<block_start>_error="Word tokenizer Model not found!"<concat>"Please run 'urduhack download' in terminal."<concat>"Doc: https://urduhack.readthedocs.io/en/stable/installation.html#downloading-models"<line_sep><raise>FileNotFoundError(_error)<block_end><block_end> |
<import_from_stmt>plex_database.core db<import_from_stmt>plex_database.models.library_section LibrarySection<import_from_stmt>peewee *<class_stmt>Directory(Model)<block_start><class_stmt>Meta<block_start>database=db<line_sep>db_table='directories'<block_end>library_section=ForeignKeyField(LibrarySection null=<true> related_name='directories')<line_sep>parent_directory=ForeignKeyField('self' null=<true> related_name='children')<line_sep>path=CharField(null=<true>)<line_sep>created_at=DateTimeField(null=<true>)<line_sep>updated_at=DateTimeField(null=<true>)<line_sep>deleted_at=DateTimeField(null=<true>)<block_end> |
<import_stmt>unittest<import_from_stmt>tensorboardX.crc32c _crc32c _crc32c_native crc32c<class_stmt>CRC32CTest(unittest.TestCase)<block_start><def_stmt>test_crc32c self<block_start>data=b'abcd'<assert_stmt>crc32c(data)<eq>0x92c80a31<block_end><def_stmt>test_crc32c_python self<block_start>data=b'abcd'<assert_stmt>_crc32c(data)<eq>0x92c80a31<block_end><def_stmt>test_crc32c_native self<block_start><if_stmt>_crc32c_native<is><none><block_start><return><block_end>data=b'abcd'<assert_stmt>_crc32c_native(data)<eq>0x92c80a31<block_end><block_end> |
<import_stmt>time<import_stmt>pytest<import_stmt>jwt<import_from_stmt>cryptography.hazmat.primitives.asymmetric rsa<import_from_stmt>cryptography.hazmat.primitives serialization<import_from_stmt>authlib.jose jwk<import_from_stmt>util.security.jwtutil decode exp_max_s_option jwk_dict_to_public_key InvalidTokenError InvalidAlgorithmError <line_sep>@pytest.fixture(scope="session")<def_stmt>private_key <block_start><return>rsa.generate_private_key(public_exponent=65537 key_size=2048 )<block_end>@pytest.fixture(scope="session")<def_stmt>private_key_pem private_key<block_start><return>private_key.private_bytes(encoding=serialization.Encoding.PEM format=serialization.PrivateFormat.TraditionalOpenSSL encryption_algorithm=serialization.NoEncryption() )<block_end>@pytest.fixture(scope="session")<def_stmt>public_key private_key<block_start><return>private_key.public_key().public_bytes(encoding=serialization.Encoding.PEM format=serialization.PublicFormat.SubjectPublicKeyInfo )<block_end><def_stmt>_token_data audience subject iss iat=<none> exp=<none> nbf=<none><block_start><return>{"iss":iss "aud":audience "nbf":nbf()<if>nbf<is><not><none><else>int(time.time()) "iat":iat()<if>iat<is><not><none><else>int(time.time()) "exp":exp()<if>exp<is><not><none><else>int(time.time()+3600) "sub":subject }<block_end>@pytest.mark.parametrize("aud, iss, nbf, iat, exp, expected_exception" [pytest.param("invalidaudience" "someissuer" <none> <none> <none> "Invalid audience" id="invalid audience" ) pytest.param("someaudience" "invalidissuer" <none> <none> <none> "Invalid issuer" id="invalid issuer") pytest.param("someaudience" "someissuer" <lambda>:time.time()+120 <none> <none> "The token is not yet valid" id="invalid not before" ) pytest.param("someaudience" "someissuer" <none> <lambda>:time.time()+120 <none> "Issued At claim" id="issued at in future" ) pytest.param("someaudience" "someissuer" <none> <none> <lambda>:time.time()-100 "Signature has expired" id="already expired" ) pytest.param("someaudience" "someissuer" <none> <none> <lambda>:time.time()+10000 "Token was signed for more than" id="expiration too far in future" ) pytest.param("someaudience" "someissuer" <lambda>:time.time()+10 <none> <none> <none> id="not before in future by within leeway" ) pytest.param("someaudience" "someissuer" <none> <lambda>:time.time()+10 <none> <none> id="issued at in future but within leeway" ) pytest.param("someaudience" "someissuer" <none> <none> <lambda>:time.time()-10 <none> id="expiration in past but within leeway" ) ] )<def_stmt>test_decode_jwt_validation aud iss nbf iat exp expected_exception private_key_pem public_key<block_start>token=jwt.encode(_token_data(aud "subject" iss iat exp nbf) private_key_pem "RS256")<if_stmt>expected_exception<is><not><none><block_start><with_stmt>pytest.raises(InvalidTokenError)<as>ite<block_start>max_exp=exp_max_s_option(3600)<line_sep>decode(token public_key algorithms=["RS256"] audience="someaudience" issuer="someissuer" options=max_exp leeway=60 )<block_end><assert_stmt>ite.match(expected_exception)<block_end><else_stmt><block_start>max_exp=exp_max_s_option(3600)<line_sep>decode(token public_key algorithms=["RS256"] audience="someaudience" issuer="someissuer" options=max_exp leeway=60 )<block_end><block_end><def_stmt>test_decode_jwt_invalid_key private_key_pem# Encode with the test private key.
<block_start>token=jwt.encode(_token_data("aud" "subject" "someissuer") private_key_pem "RS256")<line_sep># Try to decode with a different public key.
another_public_key=(rsa.generate_private_key(public_exponent=65537 key_size=2048 ).public_key().public_bytes(encoding=serialization.Encoding.PEM format=serialization.PublicFormat.SubjectPublicKeyInfo ))<with_stmt>pytest.raises(InvalidTokenError)<as>ite<block_start>max_exp=exp_max_s_option(3600)<line_sep>decode(token another_public_key algorithms=["RS256"] audience="aud" issuer="someissuer" options=max_exp leeway=60 )<block_end><assert_stmt>ite.match("Signature verification failed")<block_end><def_stmt>test_decode_jwt_invalid_algorithm private_key_pem public_key# Encode with the test private key.
<block_start>token=jwt.encode(_token_data("aud" "subject" "someissuer") private_key_pem "RS256")<line_sep># Attempt to decode but only with a different algorithm than that used.
<with_stmt>pytest.raises(InvalidAlgorithmError)<as>ite<block_start>max_exp=exp_max_s_option(3600)<line_sep>decode(token public_key algorithms=["ES256"] audience="aud" issuer="someissuer" options=max_exp leeway=60 )<block_end><assert_stmt>ite.match("are not whitelisted")<block_end><def_stmt>test_jwk_dict_to_public_key private_key private_key_pem<block_start>public_key=private_key.public_key()<line_sep>key_dict=jwk.dumps(public_key.public_bytes(encoding=serialization.Encoding.PEM format=serialization.PublicFormat.SubjectPublicKeyInfo ))<line_sep>converted=jwk_dict_to_public_key(key_dict)<line_sep># Encode with the test private key.
token=jwt.encode(_token_data("aud" "subject" "someissuer") private_key_pem "RS256")<line_sep># Decode with the converted key.
max_exp=exp_max_s_option(3600)<line_sep>decode(token converted algorithms=["RS256"] audience="aud" issuer="someissuer" options=max_exp leeway=60 )<block_end> |
<import_from_stmt>setuptools setup find_packages<line_sep>setup(name='scrapy-djangoitem' version='1.1.1' url='https://github.com/scrapy-plugins/scrapy-djangoitem' description='Scrapy extension to write scraped items using Django models' long_description=open('README.rst').read() author='Scrapy developers' license='BSD' packages=find_packages(exclude=('tests' 'tests.*')) include_package_data=<true> zip_safe=<false> classifiers=['Framework :: Scrapy' 'Development Status :: 5 - Production/Stable' 'Environment :: Console' 'Intended Audience :: Developers' 'License :: OSI Approved :: BSD License' 'Operating System :: OS Independent' 'Programming Language :: Python' 'Programming Language :: Python :: 2' 'Programming Language :: Python :: 2.7' 'Programming Language :: Python :: 3' 'Programming Language :: Python :: 3.4' 'Programming Language :: Python :: 3.5' 'Programming Language :: Python :: 3.6' 'Topic :: Utilities' 'Framework :: Django' 'Framework :: Scrapy' ] install_requires=['six'] requires=['scrapy (>=0.24.5)' 'django'] )<line_sep> |
<import_stmt>json<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>tensorflow<as>tf<import_from_stmt>google.protobuf json_format<import_from_stmt>seldon_e2e_utils post_comment_in_pr run_benchmark_and_capture_results<line_sep>@[email protected]("argo_worfklows")<def_stmt>test_service_orchestrator <block_start>sort_by=["apiType" "disableOrchestrator"]<line_sep>data_size=1_000<line_sep>data=[100.0]<times>data_size<line_sep>data_tensor={"data":{"tensor":{"values":data "shape":[1 data_size]}}}<line_sep>df=run_benchmark_and_capture_results(api_type_list=["rest" "grpc"] disable_orchestrator_list=["false" "true"] image_list=["seldonio/seldontest_predict:1.10.0-dev"] benchmark_data=data_tensor )<line_sep>df=df.sort_values(sort_by)<line_sep>result_body="# Benchmark results - Testing Service Orchestrator\n\n"<line_sep>orch_mean=all((df[df["disableOrchestrator"]<eq>"false"]["mean"].values-df[df["disableOrchestrator"]<eq>"true"]["mean"].values)<l>3)<line_sep>result_body<augadd>f"* Orch added mean latency under 4ms: {orch_mean}\n"<line_sep>orch_nth=all((df[df["disableOrchestrator"]<eq>"false"]["95th"].values-df[df["disableOrchestrator"]<eq>"true"]["95th"].values)<l>5)<line_sep>result_body<augadd>f"* Orch added 95th latency under 5ms: {orch_nth}\n"<line_sep>orch_nth=all((df[df["disableOrchestrator"]<eq>"false"]["99th"].values-df[df["disableOrchestrator"]<eq>"true"]["99th"].values)<l>10)<line_sep>result_body<augadd>f"* Orch added 99th latency under 10ms: {orch_nth}\n"<line_sep># We have to set no errors to 1 as the tools for some reason have 1 as base
no_err=all(df["errors"]<le>1)<line_sep>result_body<augadd>f"* No errors: {no_err}\n"<line_sep>result_body<augadd>"\n### Results table\n\n"<line_sep>result_body<augadd>str(df.to_markdown())<line_sep>post_comment_in_pr(result_body)<assert_stmt>orch_mean<assert_stmt>orch_nth<block_end>@[email protected]("argo_worfklows")<def_stmt>test_workers_performance <block_start>sort_by=["apiType" "serverWorkers"]<line_sep>data_size=10<line_sep>data=[100.0]<times>data_size<line_sep>data_tensor={"data":{"tensor":{"values":data "shape":[1 data_size]}}}<line_sep>df=run_benchmark_and_capture_results(api_type_list=["grpc" "rest"] server_workers_list=["1" "5" "10"] benchmark_concurrency_list=["10" "100" "1000"] parallelism="1" requests_cpu_list=["4000Mi"] limits_cpu_list=["4000Mi"] image_list=["seldonio/seldontest_predict:1.10.0-dev"] benchmark_data=data_tensor )<line_sep>df=df.sort_values(sort_by)<line_sep>result_body="# Benchmark results - Testing Workers Performance\n\n"<line_sep>result_body<augadd>"\n### Results table\n\n"<line_sep>result_body<augadd>str(df.to_markdown())<line_sep>post_comment_in_pr(result_body)<block_end>@[email protected]("argo_worfklows")<def_stmt>test_python_wrapper_v1_vs_v2_iris <block_start>sort_by=["concurrency" "apiType"]<line_sep>benchmark_concurrency_list=["1" "50" "150"]<line_sep>result_body=""<line_sep>result_body<augadd>"\n# Benchmark Results - Python Wrapper V1 vs V2\n\n"<line_sep># Using single worker as fastapi also uses single worker
df_pywrapper=run_benchmark_and_capture_results(api_type_list=["rest" "grpc"] protocol="seldon" server_list=["SKLEARN_SERVER"] benchmark_concurrency_list=benchmark_concurrency_list model_uri_list=["gs://seldon-models/v1.12.0-dev/sklearn/iris"] benchmark_data={"data":{"ndarray":[[1 2 3 4]]}} )<line_sep>df_pywrapper=df_pywrapper.sort_values(sort_by)<line_sep>conc_idx=df_pywrapper["concurrency"]<eq>1<line_sep># Python V1 Wrapper Validations
# Ensure base mean performance latency below 10 ms
v1_latency_mean=all((df_pywrapper[conc_idx]["mean"]<l>10))<line_sep>result_body<augadd>f"* V1 base mean performance latency under 10ms: {v1_latency_mean}\n"<line_sep># Ensure 99th percentiles are not spiking above 15ms
v1_latency_nth=all(df_pywrapper[conc_idx]["99th"]<l>10)<line_sep>result_body<augadd>f"* V1 base 99th performance latenc under 10ms: {v1_latency_nth}\n"<line_sep># Ensure throughput is above 180 rps for REST
v1_rps_rest=all(df_pywrapper[(df_pywrapper["apiType"]<eq>"rest")&conc_idx]["throughputAchieved"]<g>180)<line_sep>result_body<augadd>f"* V1 base throughput above 180rps: {v1_rps_rest}\n"<line_sep># Ensure throughput is above 250 rps for GRPC
v1_rps_grpc=all(df_pywrapper[(df_pywrapper["apiType"]<eq>"grpc")&conc_idx]["throughputAchieved"]<g>250)<line_sep>result_body<augadd>f"* V1 base throughput above 250rps: {v1_rps_grpc}\n"<line_sep># Validate latenc added by adding service orchestrator is lower than 4ms
# TODO: Validate equivalent of parallel workers in MLServer
df_mlserver=run_benchmark_and_capture_results(api_type_list=["rest" "grpc"] model_name="classifier" protocol="kfserving" server_list=["SKLEARN_SERVER"] model_uri_list=["gs://seldon-models/sklearn/iris-0.23.2/lr_model"] benchmark_concurrency_list=benchmark_concurrency_list benchmark_data={"inputs":[{"name":"predict" "datatype":"FP32" "shape":[1 4] "data":[[1 2 3 4]] }]} benchmark_grpc_data_override={"model_name":"classifier" "inputs":[{"name":"predict" "datatype":"FP32" "shape":[1 4] "contents":{"fp32_contents":[1 2 3 4]} }] } )<line_sep># First we sort the dataframes to ensure they are compared correctly
df_mlserver=df_mlserver.sort_values(sort_by)<line_sep># Python V1 Wrapper Validations
conc_idx=df_mlserver["concurrency"]<eq>1<line_sep># Ensure all mean performance latency below 5 ms
v2_latency_mean=all(df_mlserver[conc_idx]["mean"]<l>5)<line_sep>result_body<augadd>f"* V2 mean performance latency under 5ms: {v2_latency_mean}\n"<line_sep># Ensure 99th percentiles are not spiking above 15ms
v2_latency_nth=all(df_mlserver[conc_idx]["99th"]<l>10)<line_sep>result_body<augadd>f"* V2 99th performance latenc under 10ms: {v2_latency_nth}\n"<line_sep># Ensure throughput is above 180 rps for REST
v2_rps_rest=all(df_mlserver[(df_mlserver["apiType"]<eq>"rest")&conc_idx]["throughputAchieved"]<g>250)<line_sep>result_body<augadd>f"* V2 REST throughput above 250rps: {v2_rps_rest}\n"<line_sep># Ensure throughput is above 250 rps for GRPC
v2_rps_grpc=all(df_mlserver[(df_mlserver["apiType"]<eq>"grpc")&conc_idx]["throughputAchieved"]<g>250)<line_sep>result_body<augadd>f"* V2 throughput above 300rps: {v2_rps_grpc}\n"<line_sep>result_body<augadd>"\n### Python V1 Wrapper Results table\n\n"<line_sep>result_body<augadd>str(df_pywrapper.to_markdown())<line_sep>result_body<augadd>"\n\n\n### Python V2 MLServer Results table\n\n"<line_sep>result_body<augadd>str(df_mlserver.to_markdown())<line_sep>post_comment_in_pr(result_body)<assert_stmt>v1_latency_mean<assert_stmt>v1_latency_nth<assert_stmt>v1_rps_rest<assert_stmt>v1_rps_grpc<assert_stmt>v2_latency_mean<assert_stmt>v2_latency_nth<assert_stmt>v2_rps_rest<assert_stmt>v2_rps_grpc<block_end>@[email protected]("argo_worfklows")<def_stmt>test_v1_seldon_data_types <block_start>sort_by=["concurrency" "apiType"]<line_sep># 10000 element array
data_size=10_000<line_sep>data=[100.0]<times>data_size<line_sep>benchmark_concurrency_list=["1" "50" "150"]<line_sep>image_list=["seldonio/seldontest_predict:1.10.0-dev"]<line_sep>data_ndarray={"data":{"ndarray":data}}<line_sep>data_tensor={"data":{"tensor":{"values":data "shape":[1 data_size]}}}<line_sep>array=np.array(data)<line_sep>tftensor_proto=tf.make_tensor_proto(array)<line_sep>tftensor_json_str=json_format.MessageToJson(tftensor_proto)<line_sep>tftensor_dict=json.loads(tftensor_json_str)<line_sep>data_tftensor={"data":{"tftensor":tftensor_dict}}<line_sep>df_ndarray=run_benchmark_and_capture_results(api_type_list=["rest" "grpc"] image_list=image_list benchmark_concurrency_list=benchmark_concurrency_list benchmark_data=data_ndarray )<line_sep>df_ndarray=df_ndarray.sort_values(sort_by)<line_sep>df_tensor=run_benchmark_and_capture_results(api_type_list=["rest" "grpc"] image_list=image_list benchmark_concurrency_list=benchmark_concurrency_list benchmark_data=data_tensor )<line_sep>df_tensor=df_tensor.sort_values(sort_by)<line_sep>df_tftensor=run_benchmark_and_capture_results(api_type_list=["rest" "grpc"] image_list=image_list benchmark_concurrency_list=benchmark_concurrency_list benchmark_data=data_tftensor )<line_sep>df_tftensor=df_tftensor.sort_values(sort_by)<line_sep>result_body="# Benchmark results - Testing Seldon V1 Data Types\n\n"<line_sep>result_body<augadd>"\n### Results for NDArray\n\n"<line_sep>result_body<augadd>str(df_ndarray.to_markdown())<line_sep>result_body<augadd>"\n### Results for Tensor\n\n"<line_sep>result_body<augadd>str(df_tensor.to_markdown())<line_sep>result_body<augadd>"\n### Results for TFTensor\n\n"<line_sep>result_body<augadd>str(df_tftensor.to_markdown())<line_sep>post_comment_in_pr(result_body)<block_end> |
# (c) Copyright [2017] Hewlett Packard Enterprise Development LP
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
* Validate that every benchmark in ``input-file`` has mandatory parameters
defined in ``params``
$ python result_processor.py validate --input-file= --params=
* Filter benchmarks in ``input-file`` by throwing away those not containing
specific parameters defined in ``params``. The filtered subset of benchmarks
is written to ``output-file``.
$ python result_processor.py filter --input-file= --params= --output-file=
* Update every benchmark in ``input-file`` by overriding values of specific
parameters which value are defined in ``params``. The updated subset of
benchmarks is written to ``output-file``.
$ python result_processor.py update --input-file= --params= --output-file=
"""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>json<import_from_stmt>collections defaultdict<import_from_stmt>dlbs.utils Six<import_from_stmt>dlbs.utils DictUtils<import_from_stmt>dlbs.processor Processor<def_stmt>load_json_file file_name<block_start>""" Loads a json object from a file.
:param str file_name: A file name to load JSON object from.
:return: A loaded JSON object.
"""<with_stmt>open(file_name)<as>file_obj<block_start><return>json.load(file_obj)<block_end><block_end><def_stmt>get_params params<block_start>"""Loads parameters specified by params.
:param str params: A JSON parsable string that defines how parameters
need to be loaded. See function comments on how it is
done.
:return: A dictionary with keys being parameters and values being their
values. Null value means no value - that's perfectly valid case.
:rtype: dict
The ``params`` is a JSON parsable string treated differently depending
on its type:
* ``string`` The value is a file name that contains JSON object
* ``list`` The list of parameters
* ``dict`` The dictionary that maps parameters to their values.
If type is list or loaded JSON object is a list, it gets converted to
dictionary with null values.
"""<line_sep>parsed_params=json.loads(params)<if_stmt>isinstance(parsed_params Six.string_types)<block_start>parsed_params=load_json_file(parsed_params)<block_end><if_stmt>isinstance(parsed_params list)<block_start>parsed_params=dict.fromkeys(parsed_params <none>)<block_end><if_stmt><not>isinstance(parsed_params dict)<block_start><raise>ValueError("Invalid type of object that holds parameters (%s)"%type(parsed_params))<block_end><return>parsed_params<block_end><def_stmt>validate_benchmarks args<block_start>"""Validates benchmarks ensuring every benchmark contains mandatory parameters.
Also make sure `exp.id`s are unique.
:param argparse args: Command line arguments.
The following command line arguments are used:
* ``args.input_file`` A file with benchmark results.
* ``args.params`` Specification of mandatory parameters. For format,
read comments of ``get_params`` function
"""<line_sep># Load benchmarks and parameters.
benchmarks=load_json_file(args.input_file)['data']<line_sep>params=get_params(args.params)<line_sep># Figure out missing parameters.
missing_params=defaultdict(<lambda>:0)<line_sep>exp_ids=set()# All identifiers of experiments
duplicates=<false># If two or more experiments have the same ID
<for_stmt>benchmark benchmarks<block_start>keys=[key<for>key params<if>key<not><in>benchmark]<for_stmt>key keys<block_start>missing_params[key]<augadd>1<block_end><if_stmt>'exp.id'<in>benchmark<block_start><if_stmt>benchmark['exp.id']<not><in>exp_ids<block_start>exp_ids.add(benchmark['exp.id'])<block_end><else_stmt><block_start>duplicates=<true><block_end><block_end><block_end># Report validation results.
print("Number of benchmarks: %d"%len(benchmarks))<if_stmt><not>missing_params<and><not>duplicates<block_start>print("Benchmark validation result: SUCCESS")<block_end><else_stmt><block_start>print("Benchmark validation result: FAILURE")<if_stmt>len(missing_params)<g>0<block_start>print("missing parameters:")<for_stmt>missing_param missing_params<block_start>print("\t%s: %d"%(missing_param missing_params[missing_param]))<block_end><block_end><if_stmt>duplicates<block_start>print("Several benchmarks have same identifier (exp.id)")<block_end><block_end><block_end><def_stmt>filter_benchmarks args<block_start>"""Filter benchmarks by removing those that do not contain provided parameters.
:param argparse args: Command line arguments.
The following command line arguments are used:
* ``args.input_file`` A file with benchmark results.
* ``args.params`` Specification of mandatory parameters. For format,
read comments of ``get_params`` function
* ``args.output_file`` An output file with updated benchmark results.
"""<line_sep># Load benchmarks and parameters
input_benchmarks=load_json_file(args.input_file)['data']<line_sep>params=get_params(args.params)<line_sep># Filter benchmarks
output_benchmarks=[]<for_stmt>input_benchmark input_benchmarks<block_start>keep=<true><for_stmt>key params<block_start><if_stmt>key<not><in>input_benchmark<or><not>input_benchmark[key]<block_start>keep=<false><line_sep><break><block_end><block_end><if_stmt>keep<block_start>output_benchmarks.append(input_benchmark)<block_end><block_end># Report results and serialize
print("Number of input benchmarks: %d"%len(input_benchmarks))<line_sep>print("Number of output benchmarks: %d"%len(output_benchmarks))<line_sep>DictUtils.dump_json_to_file({"data":output_benchmarks} args.output_file)<block_end><def_stmt>update_benchmarks args<block_start>"""Update benchmarks by overriding parameters provided by a user.
:param argparse args: Command line arguments.
The following command line arguments are used:
* ``args.input_file`` A file with benchmark results.
* ``args.params`` Specification of mandatory parameters. For format,
read comments of ``get_params`` function
* ``args.output_file`` An output file with updated benchmark results.
"""<line_sep># Load benchmarks and parameters.
benchmarks=load_json_file(args.input_file)['data']<line_sep>prefix='__'<line_sep>params={prefix+k:v<for>k,v get_params(args.params).items()}<line_sep># Add prefixed parameters to all benchmarks.
<for_stmt>benchmark benchmarks<block_start>benchmark.update(params)<block_end># Process and compute variables
Processor().compute_variables(benchmarks)<line_sep># Replace prefix overwriting variables in case of a conflict
prefixed_keys=params.keys()<line_sep>prefix_len=len(prefix)<line_sep>output_benchmarks=[]<for_stmt>benchmark benchmarks<block_start><for_stmt>k prefixed_keys<block_start>benchmark[k[prefix_len:]]=benchmark[k]<del_stmt>benchmark[k]<block_end><if_stmt>benchmark['exp.model']<ne>''<block_start>output_benchmarks.append(benchmark)<block_end><block_end>benchmarks=output_benchmarks<line_sep># Serialize updated benchmarks.
DictUtils.dump_json_to_file({"data":benchmarks} args.output_file)<block_end><def_stmt>main <block_start>"""Main function - parses command line args and processes benchmarks."""<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('action' type=str help="Action to perform ('validate', 'filter', 'update')")<line_sep>parser.add_argument('--input_file' '--input-file' type=str required=<true> default=<none> help='An input JSON file. This file is never modified.')<line_sep>parser.add_argument('--params' type=str required=<false> default=<none> help="JSON array or object OR string. If string it's considered as a file name.")<line_sep>parser.add_argument('--output_file' '--output-file' required=<false> default=<false> help="Output JSON file, possible, modified version of an input JSON file.")<line_sep>args=parser.parse_args()<if_stmt>args.action<eq>'validate'<block_start>validate_benchmarks(args)<block_end><elif_stmt>args.action<eq>'filter'<block_start>filter_benchmarks(args)<block_end><elif_stmt>args.action<eq>'update'<block_start>update_benchmarks(args)<block_end><else_stmt><block_start><raise>ValueError("Action parameter has invalid value (%s). "<concat>"Must be one of ['validate', 'filter', 'update']"%args.action)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end> |
# Copyright 2014 ARM Limited
#
# Licensed under the Apache License, Version 2.0
# See LICENSE file for details.
# standard library modules, , ,
<import_stmt>argparse<import_stmt>logging<import_stmt>os<line_sep># version, , represent versions and specifications, internal
<import_from_stmt>yotta.lib version<line_sep># Component, , represents an installed component, internal
<import_from_stmt>yotta.lib component<line_sep># Target, , represents an installed target, internal
<import_from_stmt>yotta.lib target<line_sep># vcs, , represent version controlled directories, internal
<import_from_stmt>yotta.lib vcs<def_stmt>addOptions parser<block_start><def_stmt>patchType s<block_start><if_stmt>s.lower()<in>('major' 'minor' 'patch')<block_start><return>s.lower()<block_end><try_stmt><block_start><return>version.Version(s)<block_end><except_stmt><block_start><raise>argparse.ArgumentTypeError('"%s" is not a valid version (expected patch, major, minor, or something like 1.2.3)'%s)<block_end><block_end>parser.add_argument('action' type=patchType nargs='?' help='[patch | minor | major | <version>]')<block_end><def_stmt>execCommand args following_args<block_start>wd=os.getcwd()<line_sep>c=component.Component(wd)<line_sep># skip testing for target if we already found a component
t=<none><if>c<else>target.Target(wd)<if_stmt><not>(c<or>t)<block_start>logging.debug(str(c.getError()))<if_stmt>t<block_start>logging.debug(str(t.getError()))<block_end>logging.error('The current directory does not contain a valid module or target.')<line_sep><return>1<block_end><else_stmt># only needed separate objects in order to display errors
<block_start>p=(c<or>t)<block_end><if_stmt>args.action<block_start><try_stmt><block_start><if_stmt><not>p.vcsIsClean()<block_start>logging.error('The working directory is not clean')<line_sep><return>1<block_end>v=p.getVersion()<line_sep>pre_script_env={'YOTTA_OLD_VERSION':str(v)}<if_stmt>args.action<in>('major' 'minor' 'patch')<block_start>v.bump(args.action)<block_end><else_stmt><block_start>v=args.action<block_end>pre_script_env['YOTTA_NEW_VERSION']=str(v)<line_sep>errcode=p.runScript('preVersion' pre_script_env)<if_stmt>errcode<block_start><return>errcode<block_end>logging.info('@%s'%v)<line_sep>p.setVersion(v)<line_sep>p.writeDescription()<line_sep>errcode=p.runScript('postVersion')<if_stmt>errcode<block_start><return>errcode<block_end>p.commitVCS(tag='v'+str(v))<block_end><except_stmt>vcs.VCSError<as>e<block_start>logging.error(e)<block_end><block_end><else_stmt><block_start>logging.info(str(p.getVersion()))<block_end><block_end> |