ext      stringclasses  (9 values)
sha      stringlengths  (40 to 40)
content  stringlengths  (3 to 1.04M)
py
1a58fdb444fe7c06026933d0dbdf8865e16517ec
from django.db import models

# Create your models here.


class Saison(models.Model):
    nom = models.CharField(max_length=100)
    Date = models.DateTimeField()

    def __str__(self):
        return self.nom


class Categorie(models.Model):
    age_min = models.IntegerField()
    age_max = models.IntegerField()
    categorie = models.CharField(max_length=100)
    tarif_intra = models.IntegerField()
    tarif_extra = models.IntegerField()

    def __str__(self):
        return self.categorie


class Joueur(models.Model):
    nom = models.CharField(max_length=100)
    prenom = models.CharField(max_length=100)
    adresse = models.CharField(max_length=100)
    code_postal = models.IntegerField()
    commune = models.CharField(max_length=100)
    intra_extra = models.CharField(max_length=10)
    categorie = models.CharField(max_length=100)
    prix = models.IntegerField()
    #categorie = models.ForeignKey()
    #saison = models.ForeignKey()
    age = models.IntegerField()
    naissance = models.DateTimeField()
    licence = models.IntegerField()
    genre = models.CharField(max_length=1)
    taille = models.IntegerField()

    def __str__(self):
        return '{} {}'.format(self.nom, self.prenom)
py
1a58fde27fa6a4d7ddc65c80661f3f4e6b74eae7
log_level = 'INFO'
load_from = None
resume_from = None
dist_params = dict(backend='nccl')
workflow = [('train', 1)]
checkpoint_config = dict(interval=50)
evaluation = dict(interval=50, metric='mAP', key_indicator='AP')

optimizer = dict(
    type='Adam',
    lr=0.0015,
)
optimizer_config = dict(grad_clip=None)
# learning policy
lr_config = dict(
    policy='step',
    warmup='linear',
    warmup_iters=500,
    warmup_ratio=0.001,
    step=[200, 260])
total_epochs = 300
log_config = dict(
    interval=50,
    hooks=[
        dict(type='TextLoggerHook'),
        # dict(type='TensorboardLoggerHook')
    ])

channel_cfg = dict(
    dataset_joints=17,
    dataset_channel=[
        [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16],
    ],
    inference_channel=[
        0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16
    ])

data_cfg = dict(
    image_size=512,
    base_size=256,
    base_sigma=2,
    heatmap_size=[128],
    num_joints=channel_cfg['dataset_joints'],
    dataset_channel=channel_cfg['dataset_channel'],
    inference_channel=channel_cfg['inference_channel'],
    num_scales=1,
    scale_aware_sigma=False,
)

# model settings
model = dict(
    type='BottomUp',
    pretrained='mmcls://mobilenet_v2',
    backbone=dict(type='MobileNetV2', widen_factor=1., out_indices=(7, )),
    keypoint_head=dict(
        type='BottomUpSimpleHead',
        in_channels=1280,
        num_joints=17,
        tag_per_joint=True,
        with_ae_loss=[True]),
    train_cfg=dict(
        num_joints=channel_cfg['dataset_joints'],
        img_size=data_cfg['image_size']),
    test_cfg=dict(
        num_joints=channel_cfg['dataset_joints'],
        max_num_people=30,
        scale_factor=[1],
        with_heatmaps=[True],
        with_ae=[True],
        project2image=True,
        nms_kernel=5,
        nms_padding=2,
        tag_per_joint=True,
        detection_threshold=0.1,
        tag_threshold=1,
        use_detection_val=True,
        ignore_too_much=False,
        adjust=True,
        refine=True,
        flip_test=True),
    loss_pose=dict(
        type='MultiLossFactory',
        num_joints=17,
        num_stages=1,
        ae_loss_type='exp',
        with_ae_loss=[True],
        push_loss_factor=[0.001],
        pull_loss_factor=[0.001],
        with_heatmaps_loss=[True],
        heatmaps_loss_factor=[1.0],
    ),
)

train_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(
        type='BottomUpRandomAffine',
        rot_factor=30,
        scale_factor=[0.75, 1.5],
        scale_type='short',
        trans_factor=40),
    dict(type='BottomUpRandomFlip', flip_prob=0.5),
    dict(type='ToTensor'),
    dict(
        type='NormalizeTensor',
        mean=[0.485, 0.456, 0.406],
        std=[0.229, 0.224, 0.225]),
    dict(
        type='BottomUpGenerateTarget',
        sigma=2,
        max_num_people=30,
    ),
    dict(
        type='Collect',
        keys=['img', 'joints', 'targets', 'masks'],
        meta_keys=[]),
]

val_pipeline = [
    dict(type='LoadImageFromFile'),
    dict(type='BottomUpGetImgSize', test_scale_factor=[1]),
    dict(
        type='BottomUpResizeAlign',
        transforms=[
            dict(type='ToTensor'),
            dict(
                type='NormalizeTensor',
                mean=[0.485, 0.456, 0.406],
                std=[0.229, 0.224, 0.225]),
        ]),
    dict(
        type='Collect',
        keys=['img'],
        meta_keys=[
            'image_file', 'aug_data', 'test_scale_factor', 'base_size',
            'center', 'scale', 'flip_index'
        ]),
]

test_pipeline = val_pipeline

data_root = 'data/coco'
data = dict(
    samples_per_gpu=24,
    workers_per_gpu=1,
    train=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_train2017.json',
        img_prefix=f'{data_root}/train2017/',
        data_cfg=data_cfg,
        pipeline=train_pipeline),
    val=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
    test=dict(
        type='BottomUpCocoDataset',
        ann_file=f'{data_root}/annotations/person_keypoints_val2017.json',
        img_prefix=f'{data_root}/val2017/',
        data_cfg=data_cfg,
        pipeline=val_pipeline),
)
py
1a58fe887b6b60eea23b89d913f935e450b82c24
from __future__ import absolute_import import os import shutil import tempfile import unittest import pytest import cwltool.expression as expr import cwltool.pathmapper import cwltool.process import cwltool.workflow from cwltool.main import main from cwltool.utils import onWindows from .util import get_data, needs_docker, windows_needs_docker @needs_docker class TestListing(unittest.TestCase): def test_missing_enable_ext(self): # Require that --enable-ext is provided. self.assertEquals(main([get_data('tests/wf/listing_deep.cwl'), get_data('tests/listing-job.yml')]), 1) def test_listing_deep(self): # Should succeed. self.assertEquals(main(["--enable-ext", get_data('tests/wf/listing_deep.cwl'), get_data('tests/listing-job.yml')]), 0) def test_listing_shallow(self): # This fails on purpose, because it tries to access listing in a subdirectory the same way that listing_deep does, # but it shouldn't be expanded. self.assertEquals(main(["--enable-ext", get_data('tests/wf/listing_shallow.cwl'), get_data('tests/listing-job.yml')]), 1) def test_listing_none(self): # This fails on purpose, because it tries to access listing but it shouldn't be there. self.assertEquals(main(["--enable-ext", get_data('tests/wf/listing_none.cwl'), get_data('tests/listing-job.yml')]), 1) def test_listing_v1_0(self): # Default behavior in 1.0 is deep expansion. self.assertEquals(main([get_data('tests/wf/listing_v1_0.cwl'), get_data('tests/listing-job.yml')]), 0) # def test_listing_v1_1(self): # # Default behavior in 1.1 will be no expansion # self.assertEquals(main([get_data('tests/wf/listing_v1_1.cwl'), get_data('tests/listing-job.yml')]), 1) @pytest.mark.skipif(onWindows(), reason="InplaceUpdate uses symlinks,does not run on windows without admin privileges") class TestInplaceUpdate(unittest.TestCase): def test_updateval(self): try: tmp = tempfile.mkdtemp() with open(os.path.join(tmp, "value"), "w") as f: f.write("1") out = tempfile.mkdtemp() self.assertEquals(main(["--outdir", out, get_data('tests/wf/updateval.cwl'), "-r", os.path.join(tmp, "value")]), 0) with open(os.path.join(tmp, "value"), "r") as f: self.assertEquals("1", f.read()) with open(os.path.join(out, "value"), "r") as f: self.assertEquals("2", f.read()) finally: shutil.rmtree(tmp) shutil.rmtree(out) def test_updateval_inplace(self): try: tmp = tempfile.mkdtemp() with open(os.path.join(tmp, "value"), "w") as f: f.write("1") out = tempfile.mkdtemp() self.assertEquals(main(["--enable-ext", "--leave-outputs", "--outdir", out, get_data('tests/wf/updateval_inplace.cwl'), "-r", os.path.join(tmp, "value")]), 0) with open(os.path.join(tmp, "value"), "r") as f: self.assertEquals("2", f.read()) self.assertFalse(os.path.exists(os.path.join(out, "value"))) finally: shutil.rmtree(tmp) shutil.rmtree(out) def test_write_write_conflict(self): try: tmp = tempfile.mkdtemp() with open(os.path.join(tmp, "value"), "w") as f: f.write("1") self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut.cwl'), "-a", os.path.join(tmp, "value")]), 1) with open(os.path.join(tmp, "value"), "r") as f: self.assertEquals("2", f.read()) finally: shutil.rmtree(tmp) def test_sequencing(self): try: tmp = tempfile.mkdtemp() with open(os.path.join(tmp, "value"), "w") as f: f.write("1") self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut2.cwl'), "-a", os.path.join(tmp, "value")]), 0) with open(os.path.join(tmp, "value"), "r") as f: self.assertEquals("3", f.read()) finally: shutil.rmtree(tmp) # def test_read_write_conflict(self): # try: # tmp = tempfile.mkdtemp() # with 
open(os.path.join(tmp, "value"), "w") as f: # f.write("1") # self.assertEquals(main(["--enable-ext", get_data('tests/wf/mut3.cwl'), "-a", os.path.join(tmp, "value")]), 0) # finally: # shutil.rmtree(tmp) def test_updatedir(self): try: tmp = tempfile.mkdtemp() with open(os.path.join(tmp, "value"), "w") as f: f.write("1") out = tempfile.mkdtemp() self.assertFalse(os.path.exists(os.path.join(tmp, "blurb"))) self.assertFalse(os.path.exists(os.path.join(out, "blurb"))) self.assertEquals(main(["--outdir", out, get_data('tests/wf/updatedir.cwl'), "-r", tmp]), 0) self.assertFalse(os.path.exists(os.path.join(tmp, "blurb"))) self.assertTrue(os.path.exists(os.path.join(out, "inp/blurb"))) finally: shutil.rmtree(tmp) shutil.rmtree(out) def test_updatedir_inplace(self): try: tmp = tempfile.mkdtemp() with open(os.path.join(tmp, "value"), "w") as f: f.write("1") out = tempfile.mkdtemp() self.assertFalse(os.path.exists(os.path.join(tmp, "blurb"))) self.assertFalse(os.path.exists(os.path.join(out, "blurb"))) self.assertEquals(main(["--enable-ext", "--leave-outputs", "--outdir", out, get_data('tests/wf/updatedir_inplace.cwl'), "-r", tmp]), 0) self.assertTrue(os.path.exists(os.path.join(tmp, "blurb"))) self.assertFalse(os.path.exists(os.path.join(out, "inp/blurb"))) finally: shutil.rmtree(tmp) shutil.rmtree(out) class TestV1_1backports(unittest.TestCase): @needs_docker def test_require_prefix_networkaccess(self): self.assertEquals(main(["--enable-ext", get_data('tests/wf/networkaccess.cwl')]), 0) self.assertEquals(main([get_data('tests/wf/networkaccess.cwl')]), 1) self.assertEquals(main(["--enable-ext", get_data('tests/wf/networkaccess-fail.cwl')]), 1) @needs_docker def test_require_prefix_workreuse(self): self.assertEquals(main(["--enable-ext", get_data('tests/wf/workreuse.cwl')]), 0) self.assertEquals(main([get_data('tests/wf/workreuse.cwl')]), 1) self.assertEquals(main(["--enable-ext", get_data('tests/wf/workreuse-fail.cwl')]), 1) @windows_needs_docker def test_require_prefix_timelimit(self): self.assertEquals(main(["--enable-ext", get_data('tests/wf/timelimit.cwl')]), 0) self.assertEquals(main([get_data('tests/wf/timelimit.cwl')]), 1) self.assertEquals(main(["--enable-ext", get_data('tests/wf/timelimit-fail.cwl')]), 1)
py
1a58fe8bcc5c312c36026452dd80b57cb7af1622
#!/usr/bin/env python
# coding: utf-8

# In[1]:

"""
This script checks the zip code format and whether each code begins with 68,
as expected for the City of Omaha.
"""

import xml.etree.cElementTree as ET
from collections import defaultdict
import re
import pprint

osmfile = 'omaha_nebraska'
zip_type_re = re.compile(r'\d{5}$')  # 5 digit zip code, no dashes


def audit_ziptype(zip_types, zipcode):
    # zip codes are strings, so compare the first two characters against '68'
    if zipcode[0:2] != '68':
        zip_types[zipcode[0:2]].add(zipcode)


def is_zipcode(elem):
    return (elem.attrib['k'] == "addr:postcode")


def audit_zip(osmfile):
    osm_file = open(osmfile, "r")
    zip_types = defaultdict(set)
    for event, elem in ET.iterparse(osm_file, events=("start",)):
        if elem.tag == "node" or elem.tag == "way":
            for tag in elem.iter("tag"):
                if is_zipcode(tag):
                    audit_ziptype(zip_types, tag.attrib['v'])
    osm_file.close()
    return zip_types


zip_print = audit_zip(osmfile)


def test():
    pprint.pprint(dict(zip_print))


if __name__ == '__main__':
    test()


def update_zipcode(zipcode):
    """Update the zip codes by replacing the wrong zip codes with fixed ones."""
    if re.findall(r'(^\d{5})-\d{4}$', zipcode):
        valid_zipcode = re.findall(r'(^\d{5})-\d{4}$', zipcode)[0]
        return valid_zipcode
    else:
        return zipcode


def test_zip():
    for zips, ways in zip_print.items():
        for name in ways:
            better_name = update_zipcode(name)
            print(name, "=>", better_name)


if __name__ == '__main__':
    test_zip()

# In[ ]:
py
1a58feaf4d020c1e922a81895b72ebd01c7a7f05
class MockCache(): def __init__(self): self.d = {} def set(self, key, value, timeout=0, version=None): self.d[key] = value def has_key(self, k, version=None): return self.d.has_key(k) def get_many(self, keys, version=None): d = {} for k in keys: val = self.get(k, version=version) if val is not None: d[k] = val return d def clear(self): self.d = {} def validate_key(self, key): return True def set_many(self, data, timeout=None, version=None): for key, value in data.items(): self.set(key, value, timeout=timeout, version=version) def get(self, key, default=None, version=None): return self.d.get(key,default) def delete(self, key, version=None): try: del self.d[key] except KeyError: pass class NamespacedCache(object): """ Structure is a __root-keys__ -> ( rootkey1, rootkey2 ) "__keys-list__" + root-key -> ( subkey, subkey.something, subkey.something.a, subkey.a ) main.footer.links main.footer.articles root = main base = footer.links root = main base = footer.articles """ # should not contain the divisor namespace_root_key = "__root-keys__" # contains all the root keys namespace_base_prefix = "__keys-list__" # contains all the base keys for a root key # default to . like namespace.object.specific namespace_divisor = "." def set_cache(self, cache): """ wraps a valid cache and add a simple namespace support """ self.cache = cache def _ns_store(self, key, key_data, version=None): # Stores in a set of keys the current key-data data = self.cache.get(key, set(), version=version) data.add(key_data) self.cache.set(key, data, version=version) def _ns_delete_(self, key, key_remove, version=None): # deletes a key from namespace store data = self.cache.get(key, set(), version=version) try: data.remove(key_remove) except KeyError: pass else: self.cache.set(key, data, version=version) def _store_base_key(self, root, base, version=None): self._ns_store(self.namespace_base_prefix+root, base, version=version) def _store_root_key(self, root, version=None): self._ns_store(self.namespace_root_key, root, version=version) def _split(self, key): sp = key.split(self.namespace_divisor) root = sp[0] base = self.namespace_divisor.join(sp[1:]) return root, base def _get_root_keys(self, root): base_keys = self.cache.get(self.namespace_base_prefix+root, set()) keys = [] for key in base_keys: if key == "": #stored value on the root key keys.append(root) else: keys.append(root+self.namespace_divisor+key) return keys def _get_all_keys(self): root_keys = self.cache.get(self.namespace_root_key, set()) keys = map(lambda k:self._get_root_keys(k), root_keys) return reduce(lambda x,y:x.union(y), keys, set()) def get(self, key, default=None, version=None): return self.cache.get(key=key, default=default, version=None) def set(self, key, value, timeout=0, version=None): """ """ self.cache.set(key, value, timeout=timeout, version=version) root, base = self._split(key) self._store_base_key(root, base, version=version) self._store_root_key(root, version=version) def delete(self, key, version=None): """ this method can't delete namespaced roots, for that used delete_keys """ self.cache.delete(key=key) root, base = self._split(key) self._ns_delete_(self.namespace_base_prefix+root, base, version=version) def get_many(self, keys, version=None): return self.cache.get_many(keys, version) def has_key(self, key, version=None): return self.cache.has_key(key, version=version) def add(self, key, value, timeout=None, version=None): return self.cache.add(self, key, value, timeout=timeout, version=version) def incr(self, key, delta=1, version=None): 
return self.incr(self, key, delta=delta, version=version) def decr(self, key, delta=1, version=None): return self.decr(self, key, delta=delta, version=version) def set_many(self, data, timeout=None, version=None): # we do a lot of key changing before saving for key, value in data.items(): self.set(key, value, timeout=timeout, version=version) def delete_many(self, keys, version=None): # we do a lot of key changing before deleting for key in keys: self.delete(key, version=version) def clear(self): return self.cache.clear() def validate_key(self, key): return self.cache.validate_key(key) def incr_version(self, key, delta=1, version=None): raise NotImplementedError def decr_version(self, key, delta=1, version=None): raise NotImplementedError def get_keys(self, pattern=None): # pattern # "root" all the sub trees under root + "root" key if present # "root.subroot" all the sub trees under subroot + subroot if present # "root." all the sub tress under root # "root.subroot." all the sub trees under subroot if pattern is None: return list(self._get_all_keys()) root, base = self._split(pattern) keys = self._get_root_keys(root) if root.endswith(self.namespace_divisor): try: keys.remove(root) except ValueError: pass return filter( lambda x:x.startswith(pattern), keys ) def delete_keys(self, pattern=None): keys = self.get_keys(pattern=pattern) for key in keys: self.delete(key) try: from django.core.cache import BaseCache except: BaseCache = object class NamespacedCacheDjango(NamespacedCache, BaseCache): def __init__(self, cache_name, *args, **kwargs): BaseCache.__init__(self, *args, **kwargs) if cache_name == "": cache_name = "default" try: #django > 1.7 support from django.core.cache import caches except ImportError: from django.core.cache import get_cache self.set_cache(get_cache(cache_name)) else: self.set_cache(caches[cache_name])
py
1a58ff42e233001cbbb0ccd3ac20b40c785e949c
import numpy as np from statsmodels.graphics.plottools import rainbow import utils def interaction_plot(x, trace, response, func=np.mean, ax=None, plottype='b', xlabel=None, ylabel=None, colors = [], markers = [], linestyles = [], legendloc='best', legendtitle=None, **kwargs): """ Interaction plot for factor level statistics uses pandas.DataFrame to calculate an `aggregate` statistic for each level of the factor or group given by `trace`. Parameters ---------- x : array-like The `x` factor levels are the x-axis. If a `pandas.Series` is given its name will be used in `xlabel` if `xlabel` is None. trace : array-like The `trace` factor levels will form the trace. If `trace` is a `pandas.Series` its name will be used as the `legendtitle` if `legendtitle` is None. response : array-like The reponse variable. If a `pandas.Series` is given its name will be used in `ylabel` if `ylabel` is None. func : function Anything accepted by `pandas.DataFrame.aggregate`. This is applied to the response variable grouped by the trace levels. plottype : str {'line', 'scatter', 'both'}, optional The type of plot to return. Can be 'l', 's', or 'b' ax : axes, optional Matplotlib axes instance xlabel : str, optional Label to use for `x`. Default is 'X'. If `x` is a `pandas.Series` it will use the series names. ylabel : str, optional Label to use for `response`. Default is 'func of response'. If `response` is a `pandas.Series` it will use the series names. colors : list, optional If given, must have length == number of levels in trace. linestyles : list, optional If given, must have length == number of levels in trace. markers : list, optional If given, must have length == number of lovels in trace kwargs These will be passed to the plot command used either plot or scatter. If you want to control the overall plotting options, use kwargs. Returns ------- fig : Figure The figure given by `ax.figure` or a new instance. Examples -------- >>> import numpy as np >>> np.random.seed(12345) >>> weight = np.random.randint(1,4,size=60) >>> duration = np.random.randint(1,3,size=60) >>> days = np.log(np.random.randint(1,30, size=60)) >>> fig = interaction_plot(weight, duration, days, ... colors=['red','blue'], markers=['D','^'], ms=10) >>> import matplotlib.pyplot as plt >>> plt.show() .. plot:: import numpy as np from statsmodels.graphics.factorplots import interaction_plot np.random.seed(12345) weight = np.random.randint(1,4,size=60) duration = np.random.randint(1,3,size=60) days = np.log(np.random.randint(1,30, size=60)) fig = interaction_plot(weight, duration, days, colors=['red','blue'], markers=['D','^'], ms=10) import matplotlib.pyplot as plt #plt.show() """ from pandas import DataFrame fig, ax = utils.create_mpl_ax(ax) if ylabel is None: try: # did we get a pandas.Series response_name = response.name except: response_name = 'response' #NOTE: py3 compatible? 
ylabel = '%s of %s' % (func.func_name, response_name) if xlabel is None: try: x_name = x.name except: x_name = 'X' if legendtitle is None: try: legendtitle = trace.name except: legentitle = 'Trace' ax.set_ylabel(ylabel) ax.set_xlabel(x_name) data = DataFrame(dict(x=x, trace=trace, response=response)) plot_data = data.groupby(['trace', 'x']).aggregate(func).reset_index() # check plot args n_trace = len(plot_data['trace'].unique()) if linestyles: try: assert len(linestyles) == n_trace except AssertionError, err: raise ValueError("Must be a linestyle for each trace level") else: # set a default linestyles = ['-'] * n_trace if markers: try: assert len(markers) == n_trace except AssertionError, err: raise ValueError("Must be a linestyle for each trace level") else: # set a default markers = ['.'] * n_trace if colors: try: assert len(colors) == n_trace except AssertionError, err: raise ValueError("Must be a linestyle for each trace level") else: # set a default #TODO: how to get n_trace different colors? colors = rainbow(n_trace) if plottype == 'both' or plottype == 'b': for i, (values, group) in enumerate(plot_data.groupby(['trace'])): # trace label label = str(group['trace'].values[0]) ax.plot(group['x'], group['response'], color=colors[i], marker=markers[i], label=label, linestyle=linestyles[i], **kwargs) elif plottype == 'line' or plottype == 'l': for i, (values, group) in enumerate(plot_data.groupby(['trace'])): # trace label label = str(group['trace'].values[0]) ax.plot(group['x'], group['response'], color=colors[i], label=label, linestyle=linestyles[i], **kwargs) elif plottype == 'scatter' or plottype == 's': for i, (values, group) in enumerate(plot_data.groupby(['trace'])): # trace label label = str(group['trace'].values[0]) ax.scatter(group['x'], group['response'], color=colors[i], label=label, marker=markers[i], **kwargs) else: raise ValueError("Plot type %s not understood" % plottype) ax.legend(loc=legendloc, title=legendtitle) ax.margins(.1) return fig
py
1a58ffd8a518eb162b4896d649086e3e48f85688
import logging

import examples.basic.main as basic
import sim.docker as docker
from sim.core import Environment
from sim.faas import FunctionDefinition, FunctionSimulator, FunctionReplica, FunctionRequest
from sim.faassim import Simulation

logger = logging.getLogger(__name__)


def main():
    logging.basicConfig(level=logging.INFO)

    # prepare simulation with topology and benchmark from basic example
    sim = Simulation(basic.example_topology(), basic.ExampleBenchmark())

    # override the SimulatorFactory factory
    sim.create_simulator_factory = CustomSimulatorFactory

    # run the simulation
    sim.run()


class CustomSimulatorFactory:

    def __init__(self) -> None:
        super().__init__()

    def create(self, env: Environment, fn: FunctionDefinition) -> FunctionSimulator:
        return MyFunctionSimulator()


class MyFunctionSimulator(FunctionSimulator):

    def deploy(self, env: Environment, replica: FunctionReplica):
        # simulate a docker pull command for deploying the function (also done by sim.faassim.DockerDeploySimMixin)
        yield from docker.pull(env, replica.function.image, replica.node.ether_node)

    def startup(self, env: Environment, replica: FunctionReplica):
        logger.info('[simtime=%.2f] starting up function replica for function %s', env.now, replica.function.name)
        # you could create a very fine-grained setup routines here
        yield env.timeout(10)  # simulate docker startup

    def setup(self, env: Environment, replica: FunctionReplica):
        # no setup routine
        yield env.timeout(0)

    def invoke(self, env: Environment, replica: FunctionReplica, request: FunctionRequest):
        # you would probably either create one simulator per function, or use a generalized simulator, this is just
        # to demonstrate how the simulators are used to encapsulate simulator behavior.
        logger.info('[simtime=%.2f] invoking function %s on node %s', env.now, request, replica.node.name)

        if replica.function.name == 'python-pi':
            if replica.node.name.startswith('rpi3'):  # those are nodes we created in basic.example_topology()
                yield env.timeout(20)  # invoking this function takes 20 seconds on a raspberry pi
            else:
                yield env.timeout(2)  # invoking this function takes 2 seconds on all other nodes in the cluster
        elif replica.function.name == 'resnet50-inference':
            yield env.timeout(0.5)  # invoking this function takes 500 ms
        else:
            yield env.timeout(0)

    def teardown(self, env: Environment, replica: FunctionReplica):
        yield env.timeout(0)


if __name__ == '__main__':
    main()
py
1a5900ef0c7e66617bbbf5505f1d96c2be3a4101
from .. utils import TranspileTestCase, BuiltinFunctionTestCase


class BinTests(TranspileTestCase):
    def test_int_but_no_index(self):
        self.assertCodeExecution("""
            class IntLike:
                def __init__(self, val):
                    self.val = val
                def __int__(self):
                    return self.val
            x = IntLike(5)
            print(bin(x))
            """, run_in_function=False)


class BuiltinBinFunctionTests(BuiltinFunctionTestCase, TranspileTestCase):
    functions = ["bin"]

    not_implemented = [
        'test_int',
    ]
py
1a5901234b8b202dc80ae438fde4b42200cc8175
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import math import torch import torch.nn.functional as F from fairseq import utils from fairseq.criterions import LegacyFairseqCriterion, register_criterion from fairseq.data import encoders @register_criterion("wsc") class WSCCriterion(LegacyFairseqCriterion): def __init__(self, args, task): super().__init__(args, task) if self.args.save_predictions is not None: self.prediction_h = open(self.args.save_predictions, "w") else: self.prediction_h = None self.bpe = encoders.build_bpe(args) self.tokenizer = encoders.build_tokenizer(args) def __del__(self): if self.prediction_h is not None: self.prediction_h.close() @staticmethod def add_args(parser): """Add criterion-specific arguments to the parser.""" parser.add_argument("--wsc-margin-alpha", type=float, metavar="A", default=1.0) parser.add_argument("--wsc-margin-beta", type=float, metavar="B", default=0.0) parser.add_argument( "--wsc-cross-entropy", action="store_true", help="use cross entropy formulation instead of margin loss", ) parser.add_argument( "--save-predictions", metavar="FILE", help="file to save predictions to" ) def get_masked_input(self, tokens, mask): masked_tokens = tokens.clone() masked_tokens[mask] = self.task.mask return masked_tokens def get_lprobs(self, model, tokens, mask): logits, _ = model(src_tokens=self.get_masked_input(tokens, mask)) lprobs = F.log_softmax(logits, dim=-1, dtype=torch.float) scores = lprobs.gather(2, tokens.unsqueeze(-1)).squeeze(-1) mask = mask.type_as(scores) scores = (scores * mask).sum(dim=-1) / mask.sum(dim=-1) return scores def get_loss(self, query_lprobs, cand_lprobs): if self.args.wsc_cross_entropy: return F.cross_entropy( torch.cat([query_lprobs, cand_lprobs]).unsqueeze(0), query_lprobs.new([0]).long(), ) else: return ( -query_lprobs + self.args.wsc_margin_alpha * (cand_lprobs - query_lprobs + self.args.wsc_margin_beta).clamp(min=0) ).sum() def forward(self, model, sample, reduce=True): # compute loss and accuracy loss, nloss = 0.0, 0 ncorrect, nqueries = 0, 0 for i, label in enumerate(sample["labels"]): query_lprobs = self.get_lprobs( model, sample["query_tokens"][i].unsqueeze(0), sample["query_masks"][i].unsqueeze(0), ) cand_lprobs = self.get_lprobs( model, sample["candidate_tokens"][i], sample["candidate_masks"][i], ) pred = (query_lprobs >= cand_lprobs).all().item() if label is not None: label = 1 if label else 0 ncorrect += 1 if pred == label else 0 nqueries += 1 if label: # only compute a loss for positive instances nloss += 1 loss += self.get_loss(query_lprobs, cand_lprobs) id = sample["id"][i].item() if self.prediction_h is not None: print("{}\t{}\t{}".format(id, pred, label), file=self.prediction_h) if nloss == 0: loss = torch.tensor(0.0, requires_grad=True) sample_size = nqueries if nqueries > 0 else 1 logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, "ncorrect": ncorrect, "nqueries": nqueries, } return loss, sample_size, logging_output @staticmethod def aggregate_logging_outputs(logging_outputs): """Aggregate logging outputs from data parallel training.""" loss_sum = sum(log.get("loss", 0) for log in logging_outputs) ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) sample_size = sum(log.get("sample_size", 
0) for log in logging_outputs) agg_output = { "loss": loss_sum / sample_size / math.log(2), "ntokens": ntokens, "nsentences": nsentences, "sample_size": sample_size, } ncorrect = sum(log.get("ncorrect", 0) for log in logging_outputs) nqueries = sum(log.get("nqueries", 0) for log in logging_outputs) if nqueries > 0: agg_output["accuracy"] = ncorrect / float(nqueries) return agg_output @register_criterion("winogrande") class WinograndeCriterion(WSCCriterion): def forward(self, model, sample, reduce=True): # compute loss and accuracy query_lprobs = self.get_lprobs( model, sample["query_tokens"], sample["query_masks"], ) cand_lprobs = self.get_lprobs( model, sample["candidate_tokens"], sample["candidate_masks"], ) pred = query_lprobs >= cand_lprobs loss = self.get_loss(query_lprobs, cand_lprobs) sample_size = sample["query_tokens"].size(0) ncorrect = pred.sum().item() logging_output = { "loss": utils.item(loss.data) if reduce else loss.data, "ntokens": sample["ntokens"], "nsentences": sample["nsentences"], "sample_size": sample_size, "ncorrect": ncorrect, "nqueries": sample_size, } return loss, sample_size, logging_output
py
1a5901693ddcfbe3bc8a53b4b87156cb05a2433f
# Generated by Django 2.0.3 on 2018-03-21 17:24

from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ('patientbasicinfo', '0009_description'),
    ]

    operations = [
        migrations.RemoveField(
            model_name='description',
            name='identity_fk',
        ),
        migrations.DeleteModel(
            name='Description',
        ),
    ]
py
1a5901f8ddf5922a7081bb15a425dba2c28a6fb6
# -*- coding: utf-8 -*- """ author: 左想 date: 2018-01-11 """ import cv2 import random import numpy as np from math import fabs, sin, cos, radians from PIL import Image, ImageDraw, ImageEnhance def img_rotation(file_path, output, degree, is_full): """ 对图片进行旋转,并另存为旋转后的图片; :param file_path: String 图片路径; :param output: String 输出旋转后的图片路径; :param degree: String 旋转角度; :param is_full: Bool 是否保留整张图片进行旋转。 True则在旋转时会将尺寸进行扩大以保留完整的图片; False则在旋转时保留原始图片的尺寸进行旋转; :return: """ im = cv2.imread(file_path, 1) height, width = im.shape[:2] matRotation = cv2.getRotationMatrix2D((width / 2, height / 2), degree, 1) if is_full: height_new = int(width * fabs(sin(radians(degree))) + height * fabs(cos(radians(degree)))) width_new = int(height * fabs(sin(radians(degree))) + width * fabs(cos(radians(degree)))) matRotation[0, 2] += (width_new - width) / 2 # 重点在这步,目前不懂为什么加这步 matRotation[1, 2] += (height_new - height) / 2 # 重点在这步 imgRotation = cv2.warpAffine(im, matRotation, (width_new, height_new), borderMode=cv2.BORDER_REPLICATE) else: imgRotation = cv2.warpAffine(im, matRotation, (width, height), borderMode=cv2.BORDER_REPLICATE) return imgRotation def randomColor(image): """ 对图像进行颜色抖动 :param image: PIL的图像image :return: 有颜色色差的图像image """ random_factor = np.random.randint(0, 21) / 10. # 随机因子 color_image = ImageEnhance.Color(image).enhance(random_factor) # 调整图像的饱和度 random_factor = np.random.randint(3, 15) / 10. # 随机因子 brightness_image = ImageEnhance.Brightness(color_image).enhance(random_factor) # 调整图像的亮度 random_factor = np.random.randint(10, 15) / 10. # 随机因1子 contrast_image = ImageEnhance.Contrast(brightness_image).enhance(random_factor) # 调整图像对比度 random_factor = np.random.randint(0, 21) / 10. # 随机因子 return ImageEnhance.Sharpness(contrast_image).enhance(random_factor) # 调整图像锐度 def randomGaussian(image, mean=0.5, sigma=0.3): """ 对图像进行高斯噪声处理 :param image: :return: """ def gaussianNoisy(im, mean=0.5, sigma=0.3): """ 对图像做高斯噪音处理 :param im: 单通道图像 :param mean: 偏移量 :param sigma: 标准差 :return: """ for _i in range(len(im)): im[_i] += random.gauss(mean, sigma) return im # 将图像转化成数组 img = np.asarray(image) img.flags.writeable = True # 将数组改为读写模式 width, height = img.shape[:2] img_r = gaussianNoisy(img[:, :, 0].flatten(), mean, sigma) img_g = gaussianNoisy(img[:, :, 1].flatten(), mean, sigma) img_b = gaussianNoisy(img[:, :, 2].flatten(), mean, sigma) img[:, :, 0] = img_r.reshape([width, height]) img[:, :, 1] = img_g.reshape([width, height]) img[:, :, 2] = img_b.reshape([width, height]) return Image.fromarray(np.uint8(img)) def translate_coord(left_top, length, width, theta, center=None): """ 根据旋转前矩形坐标以及旋转弧度来计算将矩形旋转弧度之后的顶点坐标 :param left_top: 左下角顶点坐标 :param length: 矩形长度 :param width: 矩形宽度 :param theta: 旋转弧度 :return: 返回四个顶点坐标 """ # 获取左下角顶点坐标 left_down = [left_top[0], left_top[1] + width] # 获取右上角顶点坐标 right_top = [left_top[0] + length, left_top[1]] # 获取右下角顶点坐标 right_down = [left_top[0] + length, left_top[1] + width] # 计算中心点坐标 if center is None: center = [(left_top[0] + right_down[0]) / 2, (left_top[1] + right_down[1]) / 2] # 计算四个顶点旋转后的坐标 right_down_rotation = calculate_rotation_coord(right_down, center, theta) right_top_rotation = calculate_rotation_coord(right_top, center, theta) left_down_rotation = calculate_rotation_coord(left_down, center, theta) left_top_rotation = calculate_rotation_coord(left_top, center, theta) return left_top_rotation, left_down_rotation, right_top_rotation, right_down_rotation def calculate_rotation_coord(point, center, theta): """ 计算一个点以另一个点为中心,旋转theta弧度后的坐标 :param point: 旋转前点的坐标 :param center: 旋转中心坐标 :param theta: 
旋转弧度 :return: 返回旋转后点的坐标 """ # 计算旋转之后点的坐标 right_rotation_x = (point[0] - center[0]) * cos(theta) - \ (point[1] - center[1]) * sin(theta) + center[0] right_rotation_y = (point[0] - center[0]) * sin(theta) + \ (point[1] - center[1]) * cos(theta) + center[1] return [int(right_rotation_x), int(right_rotation_y)] def draw_box(img, img_save, left_top, left_down, right_top, right_down): """ 根据矩形的四个点的坐标,在图片中画框 :param img: 图片路径 :param img_save: 图片保存路径 :param left_top: 左上顶点坐标 :param left_down: 左下顶点坐标 :param right_top: 右上顶点坐标 :param right_down: 右下顶点坐标 :return: None """ # 打开图片 im = Image.open(img) draw = ImageDraw.Draw(im) # 分别画四条直线,即框出box的位置了 draw.line((left_top[0], left_top[1], left_down[0], left_down[1])) draw.line((left_top[0], left_top[1], right_top[0], right_top[1])) draw.line((right_top[0], right_top[1], right_down[0], right_down[1])) draw.line((left_down[0], left_down[1], right_down[0], right_down[1])) im.save(img_save) def get_color_box(img, height_im, width_im, move_pix=10): """ 给定一个box的长宽以及移动的像素值,从图片的左上角开始移动, 每次通过统计区域中像素颜色h的方差,找出图片h方差最小的区域也就是颜色相近的区域 :param img: 样本图片 :param height_im: 选取区域的长度 :param width_im: 选取区域的宽度 :param move_pix: 移动的像素值大小 :return: 返回图片颜色相近区域的起始位置,该区域颜色的反色rgb值 """ im = cv2.imread(img) # 将rgb值转化为hsv值 hsv_im = cv2.cvtColor(im, cv2.COLOR_BGR2HSV) # rgb矩阵 rgb_array = np.array(im) # hsv矩阵 hsv_array = np.array(hsv_im) height, width, chanel = hsv_array.shape # 计算box需要移动的次数 width_times = int((width - width_im) / move_pix + 1) height_times = int((height - height_im) / move_pix + 1) # 定义统计方差的list var_result = np.ndarray([height_times, width_times], dtype=np.float32) # 开始移动box for i in range(height_times): for j in range(width_times): # 计算box的起始位置 begin_height = i * move_pix end_height = begin_height + height_im + 1 begin_width = j * move_pix end_width = begin_width + width_im + 1 # 获取到box对应的hsv数组 hsv_box = hsv_array[begin_height:end_height, begin_width:end_width, :] # 计算box内的hsv中h的方差 box_color_count = statistic_color(hsv_box) var_result[i, j] = box_color_count # 找出方差最小的box所在的行和列 min_row, min_col = np.where(var_result == np.min(var_result)) # 随机从符合的位置中选取一个 rand_number = random.randint(0, len(min_row)-1) # 计算box对应的起始位置 height_location_begin = min_row[rand_number] * move_pix height_location_end = height_location_begin + height_im width_location_begin = min_col[rand_number] * move_pix width_location_end = width_location_begin + width_im # 获取到box对应的rgb数组 rgb_box = rgb_array[height_location_begin:height_location_end, width_location_begin:width_location_end, :] # 获取box的里的反色 diff_max_rgb = get_diff_color(rgb_box) return [[height_location_begin, height_location_end], [width_location_begin, width_location_end]], diff_max_rgb def statistic_color(color_array): """ 主要是获取box内的hsv值中h的方差 :param color_array: box对应的矩阵 :return: 返回box的hsv值中h的方差 """ h_value = color_array[:, :, 0] s_value = color_array[:, :, 1] variance = np.var(h_value) + np.var(s_value) return variance def get_diff_color(color_array): """ 主要是获取当前box的rgb值的均值,再求均值的反色 :param color_array: 当前box的rgb矩阵 :return: 当前box的反色的rgb值 """ r_mean = np.mean(color_array[:, :, 0]) g_mean = np.mean(color_array[:, :, 1]) b_mean = np.mean(color_array[:, :, 2]) return (int(255-r_mean), int(255-g_mean), int(255-b_mean)) def save_image_use_cv2(image, path): cv2.imwrite(path, image) def save_image_use_pil(image, path): image.save(path)
py
1a590211dc268e3b496d821a43593cf6e24e3c9d
import numpy as np import pytest from docarray import DocumentArray from docarray.array.qdrant import DocumentArrayQdrant from docarray.array.sqlite import DocumentArraySqlite from docarray.array.annlite import DocumentArrayAnnlite, AnnliteConfig from docarray.array.storage.qdrant import QdrantConfig from docarray.array.storage.weaviate import WeaviateConfig from docarray.array.weaviate import DocumentArrayWeaviate from docarray.array.elastic import DocumentArrayElastic, ElasticConfig @pytest.mark.parametrize( 'cls', [ DocumentArray, DocumentArraySqlite, DocumentArrayAnnlite, DocumentArrayWeaviate, DocumentArrayQdrant, DocumentArrayElastic, ], ) @pytest.mark.parametrize( 'content_attr', ['texts', 'embeddings', 'tensors', 'blobs', 'contents'] ) def test_content_empty_getter_return_none(cls, content_attr, start_storage): if cls in [ DocumentArrayAnnlite, DocumentArrayWeaviate, DocumentArrayQdrant, DocumentArrayElastic, ]: da = cls(config={'n_dim': 3}) else: da = cls() assert getattr(da, content_attr) is None @pytest.mark.parametrize( 'cls', [ DocumentArray, DocumentArraySqlite, DocumentArrayAnnlite, DocumentArrayWeaviate, DocumentArrayQdrant, DocumentArrayElastic, ], ) @pytest.mark.parametrize( 'content_attr', [ ('texts', ''), ('embeddings', np.array([])), ('tensors', np.array([])), ('blobs', []), ('contents', []), ], ) def test_content_empty_setter(cls, content_attr, start_storage): if cls in [ DocumentArrayAnnlite, DocumentArrayWeaviate, DocumentArrayQdrant, DocumentArrayElastic, ]: da = cls(config={'n_dim': 3}) else: da = cls() setattr(da, content_attr[0], content_attr[1]) assert getattr(da, content_attr[0]) is None @pytest.mark.parametrize( 'cls,config', [ (DocumentArray, None), (DocumentArraySqlite, None), (DocumentArrayAnnlite, AnnliteConfig(n_dim=128)), (DocumentArrayWeaviate, WeaviateConfig(n_dim=128)), (DocumentArrayQdrant, QdrantConfig(n_dim=128)), (DocumentArrayElastic, ElasticConfig(n_dim=128)), ], ) @pytest.mark.parametrize( 'content_attr', [ ('texts', ['s'] * 10), ('tensors', np.random.random([10, 2])), ('blobs', [b's'] * 10), ], ) def test_content_getter_setter(cls, content_attr, config, start_storage): if config: da = cls.empty(10, config=config) else: da = cls.empty(10) setattr(da, content_attr[0], content_attr[1]) np.testing.assert_equal(da.contents, content_attr[1]) da.contents = content_attr[1] np.testing.assert_equal(da.contents, content_attr[1]) np.testing.assert_equal(getattr(da, content_attr[0]), content_attr[1]) da.contents = None assert da.contents is None @pytest.mark.parametrize('da_len', [0, 1, 2]) @pytest.mark.parametrize( 'da_cls,config', [ (DocumentArray, None), (DocumentArraySqlite, None), (DocumentArrayAnnlite, AnnliteConfig(n_dim=128)), (DocumentArrayWeaviate, WeaviateConfig(n_dim=128)), (DocumentArrayQdrant, QdrantConfig(n_dim=128)), (DocumentArrayElastic, ElasticConfig(n_dim=128)), ], ) def test_content_empty(da_len, da_cls, config, start_storage): if config: da = da_cls.empty(da_len, config=config) else: da = da_cls.empty(da_len) assert not da.contents assert not da.tensors if da_len == 0: assert not da.texts assert not da.blobs else: assert da.texts == [''] * da_len assert da.blobs == [b''] * da_len da.texts = ['hello'] * da_len if da_len == 0: assert not da.contents else: assert da.contents == ['hello'] * da_len assert da.texts == ['hello'] * da_len assert not da.tensors assert da.blobs == [b''] * da_len @pytest.mark.parametrize('da_len', [0, 1, 2]) @pytest.mark.parametrize( 'da_cls,config', [ (DocumentArray, None), (DocumentArraySqlite, None), 
(DocumentArrayAnnlite, AnnliteConfig(n_dim=5)), (DocumentArrayWeaviate, WeaviateConfig(n_dim=5)), (DocumentArrayQdrant, QdrantConfig(n_dim=5)), (DocumentArrayElastic, ElasticConfig(n_dim=5)), ], ) def test_embeddings_setter(da_len, da_cls, config, start_storage): if config: da = da_cls.empty(da_len, config=config) else: da = da_cls.empty(da_len) da.embeddings = np.random.rand(da_len, 5) for doc in da: assert doc.embedding.shape == (5,)
py
1a59027263416d69d218543d3cf80a29d08f34ff
import enum
from collections import OrderedDict, namedtuple
from typing import Callable, Tuple, Sequence

from gtp import Status, GTPRunner


class CommandType(str, enum.Enum):
    SBOARD = 'sboard'
    DBOARD = 'dboard'
    CBOARD = 'cboard'
    STRING = 'string'
    HSTRING = 'hstring'
    HPSTRING = 'hpstring'
    PSTRING = 'pstring'
    PLIST = 'plist'
    PARAM = 'param'
    PSPAIRS = 'pspairs'
    VARC = 'varc'
    GFX = 'gfx'
    NONE = 'none'


GoGuiParam = namedtuple('GoGuiParam', 'name type gogui_type value')


class GoGuiParams:
    def __init__(self, params: Sequence[GoGuiParam]):
        self.params = OrderedDict()
        for param in params:
            self.params[param.name] = (param.type, param.gogui_type, param.value)

    @property
    def param_names(self):
        return self.params.keys()

    def keys(self):
        return self.param_names

    def __getitem__(self, key):
        if key in self.params:
            return self.__getattr__(key)
        raise KeyError

    def __getattr__(self, name):
        if name in self.params:
            param_type, _, param_value = self.params[name]
            return param_type(param_value)
        raise AttributeError

    def update(self, key, value):
        assert key in self.params
        param_type, gogui_type, _ = self.params[key]
        self.params[key] = (param_type, gogui_type, value)

    def __call__(self, param_name=None, param_value=None):
        if param_name is None and param_value is None:
            return Status.success, str(self)
        self.update(param_name, param_value)
        return Status.success, ""

    def __str__(self):
        return '\n'.join(["[{type}] {param} {value}".format(type=gogui_type, param=param_name, value=value)
                          for (param_name, (param_type, gogui_type, value)) in self.params.items()])


class GoGuiGTPRunner(GTPRunner):
    def __init__(self):
        super().__init__()
        self._analyze_callbacks = []
        self.add_callback('gogui_analyze_commands', self.cmd_gogui_analyze_commands, arity=0)

    def add_analyze_callback(self,
                             command_type: CommandType,
                             command_str: str,
                             callback: Callable[..., Tuple[Status, str]],
                             check_arity=True,
                             display_name: str=None,
                             description: str=None) -> None:
        command_tokens = command_str.split()
        self._assert_command_tokens(command_tokens)

        self._analyze_callbacks.append("%s/%s/%s" % (command_type.value, display_name or command_str, command_str))

        arity = len(command_tokens) - 1 if check_arity else None
        if command_tokens[0] not in self.list_commands:
            self.add_callback(command_tokens[0], callback, arity=arity, description=description)

    def cmd_gogui_analyze_commands(self, *_) -> Tuple[Status, str]:
        return Status.success, "\n".join(self._analyze_callbacks)

    @staticmethod
    def _assert_command_tokens(command_tokens) -> None:
        assert len(command_tokens) > 0
        for param in command_tokens[1:]:
            assert param in {'%s', '%p', '%c', '%w', '%r'}
py
1a59033186c850dd24c706c4fe5b3bfeb3fd665e
# -*- coding: utf-8 -*- # # Copyright (C) 2004-2020 Edgewall Software # Copyright (C) 2004 Francois Harvey <[email protected]> # Copyright (C) 2005 Matthew Good <[email protected]> # All rights reserved. # # This software is licensed as described in the file COPYING, which # you should have received as part of this distribution. The terms # are also available at https://trac.edgewall.org/wiki/TracLicense. # # This software consists of voluntary contributions made by many # individuals. For the exact contribution history, see the revision # history and logs, available at https://trac.edgewall.org/log/. # # Author: Francois Harvey <[email protected]> # Matthew Good <[email protected]> import os.path from trac.config import ConfigurationError, Option, ParsingError, \ PathOption, UnicodeConfigParser from trac.core import Component, TracError, implements from trac.perm import IPermissionPolicy from trac.util import pathjoin, to_list from trac.util.text import exception_to_unicode from trac.versioncontrol.api import RepositoryManager def parent_iter(path): while 1: yield path if path == '/': return path = path[:-1] yield path idx = path.rfind('/') path = path[:idx + 1] def parse(authz_file, modules): """Parse a Subversion authorization file. Return a dict of modules, each containing a dict of paths, each containing a dict mapping users to permissions. Only modules contained in `modules` are retained. """ parser = UnicodeConfigParser(ignorecase_option=False) parser.read(authz_file) groups = {} aliases = {} sections = {} for section in parser.sections(): if section == 'groups': for name, value in parser.items(section): groups.setdefault(name, set()).update(to_list(value)) elif section == 'aliases': for name, value in parser.items(section): aliases[name] = value.strip() else: for name, value in parser.items(section): parts = section.split(':', 1) module, path = parts[0] if len(parts) > 1 else '', parts[-1] if module in modules: sections.setdefault((module, path), []) \ .append((name, value)) def resolve(subject, done): if subject.startswith('@'): done.add(subject) for members in groups[subject[1:]] - done: for each in resolve(members, done): yield each elif subject.startswith('&'): yield aliases[subject[1:]] else: yield subject authz = {} for (module, path), items in sections.iteritems(): section = authz.setdefault(module, {}).setdefault(path, {}) for subject, perms in items: readable = 'r' in perms # Ordering isn't significant; any entry could grant permission section.update((user, readable) for user in resolve(subject, set()) if not section.get(user)) return authz class AuthzSourcePolicy(Component): """Permission policy for `source:` and `changeset:` resources using a Subversion authz file. `FILE_VIEW` and `BROWSER_VIEW` permissions are granted as specified in the authz file. `CHANGESET_VIEW` permission is granted for changesets where `FILE_VIEW` is granted on at least one modified file, as well as for empty changesets. """ implements(IPermissionPolicy) authz_file = PathOption('svn', 'authz_file', '', """The path to the Subversion [%(svnbook)s authorization (authz) file]. To enable authz permission checking, the `AuthzSourcePolicy` permission policy must be added to `[trac] permission_policies`. Non-absolute paths are relative to the Environment `conf` directory. """, doc_args={'svnbook': 'http://svnbook.red-bean.com/en/1.7/' 'svn.serverconfig.pathbasedauthz.html'}) authz_module_name = Option('svn', 'authz_module_name', '', """The module prefix used in the `authz_file` for the default repository. 
If left empty, the global section is used. """) _handled_perms = frozenset([(None, 'BROWSER_VIEW'), (None, 'CHANGESET_VIEW'), (None, 'FILE_VIEW'), (None, 'LOG_VIEW'), ('source', 'BROWSER_VIEW'), ('source', 'FILE_VIEW'), ('source', 'LOG_VIEW'), ('changeset', 'CHANGESET_VIEW')]) def __init__(self): self._mtime = 0 self._authz = {} self._users = set() # IPermissionPolicy methods def check_permission(self, action, username, resource, perm): realm = resource.realm if resource else None if (realm, action) in self._handled_perms: authz, users = self._get_authz_info() if authz is None: return False if username == 'anonymous': usernames = '$anonymous', '*' else: usernames = username, '$authenticated', '*' if resource is None: return True if users & set(usernames) else None rm = RepositoryManager(self.env) try: repos = rm.get_repository(resource.parent.id) except TracError: return True # Allow error to be displayed in the repo index if repos is None: return True modules = [resource.parent.id or self.authz_module_name] if modules[0]: modules.append('') def check_path_0(spath): sections = [authz.get(module, {}).get(spath) for module in modules] sections = [section for section in sections if section] denied = False for user in usernames: for section in sections: if user in section: if section[user]: return True denied = True # Don't check section without module name # because the section with module name defines # the user's permissions. break if denied: # All users has no readable permission. return False def check_path(path): path = '/' + pathjoin(repos.scope, path) if path != '/': path += '/' # Allow access to parent directories of allowed resources for spath in set(sum((list(authz.get(module, {})) for module in modules), [])): if spath.startswith(path): result = check_path_0(spath) if result is True: return True # Walk from resource up parent directories for spath in parent_iter(path): result = check_path_0(spath) if result is not None: return result if realm == 'source': return check_path(resource.id) elif realm == 'changeset': changes = list(repos.get_changeset(resource.id).get_changes()) if not changes or any(check_path(change[0]) for change in changes): return True def _get_authz_info(self): if not self.authz_file: self.log.error("The [svn] authz_file configuration option in " "trac.ini is empty or not defined") raise ConfigurationError() try: mtime = os.path.getmtime(self.authz_file) except OSError as e: self.log.error("Error accessing svn authz permission policy " "file: %s", exception_to_unicode(e)) raise ConfigurationError() if mtime != self._mtime: self._mtime = mtime rm = RepositoryManager(self.env) modules = set(repos.reponame for repos in rm.get_real_repositories()) if '' in modules and self.authz_module_name: modules.add(self.authz_module_name) modules.add('') self.log.info("Parsing authz file: %s", self.authz_file) try: self._authz = parse(self.authz_file, modules) except ParsingError as e: self.log.error("Error parsing svn authz permission policy " "file: %s", exception_to_unicode(e)) raise ConfigurationError() else: self._users = {user for paths in self._authz.itervalues() for path in paths.itervalues() for user, result in path.iteritems() if result} return self._authz, self._users
py
1a5903b398b73ce4642737c65e7ff71e6d6ac9f3
# -*- coding: utf-8 -*- """ pygments.lexers.sas ~~~~~~~~~~~~~~~~~~~ Lexer for SAS. :copyright: Copyright 2006-2020 by the Pygments team, see AUTHORS. :license: BSD, see LICENSE for details. """ import re from pygments.lexer import RegexLexer, include, words from pygments.token import Comment, Keyword, Name, Number, String, Text, \ Other, Generic __all__ = ['SASLexer'] class SASLexer(RegexLexer): """ For `SAS <http://www.sas.com/>`_ files. .. versionadded:: 2.2 """ # Syntax from syntax/sas.vim by James Kidd <[email protected]> name = 'SAS' aliases = ['sas'] filenames = ['*.SAS', '*.sas'] mimetypes = ['text/x-sas', 'text/sas', 'application/x-sas'] flags = re.IGNORECASE | re.MULTILINE builtins_macros = ( "bquote", "nrbquote", "cmpres", "qcmpres", "compstor", "datatyp", "display", "do", "else", "end", "eval", "global", "goto", "if", "index", "input", "keydef", "label", "left", "length", "let", "local", "lowcase", "macro", "mend", "nrquote", "nrstr", "put", "qleft", "qlowcase", "qscan", "qsubstr", "qsysfunc", "qtrim", "quote", "qupcase", "scan", "str", "substr", "superq", "syscall", "sysevalf", "sysexec", "sysfunc", "sysget", "syslput", "sysprod", "sysrc", "sysrput", "then", "to", "trim", "unquote", "until", "upcase", "verify", "while", "window" ) builtins_conditionals = ( "do", "if", "then", "else", "end", "until", "while" ) builtins_statements = ( "abort", "array", "attrib", "by", "call", "cards", "cards4", "catname", "continue", "datalines", "datalines4", "delete", "delim", "delimiter", "display", "dm", "drop", "endsas", "error", "file", "filename", "footnote", "format", "goto", "in", "infile", "informat", "input", "keep", "label", "leave", "length", "libname", "link", "list", "lostcard", "merge", "missing", "modify", "options", "output", "out", "page", "put", "redirect", "remove", "rename", "replace", "retain", "return", "select", "set", "skip", "startsas", "stop", "title", "update", "waitsas", "where", "window", "x", "systask" ) builtins_sql = ( "add", "and", "alter", "as", "cascade", "check", "create", "delete", "describe", "distinct", "drop", "foreign", "from", "group", "having", "index", "insert", "into", "in", "key", "like", "message", "modify", "msgtype", "not", "null", "on", "or", "order", "primary", "references", "reset", "restrict", "select", "set", "table", "unique", "update", "validate", "view", "where" ) builtins_functions = ( "abs", "addr", "airy", "arcos", "arsin", "atan", "attrc", "attrn", "band", "betainv", "blshift", "bnot", "bor", "brshift", "bxor", "byte", "cdf", "ceil", "cexist", "cinv", "close", "cnonct", "collate", "compbl", "compound", "compress", "cos", "cosh", "css", "curobs", "cv", "daccdb", "daccdbsl", "daccsl", "daccsyd", "dacctab", "dairy", "date", "datejul", "datepart", "datetime", "day", "dclose", "depdb", "depdbsl", "depsl", "depsyd", "deptab", "dequote", "dhms", "dif", "digamma", "dim", "dinfo", "dnum", "dopen", "doptname", "doptnum", "dread", "dropnote", "dsname", "erf", "erfc", "exist", "exp", "fappend", "fclose", "fcol", "fdelete", "fetch", "fetchobs", "fexist", "fget", "fileexist", "filename", "fileref", "finfo", "finv", "fipname", "fipnamel", "fipstate", "floor", "fnonct", "fnote", "fopen", "foptname", "foptnum", "fpoint", "fpos", "fput", "fread", "frewind", "frlen", "fsep", "fuzz", "fwrite", "gaminv", "gamma", "getoption", "getvarc", "getvarn", "hbound", "hms", "hosthelp", "hour", "ibessel", "index", "indexc", "indexw", "input", "inputc", "inputn", "int", "intck", "intnx", "intrr", "irr", "jbessel", "juldate", "kurtosis", "lag", "lbound", "left", 
"length", "lgamma", "libname", "libref", "log", "log10", "log2", "logpdf", "logpmf", "logsdf", "lowcase", "max", "mdy", "mean", "min", "minute", "mod", "month", "mopen", "mort", "n", "netpv", "nmiss", "normal", "note", "npv", "open", "ordinal", "pathname", "pdf", "peek", "peekc", "pmf", "point", "poisson", "poke", "probbeta", "probbnml", "probchi", "probf", "probgam", "probhypr", "probit", "probnegb", "probnorm", "probt", "put", "putc", "putn", "qtr", "quote", "ranbin", "rancau", "ranexp", "rangam", "range", "rank", "rannor", "ranpoi", "rantbl", "rantri", "ranuni", "repeat", "resolve", "reverse", "rewind", "right", "round", "saving", "scan", "sdf", "second", "sign", "sin", "sinh", "skewness", "soundex", "spedis", "sqrt", "std", "stderr", "stfips", "stname", "stnamel", "substr", "sum", "symget", "sysget", "sysmsg", "sysprod", "sysrc", "system", "tan", "tanh", "time", "timepart", "tinv", "tnonct", "today", "translate", "tranwrd", "trigamma", "trim", "trimn", "trunc", "uniform", "upcase", "uss", "var", "varfmt", "varinfmt", "varlabel", "varlen", "varname", "varnum", "varray", "varrayx", "vartype", "verify", "vformat", "vformatd", "vformatdx", "vformatn", "vformatnx", "vformatw", "vformatwx", "vformatx", "vinarray", "vinarrayx", "vinformat", "vinformatd", "vinformatdx", "vinformatn", "vinformatnx", "vinformatw", "vinformatwx", "vinformatx", "vlabel", "vlabelx", "vlength", "vlengthx", "vname", "vnamex", "vtype", "vtypex", "weekday", "year", "yyq", "zipfips", "zipname", "zipnamel", "zipstate" ) tokens = { 'root': [ include('comments'), include('proc-data'), include('cards-datalines'), include('logs'), include('general'), (r'.', Text), ], # SAS is multi-line regardless, but * is ended by ; 'comments': [ (r'^\s*\*.*?;', Comment), (r'/\*.*?\*/', Comment), (r'^\s*\*(.|\n)*?;', Comment.Multiline), (r'/[*](.|\n)*?[*]/', Comment.Multiline), ], # Special highlight for proc, data, quit, run 'proc-data': [ (r'(^|;)\s*(proc \w+|data|run|quit)[\s;]', Keyword.Reserved), ], # Special highlight cards and datalines 'cards-datalines': [ (r'^\s*(datalines|cards)\s*;\s*$', Keyword, 'data'), ], 'data': [ (r'(.|\n)*^\s*;\s*$', Other, '#pop'), ], # Special highlight for put NOTE|ERROR|WARNING (order matters) 'logs': [ (r'\n?^\s*%?put ', Keyword, 'log-messages'), ], 'log-messages': [ (r'NOTE(:|-).*', Generic, '#pop'), (r'WARNING(:|-).*', Generic.Emph, '#pop'), (r'ERROR(:|-).*', Generic.Error, '#pop'), include('general'), ], 'general': [ include('keywords'), include('vars-strings'), include('special'), include('numbers'), ], # Keywords, statements, functions, macros 'keywords': [ (words(builtins_statements, prefix = r'\b', suffix = r'\b'), Keyword), (words(builtins_sql, prefix = r'\b', suffix = r'\b'), Keyword), (words(builtins_conditionals, prefix = r'\b', suffix = r'\b'), Keyword), (words(builtins_macros, prefix = r'%', suffix = r'\b'), Name.Builtin), (words(builtins_functions, prefix = r'\b', suffix = r'\('), Name.Builtin), ], # Strings and user-defined variables and macros (order matters) 'vars-strings': [ (r'&[a-z_]\w{0,31}\.?', Name.Variable), (r'%[a-z_]\w{0,31}', Name.Function), (r'\'', String, 'string_squote'), (r'"', String, 'string_dquote'), ], 'string_squote': [ ('\'', String, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), # AFAIK, macro variables are not evaluated in single quotes # (r'&', Name.Variable, 'validvar'), (r'[^$\'\\]+', String), (r'[$\'\\]', String), ], 'string_dquote': [ (r'"', String, '#pop'), (r'\\\\|\\"|\\\n', String.Escape), (r'&', Name.Variable, 'validvar'), (r'[^$&"\\]+', String), 
(r'[$"\\]', String), ], 'validvar': [ (r'[a-z_]\w{0,31}\.?', Name.Variable, '#pop'), ], # SAS numbers and special variables 'numbers': [ (r'\b[+-]?([0-9]+(\.[0-9]+)?|\.[0-9]+|\.)(E[+-]?[0-9]+)?i?\b', Number), ], 'special': [ (r'(null|missing|_all_|_automatic_|_character_|_n_|' r'_infile_|_name_|_null_|_numeric_|_user_|_webout_)', Keyword.Constant), ], # 'operators': [ # (r'(-|=|<=|>=|<|>|<>|&|!=|' # r'\||\*|\+|\^|/|!|~|~=)', Operator) # ], }
py
1a5903b7951653de054f1422bb86d159562071d3
#!/usr/bin/env python """ """ # ============================================================================== # --General imports ------------------------------------------------------------ # ============================================================================== from time import sleep import math import numpy as np import sys import copy sys.path.append('./HiddenMarkovModel') from HiddenMarkovModel.HMM_MODEL import * # ============================================================================== # -- ROS imports --------------------------------------------------------------- # ============================================================================== import rospy import os sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) from perception.msg import Object from perception.msg import ObjectsList from perception.msg import WaypointsList from perception.msg import Waypoint from perception.msg import TrajectoriesList from perception.srv import DrivingPaths, DrivingPathsRequest, DrivingPathsResponse from prediction.msg import VehiclesCollisionEvent from prediction.msg import VehiclesCollisionEventList from prediction.msg import PedestrianCollisionEvent from prediction.msg import PedestrianCollisionEventList sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__)))) # ============================================================================== # -- Implementation ------------------------------------------------------------ # ============================================================================== MAX_SEQ_LENGTH_MEMORY = 5 MAX_STATE_SEQ_LENGTH_MEMORY = 1 GLOBAL_DRAW = True state_i = 0 class VehicleState: def __init__(self, vehicle, angle): self.vehicle = [vehicle] self.angles = [angle] self.vehicle_id = vehicle.object_id self.trajectories = [] self.possible_trajectory_id = [] self.state = [] self.observations_seq = [] def v_pop(self, index=0): if len(self.vehicle) > MAX_SEQ_LENGTH_MEMORY: self.angles.pop(index) self.vehicle.pop(index) def s_pop(self, index=0): if len(self.state) > MAX_STATE_SEQ_LENGTH_MEMORY: self.state.pop(index) def ob_pop(self, index=0): if len(self.observations_seq) > MAX_STATE_SEQ_LENGTH_MEMORY: self.observations_seq.pop(index) class MotionPrediction: def __init__(self): self.vehicles_state_list = [] self.vehicles_id = [] self.trajectories_length = 40.0 self.HMM_MODEL = HMM_MODEL() self.vehicles_collision_events_info = [] self.pedestrians_collision_events_info = [] # --- ROS --- rospy.init_node('Prediction_node', anonymous=True) self.ego_vehicle = Object() self.vehicles_list = [] self.pedestrians_list = [] self.traffic_signs_loc = [] self.ego_trajectory = [] self.subscriber_ego_vehicle = rospy.Subscriber("ego_vehicle_msg", Object, self.callback_ego_vehicle, queue_size=1) self.subscriber_ego_trajectory = rospy.Subscriber("ego_trajectory_msg", WaypointsList, self.callback_ego_trajectory, queue_size=1) self.subscriber_vehicles_list = rospy.Subscriber('vehicles_list_msg', ObjectsList, self.callback_vehicles_list, queue_size=1) self.subscriber_pedestrians_list = rospy.Subscriber('pedestrians_list_msg', ObjectsList, self.callback_pedestrians_list, queue_size=1) self.subscriber_signs_location = rospy.Subscriber("signs_location_msg", WaypointsList, self.callback_signs_location, queue_size=1) self.pub_vehicles_collision_info = rospy.Publisher('vehicles_collision_info_msg', VehiclesCollisionEventList, queue_size=1) self.pub_pedestrians_collision_info = rospy.Publisher('pedestrians_collision_info_msg', 
PedestrianCollisionEventList, queue_size=1) # Draw trajectories if GLOBAL_DRAW: self.pub_prototype_trajectories_draw = rospy.Publisher('prototype_trajectories_draw_msg', TrajectoriesList, queue_size=1) self.pub_collision_points_draw = rospy.Publisher('collision_points_draw_msg', WaypointsList, queue_size=1) def track_vehicles_and_save_states(self, vehicles, angles): # For vehicles found for vehicle in vehicles: if vehicle.object_id in self.vehicles_id: index = self.vehicles_id.index(vehicle.object_id) veh_state = self.vehicles_state_list[index] veh_state.vehicle.append(vehicle) veh_state.angles.append(angles[vehicles.index(vehicle)]) veh_state.v_pop(0) else: self.vehicles_state_list.append(VehicleState(vehicle, angles[vehicles.index(vehicle)])) self.vehicles_id.append(vehicle.object_id) # Remove vehicles from list which don't exist any more if not vehicles: self.vehicles_state_list = [] self.vehicles_id = [] existed_vehicles_id = [veh.object_id for veh in vehicles] #print(existed_vehicles_id) if len(existed_vehicles_id) != 0: self.vehicles_state_list = [self.vehicles_state_list[i] for i in range(len(self.vehicles_state_list)) if self.vehicles_id[i] in existed_vehicles_id] self.vehicles_id = [v_id for v_id in self.vehicles_id if v_id in existed_vehicles_id] def get_objects_around_vehicle(self, obj_type="vehicle", min_radius=50.0): ego_vehicle = copy.deepcopy(self.ego_vehicle) if obj_type == "vehicle": to_angle = 120.0 - ego_vehicle.speed * 3.6 if ego_vehicle.speed * 3.6 < 30.0 else 90.0 else: to_angle = 80.0 - ego_vehicle.speed * 3.6 if ego_vehicle.speed * 3.6 < 30.0 else 40.0 from_angle = - to_angle t_stop = 3.0 radius = min_radius + ego_vehicle.speed * t_stop objects, angles = self.objects_in_angle_range_and_in_radius(obj_type, from_angle, to_angle, radius) return objects, angles def objects_in_angle_range_and_in_radius(self, object_type, from_angle=-90.0, to_angle=90.0, radius=20.0): """ Method to find all the objects of a type like vehicles, pedestrians,etc between two angles (from_angle -> to_angle) in relation to vehicle coordinate system :param ego_vehicle: The self driving vehicle :param object_type: The object type, vehicles, pedestrians, traffic signs etc. 
:param from_angle: Start angle in relation to vehicle coordinate system in degrees in the interval [-180, 180) :param to_angle: The final angle in relation to vehicle coordinate system in degrees in the interval [-180, 180) :param radius: The max radius in which the object need to be """ ego_vehicle = copy.deepcopy(self.ego_vehicle) if object_type == "vehicle": objects_list = copy.deepcopy(self.vehicles_list) elif object_type == "pedestrian": objects_list = copy.deepcopy(self.pedestrians_list) else: return [], [] if len(objects_list) == 0: return [], [] target_objects = [] angle_list = [] for an_object in objects_list: x = an_object.x - ego_vehicle.x y = an_object.y - ego_vehicle.y theta = math.degrees(math.atan2(y, x)) % 360.0 theta = theta - ego_vehicle.yaw theta = theta % 360.0 theta = theta - 360.0 if theta > 180.0 else theta rel_dist = math.hypot(an_object.y - ego_vehicle.y, an_object.x - ego_vehicle.x) if from_angle <= theta <= to_angle and rel_dist < radius: target_objects.append(an_object) # theta = theta + 360 if theta < 0 else theta angle_list.append(theta) return [object_i for object_i in target_objects] if len(target_objects) != 0 else [], angle_list def get_prototype_trajectories_and_vehicles(self): draw_points_flag = True percentage_renew = 0.5 vehicles, angles = self.get_objects_around_vehicle(obj_type="vehicle") self.track_vehicles_and_save_states(vehicles, angles) veh_trajectories = self.client_driving_paths(vehicles) if vehicles: for v_i, vehicle in enumerate(vehicles): index = self.vehicles_id.index(vehicle.object_id) if len(self.vehicles_state_list[index].trajectories) == 0: self.vehicles_state_list[index].trajectories = veh_trajectories[v_i] else: w_e = self.vehicles_state_list[index].trajectories[0][-1] w_b = self.vehicles_state_list[index].trajectories[0][0] dist = math.hypot(vehicle.x - w_e.x, vehicle.y - w_e.y) dist_reverse = math.hypot(vehicle.x - w_b.x, vehicle.y - w_b.y) if dist < percentage_renew*self.trajectories_length or \ dist_reverse > (percentage_renew/4)*self.trajectories_length or \ len(self.vehicles_state_list[index].trajectories) == 1: self.vehicles_state_list[index].trajectories = veh_trajectories[v_i] if GLOBAL_DRAW: self.publish_prototype_trajectories_draw() return [state.vehicle[-1] for state in self.vehicles_state_list], [state.trajectories for state in self.vehicles_state_list] def calculate_trajectories_probability(self): """ For each vehicle saved in data base we calculate the most possible trajectory from the prototypes trajectories to follow. For each vehicle track we take all the past instances of the vehicle and we calculate the minimum distance of each from each trajectory and sum them. Finally the trajectory with the lowest sum is chosen. 
The most possible trajectory and the position of the vehicle on it is saved in "possible_trajectory_id" """ e_threshold = 0.1 min_probability = 0.3 max_probability = 0.6 trajectories_list = [] probabilities = [] for track in self.vehicles_state_list: x_y = [[vehicle_inst.x, vehicle_inst.y] for vehicle_inst in track.vehicle] min_sums = [] index = [] yaw_cos = [] for trajectory in track.trajectories: dist_min_list = [] idx = 0 for i in range(len(x_y)): min_dist = float("inf") for w in trajectory: dist = math.hypot(w.x - x_y[i][0], w.y - x_y[i][1]) if min_dist > dist: min_dist = dist if i == len(x_y)-1: idx = trajectory.index(w) dist_min_list.append(min_dist) yaw_cos.append(sum([math.cos(math.radians(tr.yaw)) for tr in trajectory[idx:]])/len(trajectory[idx:])) index.append(idx) min_sums.append(sum(dist_min_list)) k1 = abs(math.cos(math.radians(track.vehicle[-1].yaw))) max_yaw = [1-abs(k1 - abs(c_yaw)) for c_yaw in yaw_cos] max_sums = [1-m_s/(max(min_sums)+0.0000001) for m_s in min_sums] max_value = [(max_sums[i]+max_yaw[i]**2) for i in range(len(max_sums))] probability = [m_v/(sum(max_value)+0.000001) for m_v in max_value] max_of_all = max(probability) track.possible_trajectory_id = [] for i in range(len(probability)): if abs(max_of_all-probability[i]) < e_threshold or \ probability[probability.index(max_of_all)] < min_probability or\ probability[i] > max_probability: track.possible_trajectory_id.append([i, index[i]]) if len(track.possible_trajectory_id) == 0: i = probability.index(max_of_all) track.possible_trajectory_id.append([i, index[i]]) trajectories = [] for i in range(len(track.trajectories)): pos_tr = [p_t[0] for p_t in track.possible_trajectory_id] if i in pos_tr: trajectories.append(track.trajectories[i][track.possible_trajectory_id[pos_tr.index(i)][1]:]) track.trajectories = trajectories probabilities.append(probability) trajectories_list.append(trajectories) return trajectories_list, probabilities def get_trajectories_with_stop_constraints(self): min_dist = 3.0 vehicles = [track.vehicle[-1] for track in self.vehicles_state_list] trajectories = [track.trajectories for track in self.vehicles_state_list] traffic_signs_loc = copy.deepcopy(self.traffic_signs_loc) traject_with_constr = [] for signs_loc in traffic_signs_loc: for v_t in trajectories: for trajectory in v_t: for t_loc in trajectory: dist = math.hypot(signs_loc.x - t_loc.x, signs_loc.y - t_loc.y) if dist < min_dist: vehicle = vehicles[trajectories.index(v_t)] v_dist = math.hypot(signs_loc.x - vehicle.x, signs_loc.y - vehicle.y) traject_with_constr.append([trajectories.index(v_t), v_t.index(trajectory), v_dist]) break return traject_with_constr def predict_vehicle_speed(self, traject_with_constr): ego_vehicle = copy.deepcopy(self.ego_vehicle) vehicles_speed = [] speed_probabilities = [] tick_time = 1.0 a_stop = 6.0 a_deceleration = 4.0 a_acceleration = 3.0 global state_i for v_i in range(len(self.vehicles_state_list)): vehicle = self.vehicles_state_list[v_i].vehicle[-1] traf_info_const = [const_info for const_info in traject_with_constr if v_i == const_info[0]] if len(traf_info_const) != 0: observation = self.HMM_MODEL.get_observation(vehicle, traf_info_const, ego_vehicle) #print(observation) self.vehicles_state_list[v_i].observations_seq.append(observation) self.vehicles_state_list[v_i].ob_pop(0) #print(self.vehicles_state_list[v_i].observations_seq) ln_prob, num_seq, curr_state = self.HMM_MODEL.predict_state(vehicle.speed, self.vehicles_state_list[v_i].observations_seq) probability = math.exp(ln_prob) states = 
[STATE_VECTOR[i] for i in num_seq] #print(states) next_state = states[-1] state_i = OBSERVATION_VECTOR.index(observation) else: probability = 1 next_state = STATE_VECTOR[2] # Steady state curr_state = next_state if next_state == STATE_VECTOR[2]: # Steady state predicted_speed = vehicle.speed elif next_state == STATE_VECTOR[0]: # Stop predicted_speed = vehicle.speed - tick_time*a_stop predicted_speed = 0.0 if predicted_speed < 0.0 else predicted_speed elif next_state == STATE_VECTOR[3]: # Accelerate predicted_speed = vehicle.speed + tick_time * a_acceleration*probability else: # Deceleration predicted_speed = vehicle.speed - tick_time * a_deceleration*probability predicted_speed = 0.0 if predicted_speed < 0.0 else predicted_speed speed_probabilities.append(probability) vehicles_speed.append(predicted_speed) #print(STATE_VECTOR.index(next_state), state_i, round(predicted_speed, 2), round(vehicle.speed, 2), round(probability, 2), STATE_VECTOR.index(curr_state)) return vehicles_speed, speed_probabilities def predict_vehicles_collision(self): ego_vehicle = copy.deepcopy(self.ego_vehicle) draw_points_flag = True ego_vehicle_trajectory = copy.deepcopy(self.ego_trajectory) if len(ego_vehicle_trajectory) == 0: return True vehicles, all_vehicles_paths = self.get_prototype_trajectories_and_vehicles() possible_trajectories, trajectories_probabilities = self.calculate_trajectories_probability() traject_with_constr = self.get_trajectories_with_stop_constraints() predicted_vehicles_speed, speed_probabilities = self.predict_vehicle_speed(traject_with_constr) # angles = [angles[i] for i in range(len(vehicles)) if vehicles[i] in c_vehicles] min_collision_dist = 2.0 t = [0.0] ego_vehicle_speed = ego_vehicle.speed + 0.00000001 dist = 0.0 for i_d in range(len(ego_vehicle_trajectory)-1): dist += math.hypot(ego_vehicle_trajectory[i_d + 1].y - ego_vehicle_trajectory[i_d].y, ego_vehicle_trajectory[i_d + 1].x - ego_vehicle_trajectory[i_d].x) t.append(dist/ego_vehicle_speed if ego_vehicle_speed > 0.0 else 100000.0) t_v = [] for trajectories in possible_trajectories: t_tr = [] other_vehicles_speed = predicted_vehicles_speed[possible_trajectories.index(trajectories)] + 0.001 for trajectory in trajectories: t_t = [0.0] dist = 0.0 for i_d in range(len(trajectory) - 1): dist += math.hypot(trajectory[i_d + 1].y - trajectory[i_d].y, trajectory[i_d + 1].x - trajectory[i_d].x) t_t.append(dist / other_vehicles_speed if other_vehicles_speed > 0.0 else 100000.0) t_tr.append(t_t) t_v.append(t_tr) collision_vehicles = [] for t_tr in t_v: # t_windows = min_collision_dist/vehicles[t_v.index(t_tr)].speed + 0.001 for t_t in t_tr: break_flag = False for time_i in t_t: bigger_than = False for time_j in t: if time_j >= time_i: bigger_than = True w1 = possible_trajectories[t_v.index(t_tr)][t_tr.index(t_t)][t_t.index(time_i)] w2 = ego_vehicle_trajectory[t.index(time_j)] dist = math.hypot(w2.y - w1.y, w2.x - w1.x) if dist < min_collision_dist: break_flag = True collision_time = time_i collision_vehicles.append([t_v.index(t_tr), t_tr.index(t_t), collision_time, w1]) break if not bigger_than and time_i != 0.0: for time_j in t: w1 = possible_trajectories[t_v.index(t_tr)][t_tr.index(t_t)][t_t.index(time_i)] w2 = ego_vehicle_trajectory[t.index(time_j)] dist = math.hypot(w2.y - w1.y, w2.x - w1.x) if dist < min_collision_dist: break_flag = True collision_time = time_i if math.hypot(w2.y - ego_vehicle_trajectory[0].y, w2.x - ego_vehicle_trajectory[0].x) > \ math.hypot(w2.y - possible_trajectories[t_v.index(t_tr)][t_tr.index(t_t)][0].y, w2.x - 
possible_trajectories[t_v.index(t_tr)][t_tr.index(t_t)][0].x): collision_time = time_j collision_vehicles.append([t_v.index(t_tr), t_tr.index(t_t), collision_time, w1]) break if break_flag: break collision_events_info = [] for collision in collision_vehicles: vehicle = self.vehicles_state_list[collision[0]].vehicle[-1] trajectory_probability = trajectories_probabilities[collision[0]][collision[1]] speed_probability = speed_probabilities[collision[0]] collision_time = collision[2] collision_point = collision[3] predict_probability = trajectory_probability*speed_probability collision_events_info.append([vehicle, collision_point, collision_time, predict_probability]) self.vehicles_collision_events_info = collision_events_info self.publish_vehicles_collision_events() if GLOBAL_DRAW and len(vehicles) != 0 and len(collision_vehicles) != 0: self.publish_collision_points_draw([collision[-1] for collision in collision_vehicles]) def predict_pedestrians_collision(self): ego_vehicle = copy.deepcopy(self.ego_vehicle) radius_sum = 2.0 pedestrians, angles = self.get_objects_around_vehicle(obj_type="pedestrian", min_radius=12) t_col = [] dist_col = [] for pedestrian in pedestrians: t = 100000.0 min_dist = 10000.0 relative_speed_x = pedestrian.vel_x - ego_vehicle.vel_x - math.cos(math.radians(ego_vehicle.yaw))*0.1 relative_speed_y = pedestrian.vel_y - ego_vehicle.vel_y - math.sin(math.radians(ego_vehicle.yaw))*0.1 relative_distance_x = pedestrian.x - ego_vehicle.x relative_distance_y = pedestrian.y - ego_vehicle.y a = relative_speed_x**2 + relative_speed_y**2 + 0.000001 b = 2*(relative_speed_x*relative_distance_x + relative_speed_y*relative_distance_y) c = relative_distance_x**2 + relative_distance_y**2 - radius_sum**2 discriminant = b**2 - 4*a*c if discriminant > 0.0: # Collision happens t0 = (-b - math.sqrt(discriminant)) / (2.0 * a) t1 = (-b + math.sqrt(discriminant)) / (2.0 * a) t = min([t0, t1]) if min([t0, t1]) >= 0.0 else max([t0, t1]) min_dist = 0.0 else: t = -b/(2*a) min_dist = a * (t ** 2) + t * b + c t, min_dist = (100000.0, 10000.0) if t < 0.0 else (t, min_dist) # No collision if t < 0 t_col.append(t) dist_col.append(min_dist) if t_col: collision_event = [pedestrians[dist_col.index(min(dist_col))], angles[dist_col.index(min(dist_col))], t_col[dist_col.index(min(dist_col))], min(dist_col)] else: collision_event = [] self.pedestrians_collision_events_info = [collision_event] self.publish_pedestrians_collision_events() if GLOBAL_DRAW and len(collision_event) != 0 and collision_event[-1] < 2.0: pedestrian_object = collision_event[0] dist_ped = math.hypot(pedestrian_object.x-ego_vehicle.x, pedestrian_object.y-ego_vehicle.y) angle = math.radians(collision_event[1]) dist_ped = dist_ped*math.cos(angle) waypoint = Waypoint() theta = math.radians(ego_vehicle.yaw) waypoint.x = ego_vehicle.x + dist_ped * math.cos(theta) waypoint.y = ego_vehicle.y + dist_ped * math.sin(theta) self.publish_collision_points_draw([waypoint]) # -------- ROS functions --------- def callback_ego_vehicle(self, ros_data): self.ego_vehicle = ros_data def callback_vehicles_list(self, ros_data): self.vehicles_list = ros_data.objects_list def callback_pedestrians_list(self, ros_data): self.pedestrians_list = ros_data.objects_list def callback_signs_location(self, ros_data): self.traffic_signs_loc = ros_data.waypoints_list def callback_ego_trajectory(self, ros_data): self.ego_trajectory = ros_data.waypoints_list def client_driving_paths(self, vehicles): rospy.wait_for_service('driving_paths_srv') 
rospy.wait_for_service('driving_paths_srv') x_list = [vehicle.x for vehicle in vehicles] y_list = [vehicle.y for vehicle in vehicles] try: driving_paths = rospy.ServiceProxy('driving_paths_srv', DrivingPaths) resp1 = driving_paths(x_list, y_list, self.trajectories_length) veh_trajectories = [] for vehicle in resp1.driving_paths: trajectories = [trajectory.waypoints_list for trajectory in vehicle.trajectories_list] veh_trajectories.append(trajectories) #rospy.loginfo(veh_trajectories) return veh_trajectories except rospy.ServiceException as e: print("Service call failed: %s" % e) def publish_vehicles_collision_events(self): pub = self.pub_vehicles_collision_info collision_event_list = [] for collision_event in self.vehicles_collision_events_info: ros_vehicle_collision_event = VehiclesCollisionEvent() ros_vehicle_collision_event.object = collision_event[0] ros_vehicle_collision_event.collision_point = collision_event[1] ros_vehicle_collision_event.collision_time = collision_event[2] ros_vehicle_collision_event.prediction_probability = collision_event[3] collision_event_list.append(ros_vehicle_collision_event) ros_collision_event_list = VehiclesCollisionEventList() ros_collision_event_list.collision_event_list = collision_event_list #rospy.loginfo(ros_collision_event_list) pub.publish(ros_collision_event_list) def publish_pedestrians_collision_events(self): pub = self.pub_pedestrians_collision_info collision_event_list = [] if self.pedestrians_collision_events_info[0]: for collision_event in self.pedestrians_collision_events_info: ros_collision_event = PedestrianCollisionEvent() ros_collision_event.object = collision_event[0] ros_collision_event.angle = collision_event[1] ros_collision_event.collision_time = collision_event[2] ros_collision_event.collision_distance = collision_event[3] collision_event_list.append(ros_collision_event) ros_collision_event_list = PedestrianCollisionEventList() ros_collision_event_list.collision_event_list = collision_event_list # rospy.loginfo(ros_collision_event) pub.publish(ros_collision_event_list) def publish_prototype_trajectories_draw(self): pub = self.pub_prototype_trajectories_draw trajectories_list = [] for prototype_trajectories in [veh.trajectories for veh in self.vehicles_state_list]: for trajectory in prototype_trajectories: ros_trajectory = WaypointsList() ros_trajectory.waypoints_list = trajectory trajectories_list.append(ros_trajectory) prototype_trajectories = TrajectoriesList() prototype_trajectories.trajectories_list = trajectories_list #rospy.loginfo(prototype_trajectories) pub.publish(prototype_trajectories) def publish_collision_points_draw(self, collision_points): pub = self.pub_collision_points_draw ros_points_list = WaypointsList() ros_points_list.waypoints_list = collision_points #rospy.loginfo(prototype_trajectories) pub.publish(ros_points_list) def main(): motion_prediction = MotionPrediction() try: while not rospy.is_shutdown(): motion_prediction.predict_vehicles_collision() motion_prediction.predict_pedestrians_collision() except rospy.ROSInterruptException: print("Local path planner node failed") pass if __name__ == '__main__': main()
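
# --- Illustrative sketch (not part of the original node) ---
# The pedestrian check above reduces to a closest-approach computation on
# relative motion: with relative position d = (dx, dy), relative velocity
# v = (vx, vy) and a combined radius r, a collision time solves
# |d + v*t|^2 = r^2, i.e. a*t^2 + b*t + c = 0 with a = |v|^2, b = 2*(d.v)
# and c = |d|^2 - r^2. The helper below reproduces that arithmetic in
# isolation; the numbers in the commented example are made up.
def _closest_approach_demo(dx, dy, vx, vy, radius_sum=2.0):
    a = vx ** 2 + vy ** 2 + 1e-6
    b = 2.0 * (vx * dx + vy * dy)
    c = dx ** 2 + dy ** 2 - radius_sum ** 2
    discriminant = b ** 2 - 4.0 * a * c
    if discriminant > 0.0:
        # The relative path crosses the safety radius: take the earliest
        # non-negative root as the predicted collision time.
        t0 = (-b - math.sqrt(discriminant)) / (2.0 * a)
        t1 = (-b + math.sqrt(discriminant)) / (2.0 * a)
        t = min(t0, t1) if min(t0, t1) >= 0.0 else max(t0, t1)
        return t, 0.0
    # No crossing: return the time of closest approach and the minimum of
    # the quadratic (squared clearance), as in the node above.
    t = -b / (2.0 * a)
    return t, a * t ** 2 + b * t + c


# Example: a pedestrian 10 m ahead closing at 5 m/s reaches a 2 m combined
# radius in roughly (10 - 2) / 5 = 1.6 s.
# print(_closest_approach_demo(10.0, 0.0, -5.0, 0.0))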
py
1a590401371eedd6547f85a129aa4d1cf0998999
"""Data types for agent-based learning.""" import collections import enum import akro import numpy as np from garage.np import concat_tensor_dict_list, slice_nested_dict # pylint: disable=too-many-lines class EpisodeBatch( collections.namedtuple('EpisodeBatch', [ 'env_spec', 'observations', 'last_observations', 'actions', 'rewards', 'env_infos', 'agent_infos', 'step_types', 'lengths', ])): # pylint: disable=missing-return-doc, missing-return-type-doc, missing-param-doc, missing-type-doc # noqa: E501 r"""A tuple representing a batch of whole episodes. Data type for on-policy algorithms. A :class:`~EpisodeBatch` represents a batch of whole episodes, produced when one or more agents interacts with one or more environments. +-----------------------+-------------------------------------------------+ | Symbol | Description | +=======================+=================================================+ | :math:`N` | Episode batch dimension | +-----------------------+-------------------------------------------------+ | :math:`[T]` | Variable-length time dimension of each | | | episode | +-----------------------+-------------------------------------------------+ | :math:`S^*` | Single-step shape of a time-series tensor | +-----------------------+-------------------------------------------------+ | :math:`N \bullet [T]` | A dimension computed by flattening a | | | variable-length time dimension :math:`[T]` into | | | a single batch dimension with length | | | :math:`sum_{i \in N} [T]_i` | +-----------------------+-------------------------------------------------+ Attributes: env_spec (EnvSpec): Specification for the environment from which this data was sampled. observations (numpy.ndarray): A numpy array of shape :math:`(N \bullet [T], O^*)` containing the (possibly multi-dimensional) observations for all time steps in this batch. These must conform to :obj:`EnvStep.observation_space`. last_observations (numpy.ndarray): A numpy array of shape :math:`(N, O^*)` containing the last observation of each episode. This is necessary since there are one more observations than actions every episode. actions (numpy.ndarray): A numpy array of shape :math:`(N \bullet [T], A^*)` containing the (possibly multi-dimensional) actions for all time steps in this batch. These must conform to :obj:`EnvStep.action_space`. rewards (numpy.ndarray): A numpy array of shape :math:`(N \bullet [T])` containing the rewards for all time steps in this batch. env_infos (dict): A dict of numpy arrays arbitrary environment state information. Each value of this dict should be a numpy array of shape :math:`(N \bullet [T])` or :math:`(N \bullet [T], S^*)`. agent_infos (numpy.ndarray): A dict of numpy arrays arbitrary agent state information. Each value of this dict should be a numpy array of shape :math:`(N \bullet [T])` or :math:`(N \bullet [T], S^*)`. For example, this may contain the hidden states from an RNN policy. step_types (numpy.ndarray): A numpy array of `StepType with shape :math:`(N,)` containing the time step types for all transitions in this batch. lengths (numpy.ndarray): An integer numpy array of shape :math:`(N,)` containing the length of each episode in this batch. This may be used to reconstruct the individual episodes. Raises: ValueError: If any of the above attributes do not conform to their prescribed types and shapes. 
""" __slots__ = () def __new__(cls, env_spec, observations, last_observations, actions, rewards, env_infos, agent_infos, step_types, lengths): # noqa: D102 # pylint: disable=too-many-branches first_observation = observations[0] first_action = actions[0] inferred_batch_size = lengths.sum() # lengths if len(lengths.shape) != 1: raise ValueError( 'Lengths tensor must be a tensor of shape (N,), but got a ' 'tensor of shape {} instead'.format(lengths.shape)) if not (lengths.dtype.kind == 'u' or lengths.dtype.kind == 'i'): raise ValueError( 'Lengths tensor must have an integer dtype, but got dtype {} ' 'instead.'.format(lengths.dtype)) # observations if not env_spec.observation_space.contains(first_observation): # Discrete actions can be either in the space normally, or one-hot # encoded. if isinstance(env_spec.observation_space, (akro.Box, akro.Discrete, akro.Dict)): if env_spec.observation_space.flat_dim != np.prod( first_observation.shape): raise ValueError('observations should have the same ' 'dimensionality as the observation_space ' '({}), but got data with shape {} ' 'instead'.format( env_spec.observation_space.flat_dim, first_observation.shape)) else: raise ValueError( 'observations must conform to observation_space {}, but ' 'got data with shape {} instead.'.format( env_spec.observation_space, first_observation)) if observations.shape[0] != inferred_batch_size: raise ValueError( 'Expected batch dimension of observations to be length {}, ' 'but got length {} instead.'.format(inferred_batch_size, observations.shape[0])) # observations if not env_spec.observation_space.contains(last_observations[0]): # Discrete actions can be either in the space normally, or one-hot # encoded. if isinstance(env_spec.observation_space, (akro.Box, akro.Discrete, akro.Dict)): if env_spec.observation_space.flat_dim != np.prod( last_observations[0].shape): raise ValueError('last_observations should have the same ' 'dimensionality as the observation_space ' '({}), but got data with shape {} ' 'instead'.format( env_spec.observation_space.flat_dim, last_observations[0].shape)) else: raise ValueError( 'last_observations must conform to observation_space {}, ' 'but got data with shape {} instead.'.format( env_spec.observation_space, last_observations[0])) if last_observations.shape[0] != len(lengths): raise ValueError( 'Expected batch dimension of last_observations to be length ' '{}, but got length {} instead.'.format( len(lengths), last_observations.shape[0])) # actions if not env_spec.action_space.contains(first_action): # Discrete actions can be either in the space normally, or one-hot # encoded. 
if isinstance(env_spec.action_space, (akro.Box, akro.Discrete, akro.Dict)): if env_spec.action_space.flat_dim != np.prod( first_action.shape): raise ValueError('actions should have the same ' 'dimensionality as the action_space ' '({}), but got data with shape {} ' 'instead'.format( env_spec.action_space.flat_dim, first_action.shape)) else: raise ValueError( 'actions must conform to action_space {}, but got data ' 'with shape {} instead.'.format(env_spec.action_space, first_action)) if actions.shape[0] != inferred_batch_size: raise ValueError( 'Expected batch dimension of actions to be length {}, but got ' 'length {} instead.'.format(inferred_batch_size, actions.shape[0])) # rewards if rewards.shape != (inferred_batch_size, ): raise ValueError( 'Rewards tensor must have shape {}, but got shape {} ' 'instead.'.format(inferred_batch_size, rewards.shape)) # env_infos for key, val in env_infos.items(): if not isinstance(val, (dict, np.ndarray)): raise ValueError( 'Each entry in env_infos must be a numpy array or ' 'dictionary, but got key {} with value type {} instead.'. format(key, type(val))) if (isinstance(val, np.ndarray) and val.shape[0] != inferred_batch_size): raise ValueError( 'Each entry in env_infos must have a batch dimension of ' 'length {}, but got key {} with batch size {} instead.'. format(inferred_batch_size, key, val.shape[0])) # agent_infos for key, val in agent_infos.items(): if not isinstance(val, (dict, np.ndarray)): raise ValueError( 'Each entry in agent_infos must be a numpy array or ' 'dictionary, but got key {} with value type {} instead.' 'instead'.format(key, type(val))) if (isinstance(val, np.ndarray) and val.shape[0] != inferred_batch_size): raise ValueError( 'Each entry in agent_infos must have a batch dimension of ' 'length {}, but got key {} with batch size {} instead.'. format(inferred_batch_size, key, val.shape[0])) # step_types if step_types.shape != (inferred_batch_size, ): raise ValueError( 'step_types tensor must have shape {}, but got shape {} ' 'instead.'.format(inferred_batch_size, step_types.shape)) if step_types.dtype != StepType: raise ValueError( 'step_types tensor must be dtype `StepType`, but got tensor ' 'of dtype {} instead.'.format(step_types.dtype)) return super().__new__(EpisodeBatch, env_spec, observations, last_observations, actions, rewards, env_infos, agent_infos, step_types, lengths) @classmethod def concatenate(cls, *batches): """Create a EpisodeBatch by concatenating EpisodeBatches. Args: batches (list[EpisodeBatch]): Batches to concatenate. Returns: EpisodeBatch: The concatenation of the batches. 
""" if __debug__: for b in batches: assert (set(b.env_infos.keys()) == set( batches[0].env_infos.keys())) assert (set(b.agent_infos.keys()) == set( batches[0].agent_infos.keys())) env_infos = { k: np.concatenate([b.env_infos[k] for b in batches]) for k in batches[0].env_infos.keys() } agent_infos = { k: np.concatenate([b.agent_infos[k] for b in batches]) for k in batches[0].agent_infos.keys() } return cls( env_spec=batches[0].env_spec, observations=np.concatenate( [batch.observations for batch in batches]), last_observations=np.concatenate( [batch.last_observations for batch in batches]), actions=np.concatenate([batch.actions for batch in batches]), rewards=np.concatenate([batch.rewards for batch in batches]), env_infos=env_infos, agent_infos=agent_infos, step_types=np.concatenate([batch.step_types for batch in batches]), lengths=np.concatenate([batch.lengths for batch in batches])) def split(self): """Split an EpisodeBatch into a list of EpisodeBatches. The opposite of concatenate. Returns: list[EpisodeBatch]: A list of EpisodeBatches, with one episode per batch. """ episodes = [] start = 0 for i, length in enumerate(self.lengths): stop = start + length eps = EpisodeBatch( env_spec=self.env_spec, observations=self.observations[start:stop], last_observations=np.asarray([self.last_observations[i]]), actions=self.actions[start:stop], rewards=self.rewards[start:stop], env_infos=slice_nested_dict(self.env_infos, start, stop), agent_infos=slice_nested_dict(self.agent_infos, start, stop), step_types=self.step_types[start:stop], lengths=np.asarray([length])) episodes.append(eps) start = stop return episodes def to_list(self): """Convert the batch into a list of dictionaries. Returns: list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys: * observations (np.ndarray): Non-flattened array of observations. Has shape (T, S^*) (the unflattened state space of the current environment). observations[i] was used by the agent to choose actions[i]. * next_observations (np.ndarray): Non-flattened array of observations. Has shape (T, S^*). next_observations[i] was observed by the agent after taking actions[i]. * actions (np.ndarray): Non-flattened array of actions. Should have shape (T, S^*) (the unflattened action space of the current environment). * rewards (np.ndarray): Array of rewards of shape (T,) (1D array of length timesteps). * agent_infos (dict[str, np.ndarray]): Dictionary of stacked, non-flattened `agent_info` arrays. * env_infos (dict[str, np.ndarray]): Dictionary of stacked, non-flattened `env_info` arrays. * step_types (numpy.ndarray): A numpy array of `StepType with shape (T,) containing the time step types for all transitions in this batch. """ start = 0 episodes = [] for i, length in enumerate(self.lengths): stop = start + length episodes.append({ 'observations': self.observations[start:stop], 'next_observations': np.concatenate((self.observations[1 + start:stop], [self.last_observations[i]])), 'actions': self.actions[start:stop], 'rewards': self.rewards[start:stop], 'env_infos': {k: v[start:stop] for (k, v) in self.env_infos.items()}, 'agent_infos': {k: v[start:stop] for (k, v) in self.agent_infos.items()}, 'step_types': self.step_types[start:stop] }) start = stop return episodes @classmethod def from_list(cls, env_spec, paths): """Create a EpisodeBatch from a list of episodes. Args: env_spec (EnvSpec): Specification for the environment from which this data was sampled. 
paths (list[dict[str, np.ndarray or dict[str, np.ndarray]]]): Keys: * observations (np.ndarray): Non-flattened array of observations. Typically has shape (T, S^*) (the unflattened state space of the current environment). observations[i] was used by the agent to choose actions[i]. observations may instead have shape (T + 1, S^*). * next_observations (np.ndarray): Non-flattened array of observations. Has shape (T, S^*). next_observations[i] was observed by the agent after taking actions[i]. Optional. Note that to ensure all information from the environment was preserved, observations[i] should have shape (T + 1, S^*), or this key should be set. However, this method is lenient and will "duplicate" the last observation if the original last observation has been lost. * actions (np.ndarray): Non-flattened array of actions. Should have shape (T, S^*) (the unflattened action space of the current environment). * rewards (np.ndarray): Array of rewards of shape (T,) (1D array of length timesteps). * agent_infos (dict[str, np.ndarray]): Dictionary of stacked, non-flattened `agent_info` arrays. * env_infos (dict[str, np.ndarray]): Dictionary of stacked, non-flattened `env_info` arrays. * step_types (numpy.ndarray): A numpy array of `StepType with shape (T,) containing the time step types for all transitions in this batch. """ lengths = np.asarray([len(p['rewards']) for p in paths]) if all( len(path['observations']) == length + 1 for (path, length) in zip(paths, lengths)): last_observations = np.asarray( [p['observations'][-1] for p in paths]) observations = np.concatenate( [p['observations'][:-1] for p in paths]) else: # The number of observations and timesteps must match. observations = np.concatenate([p['observations'] for p in paths]) if paths[0].get('next_observations') is not None: last_observations = np.asarray( [p['next_observations'][-1] for p in paths]) else: last_observations = np.asarray( [p['observations'][-1] for p in paths]) stacked_paths = concat_tensor_dict_list(paths) # Temporary solution. This logic is not needed if algorithms process # step_types instead of dones directly. if 'dones' in stacked_paths and 'step_types' not in stacked_paths: step_types = np.array([ StepType.TERMINAL if done else StepType.MID for done in stacked_paths['dones'] ], dtype=StepType) stacked_paths['step_types'] = step_types del stacked_paths['dones'] return cls(env_spec=env_spec, observations=observations, last_observations=last_observations, actions=stacked_paths['actions'], rewards=stacked_paths['rewards'], env_infos=stacked_paths['env_infos'], agent_infos=stacked_paths['agent_infos'], step_types=stacked_paths['step_types'], lengths=lengths) @property def next_observations(self): """Get the observations seen after actions are performed. Usually, in an :class:`~EpisodeBatch`, next_observations don't need to be stored explicitly, since the next observation is already stored in the batch. Returns: np.ndarray: The "next_observations". """ return np.concatenate( tuple([ np.concatenate((eps.observations[1:], eps.last_observations)) for eps in self.split() ])) class StepType(enum.IntEnum): """Defines the status of a :class:`~TimeStep` within a sequence. Note that the last :class:`~TimeStep` in a sequence can either be :attribute:`StepType.TERMINAL` or :attribute:`StepType.TIMEOUT`. 
Suppose max_episode_length = 5: * A success sequence terminated at step 4 will look like: FIRST, MID, MID, TERMINAL * A success sequence terminated at step 5 will look like: FIRST, MID, MID, MID, TERMINAL * An unsuccessful sequence truncated by time limit will look like: FIRST, MID, MID, MID, TIMEOUT """ # Denotes the first :class:`~TimeStep` in a sequence. FIRST = 0 # Denotes any :class:`~TimeStep` in the middle of a sequence (i.e. not the # first or last one). MID = 1 # Denotes the last :class:`~TimeStep` in a sequence that terminates # successfully. TERMINAL = 2 # Denotes the last :class:`~TimeStep` in a sequence truncated by time # limit. TIMEOUT = 3 @classmethod def get_step_type(cls, step_cnt, max_episode_length, done): """Determines the step type based on step cnt and done signal. Args: step_cnt (int): current step cnt of the environment. max_episode_length (int): maximum episode length. done (bool): the done signal returned by Environment. Returns: StepType: the step type. Raises: ValueError: if step_cnt is < 1. In this case a environment's `reset()` is likely not called yet and the step_cnt is None. """ if max_episode_length is not None and step_cnt >= max_episode_length: return StepType.TIMEOUT elif done: return StepType.TERMINAL elif step_cnt == 1: return StepType.FIRST elif step_cnt < 1: raise ValueError('Expect step_cnt to be >= 1, but got {} ' 'instead. Did you forget to call `reset(' ')`?'.format(step_cnt)) else: return StepType.MID class TimeStep( collections.namedtuple('TimeStep', [ 'env_spec', 'observation', 'action', 'reward', 'next_observation', 'env_info', 'agent_info', 'step_type' ])): # pylint: disable=missing-return-doc, missing-return-type-doc, missing-param-doc, missing-type-doc # noqa: E501 r"""A tuple representing a single TimeStep. A :class:`~TimeStep` represents a single sample when an agent interacts with an environment. It describes as SARS (State–action–reward–state) tuple that characterizes the evolution of a MDP. Attributes: env_spec (EnvSpec): Specification for the environment from which this data was sampled. observation (numpy.ndarray): A numpy array of shape :math:`(O^*)` containing the observation for the this time step in the environment. These must conform to :obj:`EnvStep.observation_space`. The observation before applying the action. `None` if `step_type` is `StepType.FIRST`, i.e. at the start of a sequence. action (numpy.ndarray): A numpy array of shape :math:`(A^*)` containing the action for the this time step. These must conform to :obj:`EnvStep.action_space`. `None` if `step_type` is `StepType.FIRST`, i.e. at the start of a sequence. reward (float): A float representing the reward for taking the action given the observation, at the this time step. `None` if `step_type` is `StepType.FIRST`, i.e. at the start of a sequence. next_observation (numpy.ndarray): A numpy array of shape :math:`(O^*)` containing the observation for the this time step in the environment. These must conform to :obj:`EnvStep.observation_space`. The observation after applying the action. env_info (dict): A dict arbitrary environment state information. agent_info (dict): A dict of arbitrary agent state information. For example, this may contain the hidden states from an RNN policy. step_type (StepType): a :class:`~StepType` enum value. Can be one of :attribute:`~StepType.FIRST`, :attribute:`~StepType.MID`, :attribute:`~StepType.TERMINAL`, or :attribute:`~StepType.TIMEOUT`. 
""" @property def first(self): """bool: Whether this step is the first of its episode.""" return self.step_type is StepType.FIRST @property def mid(self): """bool: Whether this step is in the middle of its episode.""" return self.step_type is StepType.MID @property def terminal(self): """bool: Whether this step records a termination condition.""" return self.step_type is StepType.TERMINAL @property def timeout(self): """bool: Whether this step records a timeout condition.""" return self.step_type is StepType.TIMEOUT @property def last(self): """bool: Whether this step is the last of its episode.""" return self.step_type is StepType.TERMINAL or self.step_type \ is StepType.TIMEOUT @classmethod def from_env_step(cls, env_step, last_observation, agent_info): """Create a TimeStep from a EnvStep. Args: env_step (EnvStep): the env step returned by the environment. last_observation (numpy.ndarray): A numpy array of shape :math:`(O^*)` containing the observation for the this time step in the environment. These must conform to :obj:`EnvStep.observation_space`. The observation before applying the action. agent_info (dict): A dict of arbitrary agent state information. Returns: TimeStep: The TimeStep with all information of EnvStep plus the agent info. """ return cls(env_spec=env_step.env_spec, observation=last_observation, action=env_step.action, reward=env_step.reward, next_observation=env_step.observation, env_info=env_step.env_info, agent_info=agent_info, step_type=env_step.step_type) class InOutSpec: """Describes the input and output spaces of a primitive or module. Args: input_space (akro.Space): Input space of a module. output_space (akro.Space): Output space of a module. """ def __init__(self, input_space, output_space): self._input_space = input_space self._output_space = output_space @property def input_space(self): """Get input space of the module. Returns: akro.Space: Input space of the module. """ return self._input_space @property def output_space(self): """Get output space of the module. Returns: akro.Space: Output space of the module. """ return self._output_space class TimeStepBatch( collections.namedtuple('TimeStepBatch', [ 'env_spec', 'observations', 'actions', 'rewards', 'next_observations', 'env_infos', 'agent_infos', 'step_types' ])): # pylint: disable=missing-param-doc, missing-type-doc """A tuple representing a batch of TimeSteps. Data type for off-policy algorithms, imitation learning and batch-RL. Attributes: env_spec (EnvSpec): Specification for the environment from which this data was sampled. observations (numpy.ndarray): Non-flattened array of observations. Typically has shape (batch_size, S^*) (the unflattened state space of the current environment). actions (numpy.ndarray): Non-flattened array of actions. Should have shape (batch_size, S^*) (the unflattened action space of the current environment). rewards (numpy.ndarray): Array of rewards of shape (batch_size, 1). next_observation (numpy.ndarray): Non-flattened array of next observations. Has shape (batch_size, S^*). next_observations[i] was observed by the agent after taking actions[i]. env_infos (dict): A dict arbitrary environment state information. agent_infos (dict): A dict of arbitrary agent state information. For example, this may contain the hidden states from an RNN policy. step_types (numpy.ndarray): A numpy array of `StepType with shape ( batch_size,) containing the time step types for all transitions in this batch. 
Raises: ValueError: If any of the above attributes do not conform to their prescribed types and shapes. """ __slots__ = () def __new__(cls, env_spec, observations, actions, rewards, next_observations, env_infos, agent_infos, step_types): # noqa: D102 # pylint: disable=missing-return-doc, missing-return-type-doc, # pylint: disable=too-many-branches inferred_batch_size = len(rewards) if inferred_batch_size < 1: raise ValueError( 'Expected batch dimension of rewards to be greater than 1, ' 'but got length {} instead.'.format(inferred_batch_size)) first_observation = observations[0] first_action = actions[0] # observation if not env_spec.observation_space.contains(first_observation): if isinstance(env_spec.observation_space, (akro.Box, akro.Discrete, akro.Dict)): if env_spec.observation_space.flat_dim != np.prod( first_observation.shape): raise ValueError('observations should have the same ' 'dimensionality as the observation_space ' '({}), but got data with shape {} ' 'instead'.format( env_spec.observation_space.flat_dim, first_observation.shape)) else: raise ValueError( 'observations must conform to observation_space {}, ' 'but got data with shape {} instead.'.format( env_spec.observation_space, first_observation.shape)) if observations.shape[0] != inferred_batch_size: raise ValueError( 'Expected batch dimension of observations to be length {}, ' 'but got length {} instead.'.format(inferred_batch_size, observations.shape[0])) # next_observation if not env_spec.observation_space.contains(next_observations[0]): if isinstance(env_spec.observation_space, (akro.Box, akro.Discrete, akro.Dict)): if env_spec.observation_space.flat_dim != np.prod( next_observations[0].shape): raise ValueError('next_observations should have the same ' 'dimensionality as the observation_space ' '({}), but got data with shape {} ' 'instead'.format( env_spec.observation_space.flat_dim, next_observations[0].shape)) else: raise ValueError( 'next_observations must conform to observation_space {}, ' 'but got data with shape {} instead.'.format( env_spec.observation_space, next_observations[0].shape[0])) if next_observations.shape[0] != inferred_batch_size: raise ValueError( 'Expected batch dimension of next_observations to be length {' '}, but got length {} instead.'.format( inferred_batch_size, next_observations[0].shape[0])) # action if not env_spec.action_space.contains(first_action): if isinstance(env_spec.action_space, (akro.Box, akro.Discrete, akro.Dict)): if env_spec.action_space.flat_dim != np.prod( first_action.shape): raise ValueError('actions should have the same ' 'dimensionality as the action_space ' '({}), but got data with shape {} ' 'instead'.format( env_spec.action_space.flat_dim, first_action.shape)) else: raise ValueError('actions must conform to action_space {}, ' 'but got data with shape {} instead.'.format( env_spec.action_space, first_action.shape)) if actions.shape[0] != inferred_batch_size: raise ValueError( 'Expected batch dimension of actions to be length {}, but got ' 'length {} instead.'.format(inferred_batch_size, actions.shape[0])) # rewards if rewards.shape != (inferred_batch_size, 1): raise ValueError( 'Rewards tensor must have shape {}, but got shape {} ' 'instead.'.format((inferred_batch_size, 1), rewards.shape)) # step_types if step_types.shape[0] != inferred_batch_size: raise ValueError( 'Expected batch dimension of step_types to be length {}, ' 'but got ' 'length {} instead.'.format(inferred_batch_size, rewards.shape[0])) for step_type in step_types: if not isinstance(step_type, 
StepType): raise ValueError( 'Each entry in step_types must be a StepType, but got' ' value type {} instead.'.format(type(step_type))) # env_infos for key, val in env_infos.items(): if not isinstance(val, (dict, np.ndarray)): raise ValueError( 'Each entry in env_infos must be a numpy array or ' 'dictionary, but got key {} with value type {} ' 'instead.'.format(key, type(val))) if (isinstance(val, np.ndarray) and val.shape[0] != inferred_batch_size): raise ValueError( 'Each entry in env_infos must have a batch dimension ' 'of ' 'length {}, but got key {} with batch size {} instead.'. format(inferred_batch_size, key, val.shape[0])) # agent_infos for key, val in agent_infos.items(): if not isinstance(val, (dict, np.ndarray)): raise ValueError( 'Each entry in agent_infos must be a numpy array or ' 'dictionary, but got key {} with value type {} instead.' 'instead'.format(key, type(val))) if (isinstance(val, np.ndarray) and val.shape[0] != inferred_batch_size): raise ValueError( 'Each entry in agent_infos must have a batch ' 'dimension of ' 'length {}, but got key {} with batch size {} instead.'. format(inferred_batch_size, key, val.shape[0])) return super().__new__(TimeStepBatch, env_spec, observations, actions, rewards, next_observations, env_infos, agent_infos, step_types) @classmethod def concatenate(cls, *batches): """Concatenate two or more :class:`TimeStepBatch`s. Args: batches (list[TimeStepBatch]): Batches to concatenate. Returns: TimeStepBatch: The concatenation of the batches. Raises: ValueError: If no TimeStepBatches are provided. """ if len(batches) < 1: raise ValueError('Please provide at least one TimeStepBatch to ' 'concatenate') env_infos = { k: np.concatenate([b.env_infos[k] for b in batches]) for k in batches[0].env_infos.keys() } agent_infos = { k: np.concatenate([b.agent_infos[k] for b in batches]) for k in batches[0].agent_infos.keys() } return cls( env_spec=batches[0].env_spec, observations=np.concatenate( [batch.observations for batch in batches]), actions=np.concatenate([batch.actions for batch in batches]), rewards=np.concatenate([batch.rewards for batch in batches]), next_observations=np.concatenate( [batch.next_observations for batch in batches]), env_infos=env_infos, agent_infos=agent_infos, step_types=np.concatenate([batch.step_types for batch in batches])) def split(self): """Split a :class:`~TimeStepBatch` into a list of :class:`~TimeStepBatch`s. The opposite of concatenate. Returns: list[TimeStepBatch]: A list of :class:`TimeStepBatch`s, with one :class:`~TimeStep` per :class:`~TimeStepBatch`. """ time_steps = [] for i in range(len(self.rewards)): time_step = TimeStepBatch( env_spec=self.env_spec, observations=np.asarray([self.observations[i]]), actions=np.asarray([self.actions[i]]), rewards=np.asarray([self.rewards[i]]), next_observations=np.asarray([self.next_observations[i]]), env_infos={ k: np.asarray([v[i]]) for (k, v) in self.env_infos.items() }, agent_infos={ k: np.asarray([v[i]]) for (k, v) in self.agent_infos.items() }, step_types=np.asarray([self.step_types[i]], dtype=StepType)) time_steps.append(time_step) return time_steps def to_time_step_list(self): """Convert the batch into a list of dictionaries. Breaks the :class:`~TimeStepBatch` into a list of single time step sample dictionaries. len(rewards) (or the number of discrete time step) dictionaries are returned Returns: list[dict[str, np.ndarray or dict[str, np.ndarray]]]: Keys: observations (numpy.ndarray): Non-flattened array of observations. 
Typically has shape (batch_size, S^*) (the unflattened state space of the current environment). actions (numpy.ndarray): Non-flattened array of actions. Should have shape (batch_size, S^*) (the unflattened action space of the current environment). rewards (numpy.ndarray): Array of rewards of shape ( batch_size,) (1D array of length batch_size). next_observation (numpy.ndarray): Non-flattened array of next observations. Has shape (batch_size, S^*). next_observations[i] was observed by the agent after taking actions[i]. env_infos (dict): A dict arbitrary environment state information. agent_infos (dict): A dict of arbitrary agent state information. For example, this may contain the hidden states from an RNN policy. step_types (numpy.ndarray): A numpy array of `StepType with shape (batch_size,) containing the time step types for all transitions in this batch. """ samples = [] for i in range(len(self.rewards)): samples.append({ 'observations': np.asarray([self.observations[i]]), 'actions': np.asarray([self.actions[i]]), 'rewards': np.asarray([self.rewards[i]]), 'next_observations': np.asarray([self.next_observations[i]]), 'env_infos': {k: np.asarray([v[i]]) for (k, v) in self.env_infos.items()}, 'agent_infos': {k: np.asarray([v[i]]) for (k, v) in self.agent_infos.items()}, 'step_types': np.asarray([self.step_types[i]]), }) return samples @property def terminals(self): """Get an array of boolean indicating ternianal information. Returns: numpy.ndarray: An array of boolean of shape (batch_size, 1) indicating whether the `StepType is `TERMINAL """ return np.array([[s == StepType.TERMINAL] for s in self.step_types]) @classmethod def from_time_step_list(cls, env_spec, ts_samples): """Create a :class:`~TimeStepBatch` from a list of time step dictionaries. Args: env_spec (EnvSpec): Specification for the environment from which this data was sampled. ts_samples (list[dict[str, np.ndarray or dict[str, np.ndarray]]]): keys: * observations (numpy.ndarray): Non-flattened array of observations. Typically has shape (batch_size, S^*) (the unflattened state space of the current environment). * actions (numpy.ndarray): Non-flattened array of actions. Should have shape (batch_size, S^*) (the unflattened action space of the current environment). * rewards (numpy.ndarray): Array of rewards of shape ( batch_size,) (1D array of length batch_size). * next_observation (numpy.ndarray): Non-flattened array of next observations. Has shape (batch_size, S^*). next_observations[i] was observed by the agent after taking actions[i]. * env_infos (dict): A dict arbitrary environment state information. * agent_infos (dict): A dict of arbitrary agent state information. For example, this may contain the hidden states from an RNN policy. * step_types (numpy.ndarray): A numpy array of `StepType with shape (batch_size,) containing the time step types for all transitions in this batch. Returns: TimeStepBatch: The concatenation of samples. Raises: ValueError: If no dicts are provided. """ if len(ts_samples) < 1: raise ValueError('Please provide at least one dict') ts_batches = [ TimeStepBatch(env_spec=env_spec, observations=sample['observations'], actions=sample['actions'], rewards=sample['rewards'], next_observations=sample['next_observations'], env_infos=sample['env_infos'], agent_infos=sample['agent_infos'], step_types=sample['step_types']) for sample in ts_samples ] return TimeStepBatch.concatenate(*ts_batches) @classmethod def from_episode_batch(cls, batch): """Construct a :class:`~TimeStepBatch` from an :class:`~EpisodeBatch`. 
Args: batch (EpisodeBatch): Episode batch to convert. Returns: TimeStepBatch: The converted batch. """ next_observations = np.concatenate( tuple([ np.concatenate((eps.observations[1:], eps.last_observations)) for eps in batch.split() ])) return cls(env_spec=batch.env_spec, observations=batch.observations, actions=batch.actions, rewards=batch.rewards.reshape(-1, 1), next_observations=next_observations, env_infos=batch.env_infos, agent_infos=batch.agent_infos, step_types=batch.step_types)
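
# --- Illustrative sketch (not part of the original module) ---
# StepType.get_step_type encodes the bookkeeping described in its docstring:
# the first step of an episode is FIRST, a step count that reaches
# max_episode_length is TIMEOUT, a done signal before that is TERMINAL, and
# everything else is MID. The step counts below are made up for illustration.
def _step_type_demo():
    assert StepType.get_step_type(1, 5, False) is StepType.FIRST
    assert StepType.get_step_type(3, 5, False) is StepType.MID
    assert StepType.get_step_type(4, 5, True) is StepType.TERMINAL
    assert StepType.get_step_type(5, 5, False) is StepType.TIMEOUT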
py
1a59042cf2dea4686ecce672f433aa7ffee1b42a
#!/usr/local/bin/python3

import argparse
import random


class GenerationConfig:
    count = 0
    maximum = 0
    minimum = 0

    def __init__(self, count=100, maximum=100, minimum=0):
        if count <= 0:
            raise Exception("Count must be positive!")
        if minimum >= maximum:
            raise Exception("Maximum must be greater than minimum!")
        self.count = count
        self.maximum = maximum
        self.minimum = minimum

    def generate(self):
        result = []
        for i in range(self.count):
            result.append(random.randint(self.minimum, self.maximum))
        return result


def process_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--count", help="number of items to generate",
                        default=100, required=False)
    parser.add_argument("--max", help="maximum value of generated items",
                        default=100, required=False)
    parser.add_argument("--min", help="minimum value of generated items",
                        default=0, required=False)

    args = parser.parse_args()
    return GenerationConfig(int(args.count), int(args.max), int(args.min))


def generate(generationConfig):
    return generationConfig.generate()


def main():
    generationConfig = process_args()
    data = generate(generationConfig)
    for item in data:
        print(item)


if __name__ == '__main__':
    main()
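
# --- Illustrative usage note (not part of the original script) ---
# The script is driven entirely by its command-line flags; assuming it is
# saved as "generate.py" (a hypothetical file name), a run could look like:
#
#     python3 generate.py --count 10 --min 5 --max 50
#
# which prints ten random integers between 5 and 50, one per line. Passing
# count <= 0 or min >= max raises the exceptions defined in
# GenerationConfig.__init__.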
py
1a5904894da575e63be0b62d5a1997595baae7a4
"""Utility functions for tensor operations """ import numpy as np from six.moves import xrange def _check_1d_vector(vector): """Check 1D vector shape Check 1D vector shape. array with shape [n, 1] or [n, ] are accepted. Will return a 1 dimension vector. Parameters ---------- vector : array (n,) or (n, 1) rank one vector Returns ------- vector : array, (n,) """ v_shape = vector.shape if len(v_shape) == 1: return vector elif len(v_shape) == 2 and v_shape[1] == 1: return vector.reshape(v_shape[0],) else: raise ValueError("Vector is not 1-d array: shape %s" % str(v_shape)) def _check_square_matrix(matrix): """Check 2D matrix shape Check 1D vector shape. array with shape [n, 1] or [n, ] are accepted. Will return a 1 dimension vector. Parameters ---------- matrix : (n, n) rank one vector Returns ------- matrix : array, (n, n) """ m_shape = matrix.shape if len(m_shape) == 2: if m_shape[0] != m_shape[1]: raise ValueError("matrix is not square: shape %s" % str(m_shape)) return matrix else: raise ValueError("matrix is not 2-d array: shape %s" % str(m_shape)) def rank_1_tensor_3d(a, b, c): """Generate a 3-D tensor from 3 1-D vectors Generate a 3D tensor from 3 rank one vectors `a`, `b`, and `c`. The returned 3-D tensor is in unfolded format. Parameters ---------- a : array, shape (n,) first rank one vector b : array, shape (n,) second rank one vector c : array, shape (n,) thrid rank one vector Returns ------- tensor: array, (n, n * n) 3D tensor in unfolded format. element (i, j, k) will map to (i, (n * k) + j) """ a = _check_1d_vector(a) b = _check_1d_vector(b) c = _check_1d_vector(c) dim = a.shape[0] # check dimension if (dim != b.shape[0]) or (dim != c.shape[0]): raise ValueError("Vector dimension mismatch: (%d, %d, %d)" % (dim, b.shape[0], c.shape[0])) outter = b[:, np.newaxis] * c[:, np.newaxis].T tensor = a[:, np.newaxis] * outter.ravel(order='F')[np.newaxis, :] return tensor def tensor_3d_from_vector_matrix(a, b): """Generate 3-D tensor from 1-D vector and 2-D matrix Generate a 3D tensor from a 1-D vector `a` and 2-D matrix `b`. The returned 3-D tensor is in unfolded format. Parameters ---------- a : array, shape (m,) 1-D vector b : 2-D array, shape (n, p) 2-D matrix Returns ------- tensor: array, (m, n * p) 3D tensor in unfolded format. """ a = _check_1d_vector(a) tensor = a[:, np.newaxis] * b.ravel(order='F')[np.newaxis, :] return tensor def tensor_3d_from_matrix_vector(b, a): """Generate 3-D tensor from 2-D matrix and 1-D vector This function is similar to `tensor_3d_from_vector_matrix` function. The only difference is the first argument is 2-D matrix and the second element is 1-D vector. Parameters ---------- b : array, shape (m, n) 2-D matrix a : array, shape (p,) vector Returns ------- tensor : array, shape (m, n * p) 3D tensor in unfolded format. """ len_a = a.shape[0] n_col = b.shape[1] tensor = np.tile(b, len_a) for i in xrange(len_a): col_from = n_col * i col_to = n_col * (i+1) tensor[:, col_from:col_to] *= a[i] return tensor def tensor_3d_permute(tensor, tensor_shape, a, b, c): """Permute the mode of a 3-D tensor This is a slow implementation to generate 3-D tensor permutations. Parameters ---------- tensor : 2D array, shape (n, m * k) 3D tensor in unfolded format tensor_shape : int triple Shape of the tensor. Since tensor is in unfolded format. We need it's real format to calculate permutation. 
a : int, {1, 2, 3} new first index } b : int, {1, 2, 3} new second index c : int, {1, 2, 3} new thrid order index Return ------ permuted_tensor: 2D array Permuted tensor, element (i_1, i_2, i_3) in the permuted tensor will be element (i_a, i_b, i_c) in the original tensor """ # TODO: check parameter a_idx = a - 1 b_idx = b - 1 c_idx = c - 1 # TODO: move this part to cython loop n_col = tensor_shape[1] dim1 = tensor_shape[a_idx] dim2 = tensor_shape[b_idx] dim3 = tensor_shape[c_idx] permuted_tensor = np.empty((dim1, dim2 * dim3)) old_idx = np.zeros(3).astype('int32') for i in xrange(dim1): for j in xrange(dim2): for k in xrange(dim3): old_idx[a_idx] = i old_idx[b_idx] = j old_idx[c_idx] = k old_val = tensor[old_idx[0], (n_col * old_idx[2]) + old_idx[1]] # new index permuted_tensor[i, (dim2 * k) + j] = old_val return permuted_tensor def khatri_rao_prod(a, b): """Khatri-Rao product Generate Khatri-Rao product from 2 2-D matrix. Parameters ---------- a : 2D array, shape (n, k) first matrix b : 2D array, shape (m, k) second matrix Returns ------- matrix : 2D array, shape (n * m, k) Khatri-Rao product of `a` and `b` """ a_row, a_col = a.shape b_row, b_col = b.shape # check column size if a_col != b_col: raise ValueError("column dimension mismatch: %d != %d" % a_col, b_col) matrix = np.empty((a_row * b_row, a_col)) for i in xrange(a_col): matrix[:, i] = np.kron(a[:, i], b[:, i]) return matrix def tensor_3d_prod(tensor, a, b, c): """Calculate product of 3D tensor with matrix on each dimension TODO: move it to test Parameters ---------- tensor : 3D array, shape (n1, n2, n3) a : array, (n1, m) b : array, (n2, n) c : array, (n3, p) Returns ------- t_abc : array, (m, n, p) tensor(a, b, c) """ n1, n2, n3 = tensor.shape n1_, m = a.shape n2_, n = b.shape n3_, p = c.shape # (n1, n2, p) t_c = np.dot(tensor, c) t_bc = np.empty((n1, n, p)) for i in xrange(n1): # (n, p) = (n, n2) * (n2, p) t_bc[i, :, :] = np.dot(b.T, t_c[i, :, :]) t_abc = np.empty((m, n, p)) for i in xrange(p): t_abc[:, :, i] = np.dot(a.T, t_bc[:, :, i]) return t_abc
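A small consistency check, assuming the rank_1_tensor_3d and khatri_rao_prod functions above are importable: element (i, j, k) of the rank-1 tensor lands at column n*k + j of the unfolded matrix, and each Khatri-Rao column is the Kronecker product of the corresponding input columns.

import numpy as np

a = np.array([1.0, 2.0])
b = np.array([3.0, 4.0])
c = np.array([5.0, 6.0])

t = rank_1_tensor_3d(a, b, c)   # shape (2, 4), unfolded format
n = a.shape[0]
i, j, k = 1, 0, 1
# element (i, j, k) maps to column n * k + j of the unfolded tensor
assert np.isclose(t[i, n * k + j], a[i] * b[j] * c[k])

x = np.random.rand(3, 2)
y = np.random.rand(4, 2)
kr = khatri_rao_prod(x, y)      # shape (12, 2)
assert np.allclose(kr[:, 0], np.kron(x[:, 0], y[:, 0]))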
py
1a5905320f9832d5ae2dfffec8c29e3b2c712a23
# Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. """ Tests for the API /conductors/ methods. """ import datetime import mock from oslo_config import cfg from oslo_utils import timeutils from oslo_utils import uuidutils from six.moves import http_client from ironic.api.controllers import base as api_base from ironic.api.controllers import v1 as api_v1 from ironic.tests.unit.api import base as test_api_base from ironic.tests.unit.objects import utils as obj_utils class TestListConductors(test_api_base.BaseApiTest): def test_empty(self): data = self.get_json( '/conductors', headers={api_base.Version.string: str(api_v1.max_version())}) self.assertEqual([], data['conductors']) def test_list(self): obj_utils.create_test_conductor(self.context, hostname='why care') obj_utils.create_test_conductor(self.context, hostname='why not') data = self.get_json( '/conductors', headers={api_base.Version.string: str(api_v1.max_version())}) self.assertEqual(2, len(data['conductors'])) for c in data['conductors']: self.assertIn('hostname', c) self.assertIn('conductor_group', c) self.assertIn('alive', c) self.assertNotIn('drivers', c) self.assertEqual(data['conductors'][0]['hostname'], 'why care') self.assertEqual(data['conductors'][1]['hostname'], 'why not') def test_list_with_detail(self): obj_utils.create_test_conductor(self.context, hostname='why care') obj_utils.create_test_conductor(self.context, hostname='why not') data = self.get_json( '/conductors?detail=true', headers={api_base.Version.string: str(api_v1.max_version())}) self.assertEqual(2, len(data['conductors'])) for c in data['conductors']: self.assertIn('hostname', c) self.assertIn('drivers', c) self.assertIn('conductor_group', c) self.assertIn('alive', c) self.assertIn('drivers', c) self.assertEqual(data['conductors'][0]['hostname'], 'why care') self.assertEqual(data['conductors'][1]['hostname'], 'why not') def test_list_with_invalid_api(self): response = self.get_json( '/conductors', headers={api_base.Version.string: '1.48'}, expect_errors=True) self.assertEqual(http_client.NOT_FOUND, response.status_int) def test_get_one(self): obj_utils.create_test_conductor(self.context, hostname='rocky.rocks') data = self.get_json( '/conductors/rocky.rocks', headers={api_base.Version.string: str(api_v1.max_version())}) self.assertIn('hostname', data) self.assertIn('drivers', data) self.assertIn('conductor_group', data) self.assertIn('alive', data) self.assertIn('drivers', data) self.assertEqual(data['hostname'], 'rocky.rocks') self.assertTrue(data['alive']) @mock.patch.object(timeutils, 'utcnow', autospec=True) def test_get_one_conductor_offline(self, mock_utcnow): self.config(heartbeat_timeout=10, group='conductor') _time = datetime.datetime(2000, 1, 1, 0, 0) mock_utcnow.return_value = _time obj_utils.create_test_conductor(self.context, hostname='rocky.rocks') mock_utcnow.return_value = _time + datetime.timedelta(seconds=30) data = self.get_json( '/conductors/rocky.rocks', headers={api_base.Version.string: str(api_v1.max_version())}) self.assertIn('hostname', data) 
self.assertIn('drivers', data) self.assertIn('conductor_group', data) self.assertIn('alive', data) self.assertIn('drivers', data) self.assertEqual(data['hostname'], 'rocky.rocks') self.assertFalse(data['alive']) def test_get_one_with_invalid_api(self): response = self.get_json( '/conductors/rocky.rocks', headers={api_base.Version.string: '1.48'}, expect_errors=True) self.assertEqual(http_client.NOT_FOUND, response.status_int) def test_get_one_custom_fields(self): obj_utils.create_test_conductor(self.context, hostname='rocky.rocks') fields = 'hostname,alive' data = self.get_json( '/conductors/rocky.rocks?fields=%s' % fields, headers={api_base.Version.string: str(api_v1.max_version())}) self.assertItemsEqual(['hostname', 'alive', 'links'], data) def test_get_collection_custom_fields(self): obj_utils.create_test_conductor(self.context, hostname='rocky.rocks') obj_utils.create_test_conductor(self.context, hostname='stein.rocks') fields = 'hostname,alive' data = self.get_json( '/conductors?fields=%s' % fields, headers={api_base.Version.string: str(api_v1.max_version())}) self.assertEqual(2, len(data['conductors'])) for c in data['conductors']: self.assertItemsEqual(['hostname', 'alive', 'links'], c) def test_get_custom_fields_invalid_fields(self): obj_utils.create_test_conductor(self.context, hostname='rocky.rocks') fields = 'hostname,spongebob' response = self.get_json( '/conductors/rocky.rocks?fields=%s' % fields, headers={api_base.Version.string: str(api_v1.max_version())}, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn('spongebob', response.json['error_message']) def _test_links(self, public_url=None): cfg.CONF.set_override('public_endpoint', public_url, 'api') obj_utils.create_test_conductor(self.context, hostname='rocky.rocks') headers = {api_base.Version.string: str(api_v1.max_version())} data = self.get_json( '/conductors/rocky.rocks', headers=headers) self.assertIn('links', data) self.assertEqual(2, len(data['links'])) self.assertIn('rocky.rocks', data['links'][0]['href']) for l in data['links']: bookmark = l['rel'] == 'bookmark' self.assertTrue(self.validate_link(l['href'], bookmark=bookmark, headers=headers)) if public_url is not None: expected = [{'href': '%s/v1/conductors/rocky.rocks' % public_url, 'rel': 'self'}, {'href': '%s/conductors/rocky.rocks' % public_url, 'rel': 'bookmark'}] for i in expected: self.assertIn(i, data['links']) def test_links(self): self._test_links() def test_links_public_url(self): self._test_links(public_url='http://foo') def test_collection_links(self): conductors = [] for id in range(5): hostname = uuidutils.generate_uuid() conductor = obj_utils.create_test_conductor(self.context, hostname=hostname) conductors.append(conductor.hostname) data = self.get_json( '/conductors/?limit=3', headers={api_base.Version.string: str(api_v1.max_version())}) self.assertEqual(3, len(data['conductors'])) next_marker = data['conductors'][-1]['hostname'] self.assertIn(next_marker, data['next']) def test_collection_links_default_limit(self): cfg.CONF.set_override('max_limit', 3, 'api') conductors = [] for id in range(5): hostname = uuidutils.generate_uuid() conductor = obj_utils.create_test_conductor(self.context, hostname=hostname) conductors.append(conductor.hostname) data = self.get_json( '/conductors', headers={api_base.Version.string: str(api_v1.max_version())}) self.assertEqual(3, len(data['conductors'])) next_marker = data['conductors'][-1]['hostname'] 
self.assertIn(next_marker, data['next']) def test_collection_links_custom_fields(self): cfg.CONF.set_override('max_limit', 3, 'api') conductors = [] fields = 'hostname,alive' for id in range(5): hostname = uuidutils.generate_uuid() conductor = obj_utils.create_test_conductor(self.context, hostname=hostname) conductors.append(conductor.hostname) data = self.get_json( '/conductors?fields=%s' % fields, headers={api_base.Version.string: str(api_v1.max_version())}) self.assertEqual(3, len(data['conductors'])) next_marker = data['conductors'][-1]['hostname'] self.assertIn(next_marker, data['next']) self.assertIn('fields', data['next']) def test_sort_key(self): conductors = [] for id in range(5): hostname = uuidutils.generate_uuid() conductor = obj_utils.create_test_conductor(self.context, hostname=hostname) conductors.append(conductor.hostname) data = self.get_json( '/conductors?sort_key=hostname', headers={api_base.Version.string: str(api_v1.max_version())}) hosts = [n['hostname'] for n in data['conductors']] self.assertEqual(sorted(conductors), hosts) def test_sort_key_invalid(self): invalid_keys_list = ['alive', 'drivers'] headers = {api_base.Version.string: str(api_v1.max_version())} for invalid_key in invalid_keys_list: response = self.get_json('/conductors?sort_key=%s' % invalid_key, headers=headers, expect_errors=True) self.assertEqual(http_client.BAD_REQUEST, response.status_int) self.assertEqual('application/json', response.content_type) self.assertIn(invalid_key, response.json['error_message'])
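The behaviour these tests exercise can also be reproduced against a live ironic API; a hypothetical sketch with requests, where the endpoint host is an assumption and the microversion is inferred from the test above showing that 1.48 is rejected:

import requests

# Assumed endpoint; 6385 is ironic's default API port.
base = "http://ironic.example.com:6385"
# The conductors API returns 404 at microversion 1.48 (see the tests above),
# so request a newer version explicitly.
headers = {"X-OpenStack-Ironic-API-Version": "1.49"}

resp = requests.get(f"{base}/v1/conductors",
                    params={"fields": "hostname,alive", "limit": 3},
                    headers=headers)
for conductor in resp.json()["conductors"]:
    print(conductor["hostname"], conductor["alive"])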
py
1a590584f73b10fe4a46d474029b25dcd81ebf96
from statsmodels.stats.proportion import proportion_confint

# an example of NCBI
alpha = 0.02
total, rate = 964, 0.8959
positives = int(total * rate)
# Wilson score interval at confidence level 1 - alpha; `low` is the lower confidence bound itself
low, _ = proportion_confint(positives, total, method='wilson', alpha=alpha)
# distance between the point estimate and the Wilson lower bound
lower_bound = rate - low
print('lower bound = {a}'.format(a=lower_bound))
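For comparison, the interval itself can be read directly from the same statsmodels call; a short sketch with the same inputs:

from statsmodels.stats.proportion import proportion_confint

alpha = 0.02
total, rate = 964, 0.8959
positives = int(total * rate)

ci_low, ci_upp = proportion_confint(positives, total, alpha=alpha, method='wilson')
print('98% Wilson interval: [{:.4f}, {:.4f}]'.format(ci_low, ci_upp))
print('margin below the point estimate: {:.4f}'.format(rate - ci_low))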
py
1a590597aa1c9d13b890b66b8a5313035dd3ec64
""" Analysis code for IGMSurvey objects """ from __future__ import print_function, absolute_import, division, unicode_literals import numpy as np import glob import json import pdb from astropy.table import Table from linetools import utils as ltu def calc_slgrid_atan(surveys, Agrid, Bgrid, Cgrid, C2grid): """ Calculate the sightline grid for a Atan l(z) fit Breaking this off for bootstrap speed-up Parameters ---------- surveys : list of DLASurvey objects Agrid Bgrid Cgrid C2grid Returns ------- slgrid : ndarray Sightline term in likelihood function """ # Integrating over the sightlines slgrid = np.zeros_like(Agrid) # Int(atan[x-a]) = (a-x) atan(a-x) - 0.5 * ln(a**2 - 2ax + x**2 + 1) for isurvey in surveys: slines = isurvey.sightlines gds = slines['Z_START'] < slines['Z_END'] zstart = slines['Z_START'][gds] zend = slines['Z_END'][gds] # Integrate constant term AAgrid = Agrid * np.sum(zend-zstart) slgrid += AAgrid # Integrate second term for iz in zend: CCgrid = (Cgrid-iz) * np.arctan(Cgrid-iz) - 0.5 * np.log( C2grid - 2*Cgrid*iz + iz**2 + 1) slgrid += Bgrid * CCgrid if np.min(CCgrid) < -0.1: pdb.set_trace() for iz in zstart: CCgrid = (Cgrid-iz) * np.arctan(Cgrid-iz) - 0.5 * np.log( C2grid - 2*Cgrid*iz + iz**2 + 1) slgrid -= Bgrid * CCgrid # Return return slgrid def fit_atan_dla_lz(surveys, nstep=20, bootstrap=True, nboot=10, nproc=2, fit_out=None, boot_out=None, verbose=True): """ Fit a A + B * atan(z-C) l(z) model to AbsSys data Writes bootstrap analysis to hard-drive Code used in Prochaska & Neeleman 2017 for DLAs Parameters ---------- surveys : list of IGMSurvey objects If None, a default list is loaded nstep : int, optional Steps in each dimension of the grid bootstrap : bool, optional Perform bootstrap analysis nboot : int, optional Number of bootstrap iterations nproc : int, optional Number of processors to use fit_out : str, optional Output filename for best fit (JSON) boot_out : str, optional Output filename for bootstrap analysis verbose : bool, optional Returns ------- dfits : dict Best fit parameters boot_tbl : Table Returned if bootstrap=True else return None """ # Name and date # Init if boot_out is None: boot_out = './lz_boot.fits.gz' if fit_out is None: fit_out = './lz_fit.json' # Synthesize all_z = np.concatenate([isurvey.zabs for isurvey in surveys]) ndla = len(all_z) # Model : l(z) = A + B * atan(C-z) Aparm = np.linspace(0.05, 0.5, num=nstep).astype(np.float32) Bparm = np.linspace(0.05, 0.5, num=nstep).astype(np.float32) Cparm = np.linspace(1., 6., num=nstep).astype(np.float32) # Generate grids (float32) Agrid, Bgrid, Cgrid = np.meshgrid(Aparm, Bparm, Cparm, copy=False) C2grid = Cgrid**2 # Sightline grid if verbose: print("Sightline calculation...") slgrid = calc_slgrid_atan(surveys, Agrid, Bgrid, Cgrid, C2grid) if bootstrap: if verbose: print("Bootstrapping!") sv_fits = [] rN = np.random.poisson(ndla, size=nboot) # Boot me z_list = [] for kk,irN in enumerate(rN): # Draw nPoisson rval = (np.random.uniform(size=irN)*ndla).astype(int) # Draw from all_z draw_z = all_z[rval] z_list.append(draw_z) # Run if nproc == 1: for draw_z in z_list: if verbose: print("Working on iteration: {:d} of {:d}".format(kk, nboot)) dfits, _, _ = Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, draw_z, write=False) # Save sv_fits.append(dfits.copy()) else: import multiprocessing pool = multiprocessing.Pool(nproc) # initialize thread pool N threads inp_list = [] for ii in range(nboot): inp_list.append( dict(A=Agrid, B=Bgrid, C=Cgrid, sl=slgrid, z=z_list[ii])) if verbose: print("Mapping...") sv_fits = 
pool.map(map_Ln_atan, inp_list) # Write boot_tbl = Table() for key in ['A', 'B', 'C']: boot_tbl[key] = [ifits['lz']['atan'][key] for ifits in sv_fits] boot_tbl.write(boot_out, overwrite=True) if verbose: print("Wrote {:s}".format(boot_out)) else: boot_tbl = None # Best dfits, _, _ = Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, all_z, write=True) # Finish return dfits, boot_tbl def Ln_lz_atan(Agrid, Bgrid, Cgrid, slgrid, all_z, write=True, verbose=True): """ Likelihood function for arctan model Parameters ---------- Agrid Bgrid Cgrid slgrid all_z write Returns ------- dfits : dict Contains best fit model dlagrid : ndarray for debugging lngrid : ndarray """ # z0 estimate from 21cm surveys lz_z0 = dict(value=np.mean([0.026, 0.045]), sig=0.01) # Init dlagrid = np.zeros_like(Agrid) # Generate Likelihood for DLAs np.seterr(invalid='ignore') for z in all_z: dlagrid += np.log(Agrid + Bgrid * np.arctan(z-Cgrid)) bad = np.isnan(dlagrid) dlagrid[bad] = -1e9 # Likelihood lngrid = dlagrid - slgrid # z=0 model_z0 = Agrid + Bgrid * np.arctan(0.-Cgrid) lnP = -1 * (model_z0-lz_z0['value'])**2 / 2 / (lz_z0['sig']**2) lngrid += lnP # Best indices = np.where(lngrid == np.max(lngrid)) best = Agrid[indices][0], Bgrid[indices][0], Cgrid[indices][0] if verbose: print('Best fit: A={}, B={}, C={}'.format(best[0], best[1], best[2])) # Load dfits = {} # Write dfits['lz'] = {} dfits['lz']['atan'] = dict(A=Agrid[indices][0], B=Bgrid[indices][0], C=Cgrid[indices][0], form='A + B*atan(z-C)') # Return return dfits, dlagrid, lngrid def map_Ln_atan(map_dict): """ For multiprocessing the bootstrap Parameters ---------- map_dict Returns ------- """ dfits, _, _ = Ln_lz_atan(map_dict['A'], map_dict['B'], map_dict['C'], map_dict['sl'], map_dict['z'], write=False, verbose=False) return dfits def fit_fN_dblpow(NHI, a3_mnx, a4_mnx, Nd_mnx, nstep=100, Nmin=10**(20.3), Nmax=1e99, verbose=True): """ Fit a double power-law to an input NHI distribution Only does the shape Done in float32 to preserve memory Code from Prochaska & Neeleman (2017) [and also PHW05] Parameters ---------- NHI : ndarray log10 NHI values a3_mnx : tuple min/max of lower NHI power-law a4_mnx : tuple min/max of upper NHI power-law Nd_mnx : tuple min/max of break column in log10 nstep : int, optional Nmin : float, optional Minimum NHI in the analysis [usually DLA criterion] Nmax : float, optional Maximum NHI in the analysis Returns ------- dfits : dict Contains the fit best : tuple Best fit values in grid for Nd, a3, a4 Ndgrid a3grid a4grid lik """ # Generate 1D arrays a3stp = np.linspace(a3_mnx[0], a3_mnx[1], nstep).astype(np.float32) a4stp = np.linspace(a4_mnx[0], a4_mnx[1], nstep).astype(np.float32) Ndstp = np.linspace(Nd_mnx[0], Nd_mnx[1], nstep).astype(np.float32) # Generate grids (float32) a3grid, a4grid, Ndgrid = np.meshgrid(a3stp, a4stp, Ndstp, copy=False) # Linear Ns10 = 10.**Ndgrid # Calculate denominator denom = Ns10 * ((1. - (Nmin / Ns10)**(a3grid + 1.)) / (1. + a3grid) + ( (Nmax / Ns10)**(a4grid + 1) - 1.) / (a4grid + 1.)) num = np.zeros_like(Ns10) # Numerator # Loop on DLAs for iNHI10 in 10.**NHI: # Upper end high = iNHI10 > Ns10 if np.sum(high) > 0: num[high] += a4grid[high] * np.log(iNHI10 / Ns10[high]) # Low end if np.sum(~high) > 0: num[~high] += a3grid[~high] * np.log(iNHI10 / Ns10[~high]) # Liklihood (Beware of Signs!) 
lik = num - NHI.size * np.log(denom) mxL = np.max(lik) indices = np.where(lik == mxL) best = Ndgrid[indices][0], a3grid[indices][0], a4grid[indices][0] if verbose: print('Best fit: Nd={}, a3={}, a4={}'.format(best[0], best[1], best[2])) # Load dfits = {} # Write dfits['fN'] = {} dfits['fN']['dpow'] = dict(Nd=Ndgrid[indices][0], a3=a3grid[indices][0], a4=a4grid[indices][0], form='(N/Nd)**aa with aa=a3 if N<Nd else aa=a4') # KS Test ks_test = False if ks_test: ns10 = 10**best[0] dblpow_k = 1. / (ns10 * (1. - (Nmin / Ns10)**(best[1] + 1)) / (1. + best[1]) + ( (Nmax / Ns10)**(best[2] + 1) - 1.) / (best[2] + 1)) dblpow_b1 = best[1] dblpow_b2 = best[2] dblpow_nd = ns10 dblpow_nmin = Nmin noise = 0.02 dNHI = 10**(NHI + noise * np.random.uniform(size=NHI.size)) #ksone, darr, 'x_maxdblpow_kscumf', d, ksprob return dfits, best, Ndgrid, a3grid, a4grid, lik
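A toy sketch, in plain NumPy with made-up parameter values (not fit results), of evaluating the arctan incidence model l(z) = A + B*atan(z - C) and its sightline integral, the same closed form that calc_slgrid_atan accumulates:

import numpy as np

# Illustrative parameters only; not values fit to any survey.
A, B, C = 0.2, 0.2, 3.0

def l_z(z):
    """Incidence per unit redshift, l(z) = A + B * atan(z - C)."""
    return A + B * np.arctan(z - C)

def integral_l_z(z_start, z_end):
    """Integral of l(z) over one sightline, using the antiderivative
    (z-C)*atan(z-C) - 0.5*ln(1 + (z-C)**2) for the arctan term."""
    def F(z):
        x = z - C
        return A * z + B * (x * np.arctan(x) - 0.5 * np.log(1 + x**2))
    return F(z_end) - F(z_start)

print(l_z(np.array([2.0, 3.0, 4.0])))
print(integral_l_z(2.0, 4.0))   # expected number of absorbers along this sightline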
py
1a5907fba701b78ed820ef209fe8cb2ae3c82b60
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import logging from typing import Any, Optional from flask import g, request, Response from flask_appbuilder.api import expose, permission_name, protect, rison, safe from flask_appbuilder.hooks import before_request from flask_appbuilder.models.sqla.interface import SQLAInterface from flask_babel import ngettext from marshmallow import ValidationError from superset import is_feature_enabled from superset.charts.filters import ChartFilter from superset.constants import MODEL_API_RW_METHOD_PERMISSION_MAP, RouteMethod from superset.dashboards.filters import DashboardAccessFilter from superset.databases.filters import DatabaseFilter from superset.models.reports import ReportSchedule from superset.reports.commands.bulk_delete import BulkDeleteReportScheduleCommand from superset.reports.commands.create import CreateReportScheduleCommand from superset.reports.commands.delete import DeleteReportScheduleCommand from superset.reports.commands.exceptions import ( ReportScheduleBulkDeleteFailedError, ReportScheduleCreateFailedError, ReportScheduleDeleteFailedError, ReportScheduleForbiddenError, ReportScheduleInvalidError, ReportScheduleNotFoundError, ReportScheduleUpdateFailedError, ) from superset.reports.commands.update import UpdateReportScheduleCommand from superset.reports.filters import ReportScheduleAllTextFilter from superset.reports.schemas import ( get_delete_ids_schema, openapi_spec_methods_override, ReportSchedulePostSchema, ReportSchedulePutSchema, ) from superset.views.base_api import ( BaseSupersetModelRestApi, RelatedFieldFilter, statsd_metrics, ) from superset.views.filters import FilterRelatedOwners logger = logging.getLogger(__name__) class ReportScheduleRestApi(BaseSupersetModelRestApi): datamodel = SQLAInterface(ReportSchedule) @before_request def ensure_alert_reports_enabled(self) -> Optional[Response]: if not is_feature_enabled("ALERT_REPORTS"): return self.response_404() return None include_route_methods = RouteMethod.REST_MODEL_VIEW_CRUD_SET | { RouteMethod.RELATED, "bulk_delete", # not using RouteMethod since locally defined } class_permission_name = "ReportSchedule" method_permission_name = MODEL_API_RW_METHOD_PERMISSION_MAP resource_name = "report" allow_browser_login = True show_columns = [ "id", "active", "chart.id", "chart.slice_name", "chart.viz_type", "context_markdown", "creation_method", "crontab", "dashboard.dashboard_title", "dashboard.id", "database.database_name", "database.id", "description", "grace_period", "last_eval_dttm", "last_state", "last_value", "last_value_row_json", "log_retention", "name", "owners.first_name", "owners.id", "owners.last_name", "recipients.id", "recipients.recipient_config_json", "recipients.type", "report_format", "sql", "timezone", "type", "validator_config_json", 
"validator_type", "working_timeout", ] show_select_columns = show_columns + [ "chart.datasource_id", "chart.datasource_type", ] list_columns = [ "active", "changed_by.first_name", "changed_by.last_name", "changed_on", "changed_on_delta_humanized", "created_by.first_name", "created_by.last_name", "created_on", "creation_method", "crontab", "crontab_humanized", "description", "id", "last_eval_dttm", "last_state", "name", "owners.first_name", "owners.id", "owners.last_name", "recipients.id", "recipients.type", "timezone", "type", ] add_columns = [ "active", "chart", "context_markdown", "creation_method", "crontab", "dashboard", "database", "description", "grace_period", "log_retention", "name", "owners", "recipients", "report_format", "sql", "timezone", "type", "validator_config_json", "validator_type", "working_timeout", ] edit_columns = add_columns add_model_schema = ReportSchedulePostSchema() edit_model_schema = ReportSchedulePutSchema() order_columns = [ "active", "description", "created_by.first_name", "changed_by.first_name", "changed_on", "changed_on_delta_humanized", "created_on", "crontab", "last_eval_dttm", "name", "type", "crontab_humanized", ] search_columns = [ "name", "active", "created_by", "type", "last_state", "creation_method", "dashboard_id", "chart_id", ] search_filters = {"name": [ReportScheduleAllTextFilter]} allowed_rel_fields = {"owners", "chart", "dashboard", "database", "created_by"} filter_rel_fields = { "chart": [["id", ChartFilter, lambda: []]], "dashboard": [["id", DashboardAccessFilter, lambda: []]], "database": [["id", DatabaseFilter, lambda: []]], } text_field_rel_fields = { "dashboard": "dashboard_title", "chart": "slice_name", "database": "database_name", } related_field_filters = { "dashboard": "dashboard_title", "chart": "slice_name", "database": "database_name", "owners": RelatedFieldFilter("first_name", FilterRelatedOwners), } apispec_parameter_schemas = { "get_delete_ids_schema": get_delete_ids_schema, } openapi_spec_tag = "Report Schedules" openapi_spec_methods = openapi_spec_methods_override @expose("/<int:pk>", methods=["DELETE"]) @protect() @safe @statsd_metrics @permission_name("delete") def delete(self, pk: int) -> Response: """Delete a Report Schedule --- delete: description: >- Delete a Report Schedule parameters: - in: path schema: type: integer name: pk description: The report schedule pk responses: 200: description: Item deleted content: application/json: schema: type: object properties: message: type: string 403: $ref: '#/components/responses/403' 404: $ref: '#/components/responses/404' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' """ try: DeleteReportScheduleCommand(g.user, pk).run() return self.response(200, message="OK") except ReportScheduleNotFoundError: return self.response_404() except ReportScheduleForbiddenError: return self.response_403() except ReportScheduleDeleteFailedError as ex: logger.error( "Error deleting report schedule %s: %s", self.__class__.__name__, str(ex), exc_info=True, ) return self.response_422(message=str(ex)) @expose("/", methods=["POST"]) @protect() @statsd_metrics @permission_name("post") def post(self) -> Response: """Creates a new Report Schedule --- post: description: >- Create a new Report Schedule requestBody: description: Report Schedule schema required: true content: application/json: schema: $ref: '#/components/schemas/{{self.__class__.__name__}}.post' responses: 201: description: Report schedule added content: application/json: schema: type: object properties: id: 
type: number result: $ref: '#/components/schemas/{{self.__class__.__name__}}.post' 400: $ref: '#/components/responses/400' 401: $ref: '#/components/responses/401' 404: $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' """ if not request.is_json: return self.response_400(message="Request is not JSON") try: item = self.add_model_schema.load(request.json) # This validates custom Schema with custom validations except ValidationError as error: return self.response_400(message=error.messages) try: new_model = CreateReportScheduleCommand(g.user, item).run() return self.response(201, id=new_model.id, result=item) except ReportScheduleNotFoundError as ex: return self.response_400(message=str(ex)) except ReportScheduleInvalidError as ex: return self.response_422(message=ex.normalized_messages()) except ReportScheduleCreateFailedError as ex: logger.error( "Error creating report schedule %s: %s", self.__class__.__name__, str(ex), exc_info=True, ) return self.response_422(message=str(ex)) @expose("/<int:pk>", methods=["PUT"]) @protect() @safe @statsd_metrics @permission_name("put") def put(self, pk: int) -> Response: """Updates an Report Schedule --- put: description: >- Updates a Report Schedule parameters: - in: path schema: type: integer name: pk description: The Report Schedule pk requestBody: description: Report Schedule schema required: true content: application/json: schema: $ref: '#/components/schemas/{{self.__class__.__name__}}.put' responses: 200: description: Report Schedule changed content: application/json: schema: type: object properties: id: type: number result: $ref: '#/components/schemas/{{self.__class__.__name__}}.put' 400: $ref: '#/components/responses/400' 401: $ref: '#/components/responses/401' 403: $ref: '#/components/responses/403' 404: $ref: '#/components/responses/404' 500: $ref: '#/components/responses/500' """ if not request.is_json: return self.response_400(message="Request is not JSON") try: item = self.edit_model_schema.load(request.json) # This validates custom Schema with custom validations except ValidationError as error: return self.response_400(message=error.messages) try: new_model = UpdateReportScheduleCommand(g.user, pk, item).run() return self.response(200, id=new_model.id, result=item) except ReportScheduleNotFoundError: return self.response_404() except ReportScheduleInvalidError as ex: return self.response_422(message=ex.normalized_messages()) except ReportScheduleForbiddenError: return self.response_403() except ReportScheduleUpdateFailedError as ex: logger.error( "Error updating report %s: %s", self.__class__.__name__, str(ex), exc_info=True, ) return self.response_422(message=str(ex)) @expose("/", methods=["DELETE"]) @protect() @safe @statsd_metrics @rison(get_delete_ids_schema) def bulk_delete(self, **kwargs: Any) -> Response: """Delete bulk Report Schedule layers --- delete: description: >- Deletes multiple report schedules in a bulk operation. 
parameters: - in: query name: q content: application/json: schema: $ref: '#/components/schemas/get_delete_ids_schema' responses: 200: description: Report Schedule bulk delete content: application/json: schema: type: object properties: message: type: string 401: $ref: '#/components/responses/401' 403: $ref: '#/components/responses/403' 404: $ref: '#/components/responses/404' 422: $ref: '#/components/responses/422' 500: $ref: '#/components/responses/500' """ item_ids = kwargs["rison"] try: BulkDeleteReportScheduleCommand(g.user, item_ids).run() return self.response( 200, message=ngettext( "Deleted %(num)d report schedule", "Deleted %(num)d report schedules", num=len(item_ids), ), ) except ReportScheduleNotFoundError: return self.response_404() except ReportScheduleForbiddenError: return self.response_403() except ReportScheduleBulkDeleteFailedError as ex: return self.response_422(message=str(ex))
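A hypothetical client-side sketch of creating a report schedule through this REST API with requests; the host, the token, and every payload value are assumptions, and the accepted fields are ultimately defined by ReportSchedulePostSchema:

import requests

base = "https://superset.example.com"
headers = {"Authorization": "Bearer <access-token>"}  # token obtained elsewhere

# Hypothetical payload; field names follow the add_columns list above.
payload = {
    "type": "Report",
    "name": "Weekly sales report",
    "crontab": "0 9 * * 1",   # every Monday at 09:00
    "timezone": "UTC",
    "dashboard": 1,           # hypothetical dashboard id
    "report_format": "PNG",
}

resp = requests.post(f"{base}/api/v1/report/", json=payload, headers=headers)
resp.raise_for_status()
print(resp.json())            # {"id": ..., "result": {...}}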
py
1a59094e3fd390fb968bbd591a0e3d98209f717d
import traitlets
import ipyvuetify as v


def set_msg(
    pts, bands_combo, sources, basename, mosaics, image_size, square_size, driver
):

    # transform sources in a str
    source_name = " & ".join(sources) if isinstance(sources, list) else None

    nb_pts = len(pts)

    # compute the surface; ESRI:54009 is in meters, so divide by 1e6 to get km2
    pts_conform = pts.to_crs("ESRI:54009")
    minx, miny, maxx, maxy = pts_conform.total_bounds
    surface = (maxx - minx) * (maxy - miny) / 1e6  # in km2

    msg = f"""
    <div>
        <p>
            You're about to launch the following download:
        </p>
        <ul>
            <li>
                <b>{nb_pts}</b> points distributed on <b>{surface:.2f}</b> km\u00B2
            </li>
            <li>
                Using the images coming from <b>{source_name if source_name else driver}</b>
            </li>
            <li>
                Using the <b>{bands_combo}</b> band combination
            </li>
            <li>
                Using <b>{len(mosaics)}</b> different mosaics
            </li>
            <li>
                Using thumbnails of <b>{image_size}x{image_size}</b> m\u00B2
            </li>
            <li>
                Displaying squares of <b>{square_size}x{square_size}</b> m\u00B2
            </li>
            <li>
                Saved in a file using <b>{basename}</b> as a basename
            </li>
        </ul>
        <p>
            If you agree with these inputs you can start the download; if not, please change the inputs in the previous panels.
        </p>
    </div>
    """

    # create a Html widget
    class MyHTML(v.VuetifyTemplate):
        template = traitlets.Unicode(msg).tag(sync=True)

    return MyHTML()
py
1a5909cee990f6b8f721bdafb8de838912d921b1
import copy from membase.helper.cluster_helper import ClusterOperationHelper from couchbase_helper.documentgenerator import BlobGenerator from .xdcrnewbasetests import XDCRNewBaseTest from .xdcrnewbasetests import NodeHelper from .xdcrnewbasetests import Utility, BUCKET_NAME, OPS from remote.remote_util import RemoteMachineShellConnection from lib.memcached.helper.data_helper import MemcachedClientHelper from membase.api.rest_client import RestConnection # Assumption that at least 2 nodes on every cluster class bidirectional(XDCRNewBaseTest): def setUp(self): super(bidirectional, self).setUp() self.src_cluster = self.get_cb_cluster_by_name('C1') self.src_master = self.src_cluster.get_master_node() self.dest_cluster = self.get_cb_cluster_by_name('C2') self.dest_master = self.dest_cluster.get_master_node() def tearDown(self): super(bidirectional, self).tearDown() def __perform_ops_joint_sets(self): # Merging the keys as keys are actually replicated. temp_expires = self._expires self._expires = 0 # Assigning it to 0, so that merge_buckets don't wait for expiration here. self.merge_all_buckets() tasks = [] kv_gen_src = self.src_cluster.get_kv_gen()[OPS.CREATE] gen_update = BlobGenerator(kv_gen_src.name, kv_gen_src.seed, kv_gen_src.value_size, start=0, end=int(kv_gen_src.end * (float)(self._perc_upd) / 100)) gen_delete = BlobGenerator(kv_gen_src.name, kv_gen_src.seed, kv_gen_src.value_size, start=int((kv_gen_src.end) * (float)(100 - self._perc_del) / 100), end=kv_gen_src.end) if "C1" in self._upd_clusters: tasks += self.src_cluster.async_load_all_buckets_from_generator(gen_update, OPS.UPDATE, self._expires) if "C2" in self._upd_clusters: tasks += self.dest_cluster.async_load_all_buckets_from_generator(gen_update, OPS.UPDATE, self._expires) if "C1" in self._del_clusters: tasks += self.src_cluster.async_load_all_buckets_from_generator(gen_delete, OPS.DELETE, 0) if "C2" in self._del_clusters: tasks += self.dest_cluster.async_load_all_buckets_from_generator(gen_delete, OPS.DELETE, 0) for task in tasks: task.result() self._expires = temp_expires if (self._wait_for_expiration and self._expires) and ("C1" in self._upd_clusters or "C2" in self._upd_clusters): self.sleep(self._expires) self.sleep(self._wait_timeout) """Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket.""" def load_with_ops(self): self.setup_xdcr_and_load() self.perform_update_delete() self.verify_results() """Bidirectional replication between two clusters(currently), create-updates-deletes on DISJOINT sets on same bucket. Here running incremental load on both cluster1 and cluster2 as specified by the user/conf file""" def load_with_async_ops(self): self.setup_xdcr_and_load() self.async_perform_update_delete() self.verify_results() """Testing Bidirectional load( Loading at source/destination). Failover node at Source/Destination while Create/Update/Delete are performed in parallel based on doc-ops specified by the user. Verifying whether XDCR replication is successful on subsequent destination clusters. 
""" def load_with_async_ops_and_joint_sets(self): self.setup_xdcr_and_load() self.async_perform_update_delete() self.verify_results() def load_with_async_ops_with_warmup(self): self.setup_xdcr_and_load() warmupnodes = [] if "C1" in self._warmup: warmupnodes.append(self.src_cluster.warmup_node()) if "C2" in self._warmup: warmupnodes.append(self.dest_cluster.warmup_node()) self.sleep(self._wait_timeout) NodeHelper.wait_warmup_completed(warmupnodes) self.async_perform_update_delete() self.sleep(self._wait_timeout // 2) self.verify_results() def load_with_async_ops_with_warmup_master(self): self.setup_xdcr_and_load() warmupnodes = [] if "C1" in self._warmup: warmupnodes.append(self.src_cluster.warmup_node(master=True)) if "C2" in self._warmup: warmupnodes.append(self.dest_cluster.warmup_node(master=True)) self.sleep(self._wait_timeout) NodeHelper.wait_warmup_completed(warmupnodes) self.async_perform_update_delete() self.sleep(self._wait_timeout // 2) self.verify_results() def load_with_async_ops_and_joint_sets_with_warmup(self): bucket_type = self._input.param("bucket_type", "membase") if bucket_type == "ephemeral": "Test case does not apply for Ephemeral buckets" return self.setup_xdcr_and_load() warmupnodes = [] if "C1" in self._warmup: warmupnodes.append(self.src_cluster.warmup_node()) if "C2" in self._warmup: warmupnodes.append(self.dest_cluster.warmup_node()) self.sleep(self._wait_timeout) self.async_perform_update_delete() self.sleep(self._wait_timeout // 2) NodeHelper.wait_warmup_completed(warmupnodes) self.verify_results() def load_with_async_ops_and_joint_sets_with_warmup_master(self): self.setup_xdcr_and_load() warmupnodes = [] if "C1" in self._warmup: warmupnodes.append(self.src_cluster.warmup_node(master=True)) if "C2" in self._warmup: warmupnodes.append(self.dest_cluster.warmup_node(master=True)) self.sleep(self._wait_timeout) self.async_perform_update_delete() self.sleep(self._wait_timeout // 2) NodeHelper.wait_warmup_completed(warmupnodes) self.verify_results() def load_with_failover(self): self.setup_xdcr_and_load() if "C1" in self._failover: self.src_cluster.failover_and_rebalance_nodes() if "C2" in self._failover: self.dest_cluster.failover_and_rebalance_nodes() self.sleep(self._wait_timeout // 6) self.perform_update_delete() self.sleep(300) self.verify_results() def load_with_failover_then_add_back(self): self.setup_xdcr_and_load() if "C1" in self._failover: self.src_cluster.failover_and_rebalance_nodes(rebalance=False) self.src_cluster.add_back_node() if "C2" in self._failover: self.dest_cluster.failover_and_rebalance_nodes(rebalance=False) self.dest_cluster.add_back_node() self.perform_update_delete() self.verify_results() def load_with_failover_master(self): self.setup_xdcr_and_load() if "C1" in self._failover: self.src_cluster.failover_and_rebalance_master() if "C2" in self._failover: self.dest_cluster.failover_and_rebalance_master() self.sleep(self._wait_timeout // 6) self.perform_update_delete() self.verify_results() """Replication with compaction ddocs and view queries on both clusters. This test begins by loading a given number of items on both clusters. It creates _num_views as development/production view with default map view funcs(_is_dev_ddoc = True by default) on both clusters. Then we disabled compaction for ddoc on src cluster. While we don't reach expected fragmentation for ddoc on src cluster we update docs and perform view queries for all views. Then we start compaction when fragmentation was reached fragmentation_value. 
When compaction was completed we perform a full verification: wait for the disk queues to drain and then verify that there has been no data loss on both clusters.""" def replication_with_ddoc_compaction(self): bucket_type = self._input.param("bucket_type", "membase") if bucket_type == "ephemeral": self.log.info("Test case does not apply to ephemeral") return self.setup_xdcr() self.src_cluster.load_all_buckets(self._num_items) self.dest_cluster.load_all_buckets(self._num_items) num_views = self._input.param("num_views", 5) is_dev_ddoc = self._input.param("is_dev_ddoc", True) fragmentation_value = self._input.param("fragmentation_value", 80) for bucket in self.src_cluster.get_buckets(): views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc) ddoc_name = "ddoc1" prefix = ("", "dev_")[is_dev_ddoc] query = {"full_set": "true", "stale": "false"} tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT) tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT) for task in tasks: task.result(self._poll_timeout) self.src_cluster.disable_compaction() fragmentation_monitor = self.src_cluster.async_monitor_view_fragmentation(prefix + ddoc_name, fragmentation_value, BUCKET_NAME.DEFAULT) # generate load until fragmentation reached while fragmentation_monitor.state != "FINISHED": # update docs to create fragmentation self.src_cluster.update_delete_data(OPS.UPDATE) for view in views: # run queries to create indexes self.src_cluster.query_view(prefix + ddoc_name, view.name, query) self.dest_cluster.query_view(prefix + ddoc_name, view.name, query) fragmentation_monitor.result() compaction_task = self.src_cluster.async_compact_view(prefix + ddoc_name, 'default') self.assertTrue(compaction_task.result()) self.verify_results() def replication_with_view_queries_and_ops(self): bucket_type = self._input.param("bucket_type", "membase") if bucket_type == "ephemeral": self.log.info("Test case does not apply to ephemeral") return tasks = [] try: self.setup_xdcr() self.src_cluster.load_all_buckets(self._num_items) self.dest_cluster.load_all_buckets(self._num_items) num_views = self._input.param("num_views", 5) is_dev_ddoc = self._input.param("is_dev_ddoc", True) for bucket in self.src_cluster.get_buckets(): views = Utility.make_default_views(bucket.name, num_views, is_dev_ddoc) ddoc_name = "ddoc1" prefix = ("", "dev_")[is_dev_ddoc] query = {"full_set": "true", "stale": "false", "connection_timeout": 60000} tasks = self.src_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT) tasks += self.dest_cluster.async_create_views(ddoc_name, views, BUCKET_NAME.DEFAULT) for task in tasks: task.result(self._poll_timeout) tasks = [] # Setting up doc-ops at source nodes if "C1" in self._upd_clusters: tasks.extend(self.src_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires)) if "C1" in self._del_clusters: tasks.extend(self.src_cluster.async_update_delete(OPS.DELETE, self._perc_del)) if "C2" in self._upd_clusters: tasks.extend(self.dest_cluster.async_update_delete(OPS.UPDATE, self._perc_upd, self._expires)) if "C2" in self._del_clusters: tasks.extend(self.dest_cluster.async_update_delete(OPS.DELETE, self._perc_del)) self.sleep(5) while True: for view in views: self.src_cluster.query_view(prefix + ddoc_name, view.name, query) self.dest_cluster.query_view(prefix + ddoc_name, view.name, query) if {task.state for task in tasks} != {"FINISHED"}: continue else: if self._wait_for_expiration: if "C1" in self._upd_clusters or "C2" in 
self._upd_clusters: self.sleep(self._expires) break self.merge_all_buckets() self.src_cluster.verify_items_count() self.dest_cluster.verify_items_count() tasks = [] src_buckets = self.src_cluster.get_buckets() dest_buckets = self.dest_cluster.get_buckets() for view in views: tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, src_buckets[0].kvs[1].__len__())) tasks.append(self.src_cluster.async_query_view(prefix + ddoc_name, view.name, query, dest_buckets[0].kvs[1].__len__())) for task in tasks: task.result(self._poll_timeout) self.verify_results() finally: # For timeout error, all tasks to be cancelled # Before proceeding to next test for task in tasks: task.cancel() """Replication with disabled/enabled ddoc compaction on both clusters. This test begins by loading a given number of items on both clusters. Then we disabled or enabled compaction on both clusters( set via params). Then we mutate and delete data on clusters 3 times. After deletion we recreate deleted items. When data was changed 3 times we perform a full verification: wait for the disk queues to drain and then verify that there has been no data loss on both clusters.""" def replication_with_disabled_ddoc_compaction(self): self.setup_xdcr() self.src_cluster.load_all_buckets(self._num_items) self.dest_cluster.load_all_buckets(self._num_items) if "C1" in self._disable_compaction: self.src_cluster.disable_compaction() if "C2" in self._disable_compaction: self.dest_cluster.disable_compaction() # perform doc's ops 3 times to increase rev number for _ in range(3): self.async_perform_update_delete() # wait till deletes have been sent to recreate self.sleep(60) # restore(re-creating) deleted items if 'C1' in self._del_clusters: c1_kv_gen = self.src_cluster.get_kv_gen() c1_gen_delete = copy.deepcopy(c1_kv_gen[OPS.DELETE]) if self._expires: # if expiration set, recreate those keys before # trying to update c1_gen_update = copy.deepcopy(c1_kv_gen[OPS.UPDATE]) self.src_cluster.load_all_buckets_from_generator(kv_gen=c1_gen_update) self.src_cluster.load_all_buckets_from_generator(kv_gen=c1_gen_delete) if 'C2' in self._del_clusters: c2_kv_gen = self.dest_cluster.get_kv_gen() c2_gen_delete = copy.deepcopy(c2_kv_gen[OPS.DELETE]) if self._expires: c2_gen_update = copy.deepcopy(c2_kv_gen[OPS.UPDATE]) self.dest_cluster.load_all_buckets_from_generator(kv_gen=c2_gen_update) self.dest_cluster.load_all_buckets_from_generator(kv_gen=c2_gen_delete) # wait till we recreate deleted keys before we can delete/update self.sleep(300) self.verify_results() def replication_while_rebooting_a_non_master_src_dest_node(self): bucket_type = self._input.param("bucket_type", "membase") if bucket_type == "ephemeral": self.log.info("Test case does not apply to ephemeral") return self.setup_xdcr_and_load() self.async_perform_update_delete() self.sleep(self._wait_timeout) reboot_node_dest = self.dest_cluster.reboot_one_node(self) NodeHelper.wait_node_restarted(reboot_node_dest, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True) reboot_node_src = self.src_cluster.reboot_one_node(self) NodeHelper.wait_node_restarted(reboot_node_src, self, wait_time=self._wait_timeout * 4, wait_if_warmup=True) self.sleep(120) ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_dest], self, wait_if_warmup=True) ClusterOperationHelper.wait_for_ns_servers_or_assert([reboot_node_src], self, wait_if_warmup=True) self.verify_results() def test_disk_full(self): self.setup_xdcr_and_load() self.verify_results() self.sleep(self._wait_timeout) 
zip_file = "%s.zip" % (self._input.param("file_name", "collectInfo")) try: for node in [self.src_master, self.dest_master]: self.shell = RemoteMachineShellConnection(node) self.shell.execute_cbcollect_info(zip_file) if self.shell.extract_remote_info().type.lower() != "windows": command = "unzip %s" % (zip_file) output, error = self.shell.execute_command(command) self.shell.log_command_output(output, error) if len(error) > 0: raise Exception("unable to unzip the files. Check unzip command output for help") cmd = 'grep -R "Approaching full disk warning." cbcollect_info*/' output, _ = self.shell.execute_command(cmd) else: cmd = "curl -0 http://{1}:{2}@{0}:8091/diag 2>/dev/null | grep 'Approaching full disk warning.'".format( self.src_master.ip, self.src_master.rest_username, self.src_master.rest_password) output, _ = self.shell.execute_command(cmd) self.assertNotEqual(len(output), 0, "Full disk warning not generated as expected in %s" % node.ip) self.log.info("Full disk warning generated as expected in %s" % node.ip) self.shell.delete_files(zip_file) self.shell.delete_files("cbcollect_info*") except Exception as e: self.log.info(e) def test_rollback(self): bucket = self.src_cluster.get_buckets()[0] src_nodes = self.src_cluster.get_nodes() dest_nodes = self.dest_cluster.get_nodes() nodes = src_nodes + dest_nodes # Stop Persistence on Node A & Node B for node in nodes: mem_client = MemcachedClientHelper.direct_client(node, bucket) mem_client.stop_persistence() goxdcr_log = NodeHelper.get_goxdcr_log_dir(self._input.servers[0])\ + '/goxdcr.log*' self.setup_xdcr() self.src_cluster.pause_all_replications() self.dest_cluster.pause_all_replications() gen = BlobGenerator("C1-", "C1-", self._value_size, end=self._num_items) self.src_cluster.load_all_buckets_from_generator(gen) gen = BlobGenerator("C2-", "C2-", self._value_size, end=self._num_items) self.dest_cluster.load_all_buckets_from_generator(gen) self.src_cluster.resume_all_replications() self.dest_cluster.resume_all_replications() # Perform mutations on the bucket self.async_perform_update_delete() rest1 = RestConnection(self.src_cluster.get_master_node()) rest2 = RestConnection(self.dest_cluster.get_master_node()) # Fetch count of docs in src and dest cluster _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1] _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1] self.log.info("Before rollback src cluster count = {0} dest cluster count = {1}".format(_count1, _count2)) # Kill memcached on Node A so that Node B becomes master shell = RemoteMachineShellConnection(self.src_cluster.get_master_node()) shell.kill_memcached() shell = RemoteMachineShellConnection(self.dest_cluster.get_master_node()) shell.kill_memcached() # Start persistence on Node B mem_client = MemcachedClientHelper.direct_client(src_nodes[1], bucket) mem_client.start_persistence() mem_client = MemcachedClientHelper.direct_client(dest_nodes[1], bucket) mem_client.start_persistence() # Failover Node B failover_task = self.src_cluster.async_failover() failover_task.result() failover_task = self.dest_cluster.async_failover() failover_task.result() # Wait for Failover & rollback to complete self.sleep(60) # Fetch count of docs in src and dest cluster _count1 = rest1.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1] _count2 = rest2.fetch_bucket_stats(bucket=bucket.name)["op"]["samples"]["curr_items"][-1] self.log.info("After rollback src cluster count = {0} dest cluster count = 
{1}".format(_count1, _count2)) self.assertTrue(self.src_cluster.wait_for_outbound_mutations(), "Mutations in source cluster not replicated to target after rollback") self.assertTrue(self.dest_cluster.wait_for_outbound_mutations(), "Mutations in target cluster not replicated to source after rollback") _, count = NodeHelper.check_goxdcr_log( src_nodes[0], "Received rollback from DCP stream", goxdcr_log) self.assertGreater(count, 0, "rollback did not happen as expected") self.log.info("rollback happened as expected") _, count = NodeHelper.check_goxdcr_log( dest_nodes[0], "Received rollback from DCP stream", goxdcr_log) self.assertGreater(count, 0, "rollback did not happen as expected") self.log.info("rollback happened as expected") def test_scramsha(self): """ Creates a new bi-xdcr replication with scram-sha Make sure to pass use-scramsha=True from command line """ self.setup_xdcr() self.sleep(60, "wait before checking logs") for node in [self.src_cluster.get_master_node()]+[self.dest_cluster.get_master_node()]: _, count = NodeHelper.check_goxdcr_log(node, "HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60) if count <= 0: self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip)) else: self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip)) self.verify_results() def test_update_to_scramsha_auth(self): """ Start with ordinary replication, then switch to use scram_sha_auth Search for success log stmtsS """ _, old_count = NodeHelper.check_goxdcr_log(self.src_cluster.get_master_node(), "HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60) self.setup_xdcr() # modify remote cluster ref to use scramsha for remote_cluster in self.src_cluster.get_remote_clusters()+self.dest_cluster.get_remote_clusters(): remote_cluster.use_scram_sha_auth() self.sleep(60, "wait before checking the logs for using scram-sha") for node in [self.src_cluster.get_master_node()]+[self.dest_cluster.get_master_node()]: _, count = NodeHelper.check_goxdcr_log(node, "HttpAuthMech=ScramSha for remote cluster reference remoteCluster", timeout=60) if count <= old_count: self.fail("Node {0} does not use SCRAM-SHA authentication".format(node.ip)) else: self.log.info("SCRAM-SHA auth successful on node {0}".format(node.ip)) self.verify_results()
py
1a590a46498bf11d098399d531709815eeb94ab4
# -*- coding: utf-8 -*- """ Provides the service module for systemd .. versionadded:: 0.10.0 .. important:: If you feel that Salt should be using this module to manage services on a minion, and it is using a different module (or gives an error similar to *'service.start' is not available*), see :ref:`here <module-provider-override>`. .. important:: This is an implementation of virtual 'service' module. As such, you must call it under the name 'service' and NOT 'systemd'. You can see that also in the examples below. """ # Import Python libs from __future__ import absolute_import, print_function, unicode_literals import errno import fnmatch import glob import logging import os import re import shlex # Import Salt libs import salt.utils.files import salt.utils.itertools import salt.utils.path import salt.utils.stringutils import salt.utils.systemd from salt.exceptions import CommandExecutionError # Import 3rd-party libs from salt.ext import six log = logging.getLogger(__name__) __func_alias__ = { "reload_": "reload", "unmask_": "unmask", } SYSTEM_CONFIG_PATHS = ("/lib/systemd/system", "/usr/lib/systemd/system") LOCAL_CONFIG_PATH = "/etc/systemd/system" INITSCRIPT_PATH = "/etc/init.d" VALID_UNIT_TYPES = ( "service", "socket", "device", "mount", "automount", "swap", "target", "path", "timer", ) # Define the module's virtual name __virtualname__ = "service" # Disable check for string substitution # pylint: disable=E1321 def __virtual__(): """ Only work on systems that have been booted with systemd """ if __grains__.get("kernel") == "Linux" and salt.utils.systemd.booted(__context__): return __virtualname__ return ( False, "The systemd execution module failed to load: only available on Linux " "systems which have been booted with systemd.", ) def _root(path, root): """ Relocate an absolute path to a new root directory. """ if root: return os.path.join(root, os.path.relpath(path, os.path.sep)) else: return path def _canonical_unit_name(name): """ Build a canonical unit name treating unit names without one of the valid suffixes as a service. """ if not isinstance(name, six.string_types): name = six.text_type(name) if any(name.endswith(suffix) for suffix in VALID_UNIT_TYPES): return name return "%s.service" % name def _check_available(name): """ Returns boolean telling whether or not the named service is available """ _status = _systemctl_status(name) sd_version = salt.utils.systemd.version(__context__) if sd_version is not None and sd_version >= 231: # systemd 231 changed the output of "systemctl status" for unknown # services, and also made it return an exit status of 4. If we are on # a new enough version, check the retcode, otherwise fall back to # parsing the "systemctl status" output. # See: https://github.com/systemd/systemd/pull/3385 # Also: https://github.com/systemd/systemd/commit/3dced37 return 0 <= _status["retcode"] < 4 out = _status["stdout"].lower() if "could not be found" in out: # Catch cases where the systemd version is < 231 but the return code # and output changes have been backported (e.g. RHEL 7.3). return False for line in salt.utils.itertools.split(out, "\n"): match = re.match(r"\s+loaded:\s+(\S+)", line) if match: ret = match.group(1) != "not-found" break else: raise CommandExecutionError("Failed to get information on unit '%s'" % name) return ret def _check_for_unit_changes(name): """ Check for modified/updated unit files, and run a daemon-reload if any are found. 
""" contextkey = "systemd._check_for_unit_changes.{0}".format(name) if contextkey not in __context__: if _untracked_custom_unit_found(name) or _unit_file_changed(name): systemctl_reload() # Set context key to avoid repeating this check __context__[contextkey] = True def _check_unmask(name, unmask, unmask_runtime, root=None): """ Common code for conditionally removing masks before making changes to a service's state. """ if unmask: unmask_(name, runtime=False, root=root) if unmask_runtime: unmask_(name, runtime=True, root=root) def _clear_context(): """ Remove context """ # Using list() here because modifying a dictionary during iteration will # raise a RuntimeError. for key in list(__context__): try: if key.startswith("systemd._systemctl_status."): __context__.pop(key) except AttributeError: continue def _default_runlevel(): """ Try to figure out the default runlevel. It is kept in /etc/init/rc-sysinit.conf, but can be overridden with entries in /etc/inittab, or via the kernel command-line at boot """ # Try to get the "main" default. If this fails, throw up our # hands and just guess "2", because things are horribly broken try: with salt.utils.files.fopen("/etc/init/rc-sysinit.conf") as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if line.startswith("env DEFAULT_RUNLEVEL"): runlevel = line.split("=")[-1].strip() except Exception: # pylint: disable=broad-except return "2" # Look for an optional "legacy" override in /etc/inittab try: with salt.utils.files.fopen("/etc/inittab") as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) if not line.startswith("#") and "initdefault" in line: runlevel = line.split(":")[1] except Exception: # pylint: disable=broad-except pass # The default runlevel can also be set via the kernel command-line. try: valid_strings = set( ("0", "1", "2", "3", "4", "5", "6", "s", "S", "-s", "single") ) with salt.utils.files.fopen("/proc/cmdline") as fp_: for line in fp_: line = salt.utils.stringutils.to_unicode(line) for arg in line.strip().split(): if arg in valid_strings: runlevel = arg break except Exception: # pylint: disable=broad-except pass return runlevel def _get_systemd_services(root): """ Use os.listdir() to get all the unit files """ ret = set() for path in SYSTEM_CONFIG_PATHS + (LOCAL_CONFIG_PATH,): # Make sure user has access to the path, and if the path is a # link it's likely that another entry in SYSTEM_CONFIG_PATHS # or LOCAL_CONFIG_PATH points to it, so we can ignore it. 
path = _root(path, root) if os.access(path, os.R_OK) and not os.path.islink(path): for fullname in os.listdir(path): try: unit_name, unit_type = fullname.rsplit(".", 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == "service" else fullname) return ret def _get_sysv_services(root, systemd_services=None): """ Use os.listdir() and os.access() to get all the initscripts """ initscript_path = _root(INITSCRIPT_PATH, root) try: sysv_services = os.listdir(initscript_path) except OSError as exc: if exc.errno == errno.ENOENT: pass elif exc.errno == errno.EACCES: log.error( "Unable to check sysvinit scripts, permission denied to %s", initscript_path, ) else: log.error( "Error %d encountered trying to check sysvinit scripts: %s", exc.errno, exc.strerror, ) return [] if systemd_services is None: systemd_services = _get_systemd_services(root) ret = [] for sysv_service in sysv_services: if os.access(os.path.join(initscript_path, sysv_service), os.X_OK): if sysv_service in systemd_services: log.debug( "sysvinit script '%s' found, but systemd unit " "'%s.service' already exists", sysv_service, sysv_service, ) continue ret.append(sysv_service) return ret def _get_service_exec(): """ Returns the path to the sysv service manager (either update-rc.d or chkconfig) """ contextkey = "systemd._get_service_exec" if contextkey not in __context__: executables = ("update-rc.d", "chkconfig") for executable in executables: service_exec = salt.utils.path.which(executable) if service_exec is not None: break else: raise CommandExecutionError( "Unable to find sysv service manager (tried {0})".format( ", ".join(executables) ) ) __context__[contextkey] = service_exec return __context__[contextkey] def _runlevel(): """ Return the current runlevel """ contextkey = "systemd._runlevel" if contextkey in __context__: return __context__[contextkey] out = __salt__["cmd.run"]("runlevel", python_shell=False, ignore_retcode=True) try: ret = out.split()[1] except IndexError: # The runlevel is unknown, return the default ret = _default_runlevel() __context__[contextkey] = ret return ret def _strip_scope(msg): """ Strip unnecessary message about running the command with --scope from stderr so that we can raise an exception with the remaining stderr text. """ ret = [] for line in msg.splitlines(): if not line.endswith(".scope"): ret.append(line) return "\n".join(ret).strip() def _systemctl_cmd(action, name=None, systemd_scope=False, no_block=False, root=None): """ Build a systemctl command line. Treat unit names without one of the valid suffixes as a service. """ ret = [] if ( systemd_scope and salt.utils.systemd.has_scope(__context__) and __salt__["config.get"]("systemd.scope", True) ): ret.extend(["systemd-run", "--scope"]) ret.append("systemctl") if no_block: ret.append("--no-block") if root: ret.extend(["--root", root]) if isinstance(action, six.string_types): action = shlex.split(action) ret.extend(action) if name is not None: ret.append(_canonical_unit_name(name)) if "status" in ret: ret.extend(["-n", "0"]) return ret def _systemctl_status(name): """ Helper function which leverages __context__ to keep from running 'systemctl status' more than once. 
""" contextkey = "systemd._systemctl_status.%s" % name if contextkey in __context__: return __context__[contextkey] __context__[contextkey] = __salt__["cmd.run_all"]( _systemctl_cmd("status", name), python_shell=False, redirect_stderr=True, ignore_retcode=True, ) return __context__[contextkey] def _sysv_enabled(name, root): """ A System-V style service is assumed disabled if the "startup" symlink (starts with "S") to its script is found in /etc/init.d in the current runlevel. """ # Find exact match (disambiguate matches like "S01anacron" for cron) rc = _root("/etc/rc{}.d/S*{}".format(_runlevel(), name), root) for match in glob.glob(rc): if re.match(r"S\d{,2}%s" % name, os.path.basename(match)): return True return False def _untracked_custom_unit_found(name, root=None): """ If the passed service name is not available, but a unit file exist in /etc/systemd/system, return True. Otherwise, return False. """ system = _root("/etc/systemd/system", root) unit_path = os.path.join(system, _canonical_unit_name(name)) return os.access(unit_path, os.R_OK) and not _check_available(name) def _unit_file_changed(name): """ Returns True if systemctl reports that the unit file has changed, otherwise returns False. """ status = _systemctl_status(name)["stdout"].lower() return "'systemctl daemon-reload'" in status def systemctl_reload(): """ .. versionadded:: 0.15.0 Reloads systemctl, an action needed whenever unit files are updated. CLI Example: .. code-block:: bash salt '*' service.systemctl_reload """ out = __salt__["cmd.run_all"]( _systemctl_cmd("--system daemon-reload"), python_shell=False, redirect_stderr=True, ) if out["retcode"] != 0: raise CommandExecutionError( "Problem performing systemctl daemon-reload: %s" % out["stdout"] ) _clear_context() return True def get_running(): """ Return a list of all running services, so far as systemd is concerned CLI Example: .. code-block:: bash salt '*' service.get_running """ ret = set() # Get running systemd units out = __salt__["cmd.run"]( _systemctl_cmd("--full --no-legend --no-pager"), python_shell=False, ignore_retcode=True, ) for line in salt.utils.itertools.split(out, "\n"): try: comps = line.strip().split() fullname = comps[0] if len(comps) > 3: active_state = comps[3] except ValueError as exc: log.error(exc) continue else: if active_state != "running": continue try: unit_name, unit_type = fullname.rsplit(".", 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == "service" else fullname) return sorted(ret) def get_enabled(root=None): """ Return a list of all enabled services root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.get_enabled """ ret = set() # Get enabled systemd units. Can't use --state=enabled here because it's # not present until systemd 216. 
out = __salt__["cmd.run"]( _systemctl_cmd("--full --no-legend --no-pager list-unit-files", root=root), python_shell=False, ignore_retcode=True, ) for line in salt.utils.itertools.split(out, "\n"): try: fullname, unit_state = line.strip().split(None, 1) except ValueError: continue else: if unit_state != "enabled": continue try: unit_name, unit_type = fullname.rsplit(".", 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == "service" else fullname) # Add in any sysvinit services that are enabled ret.update(set([x for x in _get_sysv_services(root) if _sysv_enabled(x, root)])) return sorted(ret) def get_disabled(root=None): """ Return a list of all disabled services root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.get_disabled """ ret = set() # Get disabled systemd units. Can't use --state=disabled here because it's # not present until systemd 216. out = __salt__["cmd.run"]( _systemctl_cmd("--full --no-legend --no-pager list-unit-files", root=root), python_shell=False, ignore_retcode=True, ) for line in salt.utils.itertools.split(out, "\n"): try: fullname, unit_state = line.strip().split(None, 1) except ValueError: continue else: if unit_state != "disabled": continue try: unit_name, unit_type = fullname.rsplit(".", 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == "service" else fullname) # Add in any sysvinit services that are disabled ret.update(set([x for x in _get_sysv_services(root) if not _sysv_enabled(x, root)])) return sorted(ret) def get_static(root=None): """ .. versionadded:: 2015.8.5 Return a list of all static services root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.get_static """ ret = set() # Get static systemd units. Can't use --state=static here because it's # not present until systemd 216. out = __salt__["cmd.run"]( _systemctl_cmd("--full --no-legend --no-pager list-unit-files", root=root), python_shell=False, ignore_retcode=True, ) for line in salt.utils.itertools.split(out, "\n"): try: fullname, unit_state = line.strip().split(None, 1) except ValueError: continue else: if unit_state != "static": continue try: unit_name, unit_type = fullname.rsplit(".", 1) except ValueError: continue if unit_type in VALID_UNIT_TYPES: ret.add(unit_name if unit_type == "service" else fullname) # sysvinit services cannot be static return sorted(ret) def get_all(root=None): """ Return a list of all available services root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.get_all """ ret = _get_systemd_services(root) ret.update(set(_get_sysv_services(root, systemd_services=ret))) return sorted(ret) def available(name): """ .. versionadded:: 0.10.4 Check that the given service is available taking into account template units. CLI Example: .. code-block:: bash salt '*' service.available sshd """ _check_for_unit_changes(name) return _check_available(name) def missing(name): """ .. versionadded:: 2014.1.0 The inverse of :py:func:`service.available <salt.modules.systemd.available>`. Returns ``True`` if the specified service is not available, otherwise returns ``False``. CLI Example: .. code-block:: bash salt '*' service.missing sshd """ return not available(name) def unmask_(name, runtime=False, root=None): """ .. versionadded:: 2015.5.0 .. 
versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Unmask the specified service with systemd runtime : False Set to ``True`` to unmask this service only until the next reboot .. versionadded:: 2017.7.0 In previous versions, this function would remove whichever mask was identified by running ``systemctl is-enabled`` on the service. However, since it is possible to both have both indefinite and runtime masks on a service simultaneously, this function now removes a runtime mask only when this argument is set to ``True``, and otherwise removes an indefinite mask. root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.unmask foo salt '*' service.unmask foo runtime=True """ _check_for_unit_changes(name) if not masked(name, runtime, root=root): log.debug("Service '%s' is not %smasked", name, "runtime-" if runtime else "") return True cmd = "unmask --runtime" if runtime else "unmask" out = __salt__["cmd.run_all"]( _systemctl_cmd(cmd, name, systemd_scope=True, root=root), python_shell=False, redirect_stderr=True, ) if out["retcode"] != 0: raise CommandExecutionError("Failed to unmask service '%s'" % name) return True def mask(name, runtime=False, root=None): """ .. versionadded:: 2015.5.0 .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Mask the specified service with systemd runtime : False Set to ``True`` to mask this service only until the next reboot .. versionadded:: 2015.8.5 root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.mask foo salt '*' service.mask foo runtime=True """ _check_for_unit_changes(name) cmd = "mask --runtime" if runtime else "mask" out = __salt__["cmd.run_all"]( _systemctl_cmd(cmd, name, systemd_scope=True, root=root), python_shell=False, redirect_stderr=True, ) if out["retcode"] != 0: raise CommandExecutionError( "Failed to mask service '%s'" % name, info=out["stdout"] ) return True def masked(name, runtime=False, root=None): """ .. versionadded:: 2015.8.0 .. versionchanged:: 2015.8.5 The return data for this function has changed. If the service is masked, the return value will now be the output of the ``systemctl is-enabled`` command (so that a persistent mask can be distinguished from a runtime mask). If the service is not masked, then ``False`` will be returned. .. 
versionchanged:: 2017.7.0 This function now returns a boolean telling the user whether a mask specified by the new ``runtime`` argument is set. If ``runtime`` is ``False``, this function will return ``True`` if an indefinite mask is set for the named service (otherwise ``False`` will be returned). If ``runtime`` is ``False``, this function will return ``True`` if a runtime mask is set, otherwise ``False``. Check whether or not a service is masked runtime : False Set to ``True`` to check for a runtime mask .. versionadded:: 2017.7.0 In previous versions, this function would simply return the output of ``systemctl is-enabled`` when the service was found to be masked. However, since it is possible to both have both indefinite and runtime masks on a service simultaneously, this function now only checks for runtime masks if this argument is set to ``True``. Otherwise, it will check for an indefinite mask. root Enable/disable/mask unit files in the specified root directory CLI Examples: .. code-block:: bash salt '*' service.masked foo salt '*' service.masked foo runtime=True """ _check_for_unit_changes(name) root_dir = _root("/run" if runtime else "/etc", root) link_path = os.path.join(root_dir, "systemd", "system", _canonical_unit_name(name)) try: return os.readlink(link_path) == "/dev/null" except OSError as exc: if exc.errno == errno.ENOENT: log.trace( "Path %s does not exist. This is normal if service '%s' is " "not masked or does not exist.", link_path, name, ) elif exc.errno == errno.EINVAL: log.error( "Failed to check mask status for service %s. Path %s is a " "file, not a symlink. This could be caused by changes in " "systemd and is probably a bug in Salt. Please report this " "to the developers.", name, link_path, ) return False def start(name, no_block=False, unmask=False, unmask_runtime=False): """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Start the specified service with systemd no_block : False Set to ``True`` to start the service using ``--no-block``. .. versionadded:: 2017.7.0 unmask : False Set to ``True`` to remove an indefinite mask before attempting to start the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before starting. This behavior is no longer the default. unmask_runtime : False Set to ``True`` to remove a runtime mask before attempting to start the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before starting. This behavior is no longer the default. CLI Example: .. code-block:: bash salt '*' service.start <service name> """ _check_for_unit_changes(name) _check_unmask(name, unmask, unmask_runtime) ret = __salt__["cmd.run_all"]( _systemctl_cmd("start", name, systemd_scope=True, no_block=no_block), python_shell=False, ) if ret["retcode"] != 0: # Instead of returning a bool, raise an exception so that we can # include the error message in the return data. 
This helps give more # information to the user in instances where the service is masked. raise CommandExecutionError(_strip_scope(ret["stderr"])) return True def stop(name, no_block=False): """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Stop the specified service with systemd no_block : False Set to ``True`` to start the service using ``--no-block``. .. versionadded:: 2017.7.0 CLI Example: .. code-block:: bash salt '*' service.stop <service name> """ _check_for_unit_changes(name) # Using cmd.run_all instead of cmd.retcode here to make unit tests easier return ( __salt__["cmd.run_all"]( _systemctl_cmd("stop", name, systemd_scope=True, no_block=no_block), python_shell=False, )["retcode"] == 0 ) def restart(name, no_block=False, unmask=False, unmask_runtime=False): """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Restart the specified service with systemd no_block : False Set to ``True`` to start the service using ``--no-block``. .. versionadded:: 2017.7.0 unmask : False Set to ``True`` to remove an indefinite mask before attempting to restart the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before restarting. This behavior is no longer the default. unmask_runtime : False Set to ``True`` to remove a runtime mask before attempting to restart the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before restarting. This behavior is no longer the default. CLI Example: .. code-block:: bash salt '*' service.restart <service name> """ _check_for_unit_changes(name) _check_unmask(name, unmask, unmask_runtime) ret = __salt__["cmd.run_all"]( _systemctl_cmd("restart", name, systemd_scope=True, no_block=no_block), python_shell=False, ) if ret["retcode"] != 0: # Instead of returning a bool, raise an exception so that we can # include the error message in the return data. This helps give more # information to the user in instances where the service is masked. raise CommandExecutionError(_strip_scope(ret["stderr"])) return True def reload_(name, no_block=False, unmask=False, unmask_runtime=False): """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. 
This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Reload the specified service with systemd no_block : False Set to ``True`` to reload the service using ``--no-block``. .. versionadded:: 2017.7.0 unmask : False Set to ``True`` to remove an indefinite mask before attempting to reload the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before reloading. This behavior is no longer the default. unmask_runtime : False Set to ``True`` to remove a runtime mask before attempting to reload the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before reloading. This behavior is no longer the default. CLI Example: .. code-block:: bash salt '*' service.reload <service name> """ _check_for_unit_changes(name) _check_unmask(name, unmask, unmask_runtime) ret = __salt__["cmd.run_all"]( _systemctl_cmd("reload", name, systemd_scope=True, no_block=no_block), python_shell=False, ) if ret["retcode"] != 0: # Instead of returning a bool, raise an exception so that we can # include the error message in the return data. This helps give more # information to the user in instances where the service is masked. raise CommandExecutionError(_strip_scope(ret["stderr"])) return True def force_reload(name, no_block=True, unmask=False, unmask_runtime=False): """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html .. versionadded:: 0.12.0 Force-reload the specified service with systemd no_block : False Set to ``True`` to start the service using ``--no-block``. .. versionadded:: 2017.7.0 unmask : False Set to ``True`` to remove an indefinite mask before attempting to force-reload the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before force-reloading. This behavior is no longer the default. unmask_runtime : False Set to ``True`` to remove a runtime mask before attempting to force-reload the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before force-reloading. This behavior is no longer the default. CLI Example: .. code-block:: bash salt '*' service.force_reload <service name> """ _check_for_unit_changes(name) _check_unmask(name, unmask, unmask_runtime) ret = __salt__["cmd.run_all"]( _systemctl_cmd("force-reload", name, systemd_scope=True, no_block=no_block), python_shell=False, ) if ret["retcode"] != 0: # Instead of returning a bool, raise an exception so that we can # include the error message in the return data. This helps give more # information to the user in instances where the service is masked. 
raise CommandExecutionError(_strip_scope(ret["stderr"])) return True # The unused sig argument is required to maintain consistency with the API # established by Salt's service management states. def status(name, sig=None): # pylint: disable=unused-argument """ Return the status for a service via systemd. If the name contains globbing, a dict mapping service name to True/False values is returned. .. versionchanged:: 2018.3.0 The service name can now be a glob (e.g. ``salt*``) Args: name (str): The name of the service to check sig (str): Not implemented Returns: bool: True if running, False otherwise dict: Maps service name to True if running, False otherwise CLI Example: .. code-block:: bash salt '*' service.status <service name> [service signature] """ contains_globbing = bool(re.search(r"\*|\?|\[.+\]", name)) if contains_globbing: services = fnmatch.filter(get_all(), name) else: services = [name] results = {} for service in services: _check_for_unit_changes(service) results[service] = ( __salt__["cmd.retcode"]( _systemctl_cmd("is-active", service), python_shell=False, ignore_retcode=True, ) == 0 ) if contains_globbing: return results return results[name] # **kwargs is required to maintain consistency with the API established by # Salt's service management states. def enable( name, no_block=False, unmask=False, unmask_runtime=False, root=None, **kwargs ): # pylint: disable=unused-argument """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Enable the named service to start when the system boots no_block : False Set to ``True`` to start the service using ``--no-block``. .. versionadded:: 2017.7.0 unmask : False Set to ``True`` to remove an indefinite mask before attempting to enable the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before enabling. This behavior is no longer the default. unmask_runtime : False Set to ``True`` to remove a runtime mask before attempting to enable the service. .. versionadded:: 2017.7.0 In previous releases, Salt would simply unmask a service before enabling. This behavior is no longer the default. root Enable/disable/mask unit files in the specified root directory CLI Example: .. 
code-block:: bash salt '*' service.enable <service name> """ _check_for_unit_changes(name) _check_unmask(name, unmask, unmask_runtime, root) if name in _get_sysv_services(root): cmd = [] if salt.utils.systemd.has_scope(__context__) and __salt__["config.get"]( "systemd.scope", True ): cmd.extend(["systemd-run", "--scope"]) service_exec = _get_service_exec() if service_exec.endswith("/update-rc.d"): cmd.extend([service_exec, "-f", name, "defaults", "99"]) elif service_exec.endswith("/chkconfig"): cmd.extend([service_exec, name, "on"]) return ( __salt__["cmd.retcode"](cmd, python_shell=False, ignore_retcode=True) == 0 ) ret = __salt__["cmd.run_all"]( _systemctl_cmd( "enable", name, systemd_scope=True, no_block=no_block, root=root ), python_shell=False, ignore_retcode=True, ) if ret["retcode"] != 0: # Instead of returning a bool, raise an exception so that we can # include the error message in the return data. This helps give more # information to the user in instances where the service is masked. raise CommandExecutionError(_strip_scope(ret["stderr"])) return True # The unused kwargs argument is required to maintain consistency with the API # established by Salt's service management states. def disable( name, no_block=False, root=None, **kwargs ): # pylint: disable=unused-argument """ .. versionchanged:: 2015.8.12,2016.3.3,2016.11.0 On minions running systemd>=205, `systemd-run(1)`_ is now used to isolate commands run by this function from the ``salt-minion`` daemon's control group. This is done to avoid a race condition in cases where the ``salt-minion`` service is restarted while a service is being modified. If desired, usage of `systemd-run(1)`_ can be suppressed by setting a :mod:`config option <salt.modules.config.get>` called ``systemd.scope``, with a value of ``False`` (no quotes). .. _`systemd-run(1)`: https://www.freedesktop.org/software/systemd/man/systemd-run.html Disable the named service to not start when the system boots no_block : False Set to ``True`` to start the service using ``--no-block``. .. versionadded:: 2017.7.0 root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.disable <service name> """ _check_for_unit_changes(name) if name in _get_sysv_services(root): cmd = [] if salt.utils.systemd.has_scope(__context__) and __salt__["config.get"]( "systemd.scope", True ): cmd.extend(["systemd-run", "--scope"]) service_exec = _get_service_exec() if service_exec.endswith("/update-rc.d"): cmd.extend([service_exec, "-f", name, "remove"]) elif service_exec.endswith("/chkconfig"): cmd.extend([service_exec, name, "off"]) return ( __salt__["cmd.retcode"](cmd, python_shell=False, ignore_retcode=True) == 0 ) # Using cmd.run_all instead of cmd.retcode here to make unit tests easier return ( __salt__["cmd.run_all"]( _systemctl_cmd( "disable", name, systemd_scope=True, no_block=no_block, root=root ), python_shell=False, ignore_retcode=True, )["retcode"] == 0 ) # The unused kwargs argument is required to maintain consistency with the API # established by Salt's service management states. def enabled(name, root=None, **kwargs): # pylint: disable=unused-argument """ Return if the named service is enabled to start on boot root Enable/disable/mask unit files in the specified root directory CLI Example: .. 
code-block:: bash salt '*' service.enabled <service name> """ # Try 'systemctl is-enabled' first, then look for a symlink created by # systemctl (older systemd releases did not support using is-enabled to # check templated services), and lastly check for a sysvinit service. if ( __salt__["cmd.retcode"]( _systemctl_cmd("is-enabled", name, root=root), python_shell=False, ignore_retcode=True, ) == 0 ): return True elif "@" in name: # On older systemd releases, templated services could not be checked # with ``systemctl is-enabled``. As a fallback, look for the symlinks # created by systemctl when enabling templated services. local_config_path = _root(LOCAL_CONFIG_PATH, "/") cmd = [ "find", local_config_path, "-name", name, "-type", "l", "-print", "-quit", ] # If the find command returns any matches, there will be output and the # string will be non-empty. if bool(__salt__["cmd.run"](cmd, python_shell=False)): return True elif name in _get_sysv_services(root): return _sysv_enabled(name, root) return False def disabled(name, root=None): """ Return if the named service is disabled from starting on boot root Enable/disable/mask unit files in the specified root directory CLI Example: .. code-block:: bash salt '*' service.disabled <service name> """ return not enabled(name, root=root) def show(name, root=None): """ .. versionadded:: 2014.7.0 Show properties of one or more units/jobs or the manager root Enable/disable/mask unit files in the specified root directory CLI Example: salt '*' service.show <service name> """ ret = {} out = __salt__["cmd.run"]( _systemctl_cmd("show", name, root=root), python_shell=False ) for line in salt.utils.itertools.split(out, "\n"): comps = line.split("=") name = comps[0] value = "=".join(comps[1:]) if value.startswith("{"): value = value.replace("{", "").replace("}", "") ret[name] = {} for item in value.split(" ; "): comps = item.split("=") ret[name][comps[0].strip()] = comps[1].strip() elif name in ("Before", "After", "Wants"): ret[name] = value.split() else: ret[name] = value return ret def execs(root=None): """ .. versionadded:: 2014.7.0 Return a list of all files specified as ``ExecStart`` for all services. root Enable/disable/mask unit files in the specified root directory CLI Example: salt '*' service.execs """ ret = {} for service in get_all(root=root): data = show(service, root=root) if "ExecStart" not in data: continue ret[service] = data["ExecStart"]["path"] return ret def firstboot( locale=None, locale_message=None, keymap=None, timezone=None, hostname=None, machine_id=None, root=None, ): """ .. versionadded:: TBD Call systemd-firstboot to configure basic settings of the system locale Set primary locale (LANG=) locale_message Set message locale (LC_MESSAGES=) keymap Set keymap timezone Set timezone hostname Set host name machine_id Set machine ID root Operate on an alternative filesystem root CLI Example: salt '*' service.firstboot keymap=jp locale=en_US.UTF-8 """ cmd = ["systemd-firstboot"] parameters = [ ("locale", locale), ("locale-message", locale_message), ("keymap", keymap), ("timezone", timezone), ("hostname", hostname), ("machine-ID", machine_id), ("root", root), ] for parameter, value in parameters: if value: cmd.extend(["--{}".format(parameter), str(value)]) out = __salt__["cmd.run_all"](cmd) if out["retcode"] != 0: raise CommandExecutionError("systemd-firstboot error: {}".format(out["stderr"])) return True
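# --- Editor's illustrative sketch (not part of the Salt module above) ---
# On a minion where systemd supports scopes and the `systemd.scope` config
# option is left at its default of True, _systemctl_cmd("start", unit,
# systemd_scope=True, no_block=True) builds the argv shown below, which the
# module then runs via __salt__["cmd.run_all"](..., python_shell=False).
# The function only returns the list, to make the command shape concrete.
def example_start_unit_no_block(unit="nginx"):
    return [
        "systemd-run", "--scope",      # isolate from the salt-minion control group
        "systemctl", "--no-block",     # present because no_block=True
        "start", "%s.service" % unit,  # _canonical_unit_name() adds the .service suffix
    ]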
py
1a590a56e84cd660052ca1e253145cc08ad1ff71
from distutils.core import setup

setup(name='metametamerge',
      author='Vitor C. Piro',
      author_email='[email protected]',
      url='https://github.com/pirovc/metametamerge',
      version='1.1',
      package_dir={'metametamerge': ''},
      packages=['metametamerge'],
      scripts=['MetaMetaMerge.py'])
py
1a590ac59aa6d248c437bd9e8ded3caaeac778f7
# ------------------------------------------------- # # Auto generated. Modification will be overwritten. # # ------------------------------------------------- # import base64 file_data_list=[ { 'name': '/app/login.html', 'data': base64.b64decode(b'PCFET0NUWVBFIGh0bWw+CjxodG1sIG5nLWFwcD0iY29zbW9zQXBwIj4KPGhlYWQgbGFuZz0iZW4iPgogICAgPG1ldGEgY2hhcnNldD0iVVRGLTgiPgogICAgPHRpdGxlPkNvc21vcyBzZXJ2aWNlIGNvbnN1bWVyPC90aXRsZT4KPC9oZWFkPgo8Ym9keSAgcm9sZT0iZG9jdW1lbnQiIG5nLWNvbnRyb2xsZXI9IlNlcnZpY2VDb25zdW1lckN0cmwiPgogICAgPGRpdiBjbGFzcz0ibmF2YmFyIG5hdmJhci1pbnZlcnNlIG5hdmJhci1maXhlZC10b3AiIHJvbGU9Im5hdmlnYXRpb24iPgogICAgICA8ZGl2IGNsYXNzPSJjb250YWluZXIiPgogICAgICAgIDxkaXYgY2xhc3M9Im5hdmJhci1oZWFkZXIiPgogICAgICAgICAgPGEgY2xhc3M9Im5hdmJhci1icmFuZCIgaHJlZj0iLyI+Q29zbW9zIEZyYW1ld29yazwvYT4KICAgICAgICA8L2Rpdj4KICAgICAgICA8ZGl2IGNsYXNzPSJuYXZiYXItY29sbGFwc2UgY29sbGFwc2UiPgogICAgICAgICAgPHVsIGNsYXNzPSJuYXYgbmF2YmFyLW5hdiI+CiAgICAgICAgICAgIDxsaSBjbGFzcz0iYWN0aXZlIj48YSBocmVmPSIvIj5Ib21lPC9hPjwvbGk+CiAgICAgICAgICAgIDxsaSBjbGFzcz0iZHJvcGRvd24iIG5nLXNob3c9ImxvZ2dlZEluKCkiPgogICAgICAgICAgICAgIDxhIGhyZWY9IiMiIGNsYXNzPSJkcm9wZG93bi10b2dnbGUiIGRhdGEtdG9nZ2xlPSJkcm9wZG93biIgbmctYmluZD0idXNlck5hbWUiPjxiIGNsYXNzPSJjYXJldCI+PC9iPjwvYT4KICAgICAgICAgICAgICA8dWwgY2xhc3M9ImRyb3Bkb3duLW1lbnUiPgogICAgICAgICAgICAgICAgPGxpPjxhIGhyZWY9Ii9sb2dvdXQvIj5Mb2dvdXQ8L2E+PC9saT4KICAgICAgICAgICAgICAgIDxsaSBjbGFzcz0iZGl2aWRlciI+PC9saT4KICAgICAgICAgICAgICAgIDxsaT48YSBocmVmPSIjIj5TZXR0aW5nczwvYT48L2xpPgogICAgICAgICAgICAgIDwvdWw+CiAgICAgICAgICAgIDwvbGk+CiAgICAgICAgICA8L3VsPgogICAgICAgIDwvZGl2PjwhLS0vLm5hdi1jb2xsYXBzZSAtLT4KICAgICAgPC9kaXY+CiAgICA8L2Rpdj4KCiAgICA8ZGl2IGNsYXNzPSJjb250YWluZXIgdGhlbWUtc2hvd2Nhc2UiIHJvbGU9Im1haW4iPgogICAgICAgICAgPGRpdiBjbGFzcz0ianVtYm90cm9uIj4KICAgICAgICAgICAgPGgzICBuZy1zaG93PSIhbG9nZ2VkSW4oKSI+TG9naW48L2gzPgogICAgICAgICAgICA8ZGl2ICBuZy1zaG93PSIhbG9nZ2VkSW4oKSI+CiAgICAgICAgICAgICAgICA8YSBjbGFzcz0iYnRuIGJ0bi1zbWFsbCBidG4tcHJpbWFyeSIgaHJlZj0iL2xvZ2luL2dvb2dsZS8iPkdvb2dsZTwvYT4KICAgICAgICAgICAgICAgIDxhIGNsYXNzPSJidG4gYnRuLXNtYWxsIGJ0bi1wcmltYXJ5IiBocmVmPSIvbG9naW4vZmFjZWJvb2tncmFwaC8iPkZhY2Vib29rPC9hPgogICAgICAgICAgICAgICAgPGJyIC8+CiAgICAgICAgICAgICAgICA8aHIgLz4KICAgICAgICAgICAgICAgIDxmb3JtIGFjdGlvbj0iL2xvZ2luL29wZW5pZC8iIG1ldGhvZD0iZ2V0Ij4KICAgICAgICAgICAgICAgICAgICA8bGFiZWw+T3BlbmlkPC9sYWJlbD48aW5wdXQgdHlwZT0idGV4dCIgbmFtZT0ib3BlbmlkLm9wX2VuZHBvaW50Ij4KICAgICAgICAgICAgICAgICAgICA8aW5wdXQgdHlwZT0ic3VibWl0IiB2YWx1ZT0iTG9naW4iPgogICAgICAgICAgICAgICAgPC9mb3JtPgogICAgICAgICAgICAgICAgPGhyIC8+CiAgICAgICAgICAgIDwvZGl2PgogICAgICAgICAgICA8Zm9ybSBuZy1zaG93PSIhbG9nZ2VkSW4oKSIgcm9sZT0iZm9ybSIgbWV0aG9kPSJwb3N0IiBhY3Rpb249Ii9sb2dpbi8iPgogICAgICAgICAgICAgICAgPGRpdiBjbGFzcz0iZm9ybS1ncm91cCI+CiAgICAgICAgICAgICAgICAgICAgPGxhYmVsPlVzZXIgbmFtZTwvbGFiZWw+CiAgICAgICAgICAgICAgICAgICAgPGlucHV0IGNsYXNzPSJmb3JtLWNvbnRyb2wiIGlkPSJ1c2VybmFtZSIgdHlwZT0idGV4dCIgbmFtZT0idXNlcm5hbWUiLz4KICAgICAgICAgICAgICAgIDwvZGl2PgogICAgICAgICAgICAgICAgPGRpdiBjbGFzcz0iZm9ybS1ncm91cCI+CiAgICAgICAgICAgICAgICAgICAgPGxhYmVsPlBhc3N3b3JkPC9sYWJlbD4KICAgICAgICAgICAgICAgICAgICA8aW5wdXQgY2xhc3M9ImZvcm0tY29udHJvbCIgdHlwZT0icGFzc3dvcmQiIGlkPSJwYXNzd29yZCIgbmFtZT0icGFzc3dvcmQiIC8+CiAgICAgICAgICAgICAgICA8L2Rpdj4KICAgICAgICAgICAgICAgIDxkaXYgY2xhc3M9ImZvcm0tZ3JvdXAiPgogICAgICAgICAgICAgICAgICAgIDxpbnB1dCB0eXBlPSJzdWJtaXQiIHZhbHVlPSJMb2dpbiIgY2xhc3M9ImJ0biBidG4tcHJpbWFyeSIgLz4KICAgICAgICAgICAgICAgICA8L2Rpdj4KICAgICAgICAgICAgPC9mb3JtPgogICAgICAgICAgICAgPGRpdiBuZy1zaG93PSJsb2dnZWRJbigpIj5XZWxjb21lPC9kaXY+CiAgICAgICAgICA8L2Rpdj4KICAgICAgICA8ZGl2IGlkPSJzdGF0dXMiPjwvZGl2PgogICAgPC9kaX
Y+CgogICAgPHNjcmlwdCBzcmM9Ii8vYWpheC5nb29nbGVhcGlzLmNvbS9hamF4L2xpYnMvanF1ZXJ5LzEuMTEuMS9qcXVlcnkubWluLmpzIj48L3NjcmlwdD4KICAgIDxzY3JpcHQgc3JjPSIvL2FqYXguZ29vZ2xlYXBpcy5jb20vYWpheC9saWJzL2FuZ3VsYXJqcy8xLjIuMTcvYW5ndWxhci5taW4uanMiPjwvc2NyaXB0PgogICAgPHNjcmlwdCBzcmM9Ii8vbmV0ZG5hLmJvb3RzdHJhcGNkbi5jb20vYm9vdHN0cmFwLzMuMS4xL2pzL2Jvb3RzdHJhcC5taW4uanMiPjwvc2NyaXB0PgogICAgPHNjcmlwdCBzcmM9Ii9zZXJ2aWNlYXBpLmpzIj48L3NjcmlwdD4KICAgIDxzY3JpcHQgc3JjPSIvY29udHJvbGxlcnMuanMiPjwvc2NyaXB0PgogICAgPGxpbmsgcmVsPSJzdHlsZXNoZWV0IiBocmVmPSIvL25ldGRuYS5ib290c3RyYXBjZG4uY29tL2Jvb3RzdHJhcC8zLjEuMS9jc3MvYm9vdHN0cmFwLm1pbi5jc3MiPgo8L2JvZHk+CjwvaHRtbD4K') }, { 'name': '/app/serviceapi.js', 'data': base64.b64decode(b'LyoqCiAqIENyZWF0ZWQgYnkgTWFydWYgTWFuaXJ1enphbWFuIChtYXJ1Zm1AY29zbW9zZnJhbWV3b3JrLmNvbSkgb24gNi8xNC8xNC4KICovCgpmdW5jdGlvbiBwcm9jZXNzRXJyb3IoanFYSFIsIHRleHRTdGF0dXMsIGVycm9yVGhyb3duKXsKICAgIGpRdWVyeSgiI3N0YXR1cyIpLmh0bWwoIjxoMz5FcnJvcjwvaDM+PGRpdj4iK2Vycm9yVGhyb3duKyI8L2Rpdj4iKTsKfQoKZnVuY3Rpb24gcHJvY2Vzc1N1Y2Nlc3MoZGF0YSl7CiAgICB2YXIgZGF0YTIgPSBKU09OLnBhcnNlKGRhdGEpOwogICAgalF1ZXJ5KCIjcmVzdWx0IikudmFsKEpTT04uc3RyaW5naWZ5KGRhdGEyLHVuZGVmaW5lZCwgMikpOwp9CgpmdW5jdGlvbiBnZXRTZXJ2aWNlVXJsKHNlcnZpY2UpIHsKICAgIHZhciByb290ID0gZG9jdW1lbnQuVVJMOwogICAgaWYoc2VydmljZVswXSA9PSAnLycpewogICAgICAgIHNlcnZpY2UgPSBzZXJ2aWNlLnN1YnN0cmluZygxKTsKICAgIH0KICAgIHJldHVybiByb290K3NlcnZpY2U7Cn0KCmZ1bmN0aW9uIGRvX29wZXJhdGlvbih1cmwsIG1ldGhvZCwgZGF0YSwgY2FsbGJhY2ssIGVycm9yX2NhbGxiYWNrKXsKICAgIGpRdWVyeSgiI3Jlc3VsdCIpLnZhbCgiIik7CiAgICBqUXVlcnkoIiNzdGF0dXMiKS5odG1sKCIiKTsKICAgIGNvbnNvbGUubG9nKG1ldGhvZCsgIjogIit1cmwpCiAgICAkLmFqYXgoewogICAgICAgIHVybDogdXJsLAogICAgICAgIHR5cGU6IG1ldGhvZCwKICAgICAgICBkYXRhOmRhdGEsCiAgICAgICAgc3VjY2VzczogY2FsbGJhY2ssCiAgICAgICAgZXJyb3I6IGVycm9yX2NhbGxiYWNrCiAgICB9KTsKfQoKZnVuY3Rpb24gaW5zZXJ0U2VydmljZSgpewogICAgdmFyIG5hbWUgPSBqUXVlcnkoIiNuYW1lIikudmFsKCk7CiAgICB2YXIgZGF0YSA9IGpRdWVyeSgiI2RhdGEiKS52YWwoKTsKCiAgICB2YXIgdXJsID0gIGdldFNlcnZpY2VVcmwobmFtZSk7CiAgICBkb19vcGVyYXRpb24odXJsLCAnUE9TVCcsIGRhdGEsIHByb2Nlc3NTdWNjZXNzLCBwcm9jZXNzRXJyb3IpOwp9CgpmdW5jdGlvbiBkZWxldGVJdGVtKCl7CiAgICB2YXIgbmFtZSA9IGpRdWVyeSgiI25hbWUiKS52YWwoKTsKICAgIHZhciBkYXRhID0galF1ZXJ5KCIjZGF0YSIpLnZhbCgpOwoKICAgIHZhciB1cmwgPSAgZ2V0U2VydmljZVVybChuYW1lKTsKCiAgICBkb19vcGVyYXRpb24odXJsLCAnREVMRVRFJywgZGF0YSwgcHJvY2Vzc1N1Y2Nlc3MsIHByb2Nlc3NFcnJvcik7Cn0KCmZ1bmN0aW9uIGxvYWRJdGVtKCl7CiAgICB2YXIgbmFtZSA9IGpRdWVyeSgiI25hbWUiKS52YWwoKTsKICAgIHZhciBkYXRhID0galF1ZXJ5KCIjZGF0YSIpLnZhbCgpOwogICAgdmFyIGNvbHVtbnMgPSBqUXVlcnkoIiNjb2x1bW5zIikudmFsKCk7CiAgICB2YXIgZmlsdGVyID0galF1ZXJ5KCIjZmlsdGVyIikudmFsKCk7CgogICAgdmFyIHVybCA9ICBnZXRTZXJ2aWNlVXJsKG5hbWUpOwogICAgaWYoY29sdW1ucyl7CiAgICAgICAgdXJsID0gdXJsKyI/IjsKICAgICAgICB1cmwgPXVybCsgImNvbHVtbnM9Iitjb2x1bW5zOwogICAgfQogICAgaWYoZmlsdGVyKXsKICAgICAgICBpZighY29sdW1ucykgewogICAgICAgICAgICB1cmwgPSB1cmwgKyAiPyI7CiAgICAgICAgfQogICAgICAgIGVsc2V7CiAgICAgICAgICAgIHVybD11cmwrIiYiOwogICAgICAgIH0KICAgICAgICB1cmwgPSB1cmwrImZpbHRlcj0iK2ZpbHRlcgogICAgfQoKICAgIGRvX29wZXJhdGlvbih1cmwsICdHRVQnLCBkYXRhLCBwcm9jZXNzU3VjY2VzcyAscHJvY2Vzc0Vycm9yKTsKfQoKZnVuY3Rpb24gdXBkYXRlSXRlbSgpewogICAgdmFyIG5hbWUgPSBqUXVlcnkoIiNuYW1lIikudmFsKCk7CiAgICB2YXIgZGF0YSA9IGpRdWVyeSgiI2RhdGEiKS52YWwoKTsKCiAgICB2YXIgdXJsID0gIGdldFNlcnZpY2VVcmwobmFtZSk7CgogICAgZG9fb3BlcmF0aW9uKHVybCwgJ1BVVCcsIGRhdGEsIHByb2Nlc3NTdWNjZXNzLCBwcm9jZXNzRXJyb3IpOwp9CgpmdW5jdGlvbiBzdWJzY3JpYmUgKG1vbml0b3JfZW5kcG9pbnQsIG9iamVjdF9uYW1lLCBjYWxsYmFjaykgewogICAgLy9UT0RPOiBVbmlmeSB0aGUgb2JzZXJ2ZXIgdG8gbGlzdGVuIHRvIG1hbnkgb2JqZWN0cyBhdCBhIHRpbWUgdXNpbmcgZGlmZmVyZW50IGNhbGxiYWNrCiAgICAvL3Zh
ciBtb25pdG9yX2VuZHBvaW50ID0gIndzOi8vbG9jYWxob3N0OjgwODAvY2hhbmdlbW9uaXRvciIKICAgIHZhciB3ZWJzb2NrZXQgPSBuZXcgV2ViU29ja2V0KG1vbml0b3JfZW5kcG9pbnQpOwogICAgdmFyIGZuID0gY2FsbGJhY2s7CiAgICB3ZWJzb2NrZXQub25vcGVuID0gZnVuY3Rpb24gKCkgewogICAgICAgIGNvbnNvbGUubG9nKCJTb2NrZXQgb3BlbmVkIik7CiAgICAgICAgd2Vic29ja2V0LnNlbmQoSlNPTi5zdHJpbmdpZnkoeyJ0eXBlIjogIm1vbml0b3JfbnMiLCAibnMiOiBvYmplY3RfbmFtZX0pKQogICAgfQoKICAgIHdlYnNvY2tldC5vbm1lc3NhZ2UgPSBmdW5jdGlvbiAoZXZ0KSB7CiAgICAgICAgY29uc29sZS5sb2coIk9uIG1lc3NhZ2U6ICIgKyBldnQuZGF0YSk7CiAgICAgICAgaWYgKGZuKSB7CiAgICAgICAgICAgIGZuKGV2dC5kYXRhKTsKICAgICAgICAgICAgLyoKICAgICAgICAgICAgIC8vSW4gY2FsbGJhY2sgeW91IG1heSBkbyBzb21ldGhpbmcgbGlrZSAoYWxzbyBjb25zaWRlciB0aGUgZXZ0LmRhdGEgaWYgeW91IGFyZSBtb25pdG9yaW5nIG11bHRpcGxlIG9iamVjdHMuOgogICAgICAgICAgICAgJGh0dHAuZ2V0KCcvc2VydmljZS8nK29iamVjdF9uYW1lKycvJykuc3VjY2VzcyhmdW5jdGlvbihkYXRhKSB7CiAgICAgICAgICAgICAkc2NvcGUuZGF0YSA9IGRhdGE7CiAgICAgICAgICAgICB9KTsKICAgICAgICAgICAgICovCiAgICAgICAgfQoKICAgIH0KCiAgICB3ZWJzb2NrZXQub25jbG9zZSA9IGZ1bmN0aW9uICgpIHsKICAgICAgICBjb25zb2xlLmxvZygiT24gY2xvc2VkIik7CiAgICB9Cn0=') }, { 'name': '/app/controllers.js', 'data': base64.b64decode(b'LyoqCiAqIENyZWF0ZWQgYnkgTWFydWYgTWFuaXJ1enphbWFuIG9uIDYvMTYvMTQuCiAqLwoKdmFyIGNvc21vc0FwcCA9IGFuZ3VsYXIubW9kdWxlKCdjb3Ntb3NBcHAnLCBbXSk7CgpmdW5jdGlvbiBnZXRDb29raWUobmFtZSkgewogIHZhciB2YWx1ZSA9ICI7ICIgKyBkb2N1bWVudC5jb29raWU7CiAgdmFyIHBhcnRzID0gdmFsdWUuc3BsaXQoIjsgIiArIG5hbWUgKyAiPSIpOwogIGlmIChwYXJ0cy5sZW5ndGggPT0gMikgcmV0dXJuIHBhcnRzLnBvcCgpLnNwbGl0KCI7Iikuc2hpZnQoKTsKfQoKZnVuY3Rpb24gZ2V0VXNlck5hbWUoZGVmYXVsdF91c2VyKXsKICAgIHZhciB1c2VyQ29va2llID0gZ2V0Q29va2llKCJ1c2VyIik7CiAgICBpZih1c2VyQ29va2llKXsKICAgICAgICB1c2VyQ29va2llID0gdXNlckNvb2tpZS5yZXBsYWNlKC9cIi9nLCAiIikKICAgICAgICB2YXIgZGVjb2RlZCA9IEpTT04ucGFyc2Uod2luZG93LmF0b2IodXNlckNvb2tpZSkpOwogICAgICAgIHJldHVybiBkZWNvZGVkWyJ1c2VybmFtZSJdIHx8IGRlZmF1bHRfdXNlcjsKICAgIH0KCiAgICByZXR1cm4gZGVmYXVsdF91c2VyOwp9CgpmdW5jdGlvbiBsb2dnZWRJbigpewogICAgICAgIHZhciB1c2VyQ29va2llID0gZ2V0Q29va2llKCJ1c2Vyc2VjcmV0Iik7CiAgICAgICAgaWYodXNlckNvb2tpZSl7CiAgICAgICAgICAgIHJldHVybiB0cnVlOwogICAgICAgIH0KICAgICAgICByZXR1cm4gZmFsc2U7Cn0KCmNvc21vc0FwcC5jb250cm9sbGVyKCdTZXJ2aWNlQ29uc3VtZXJDdHJsJywgZnVuY3Rpb24gKCRzY29wZSwgJGh0dHApIHsKICAgICRzY29wZS51c2VyTmFtZSA9IGdldFVzZXJOYW1lKCJObyBOYW1lIik7CiAgICAkc2NvcGUubG9nZ2VkSW4gPSBsb2dnZWRJbjsKfSk7') }, { 'name': '/app/index.html', 'data': 
base64.b64decode(b'PCFET0NUWVBFIGh0bWw+CjxodG1sIG5nLWFwcD0iY29zbW9zQXBwIj4KPGhlYWQgbGFuZz0iZW4iPgogICAgPG1ldGEgY2hhcnNldD0iVVRGLTgiPgogICAgPHRpdGxlPkNvc21vcyBzZXJ2aWNlIGNvbnN1bWVyPC90aXRsZT4KPC9oZWFkPgo8Ym9keSAgcm9sZT0iZG9jdW1lbnQiIG5nLWNvbnRyb2xsZXI9IlNlcnZpY2VDb25zdW1lckN0cmwiPgogICAgPGRpdiBjbGFzcz0ibmF2YmFyIG5hdmJhci1pbnZlcnNlIG5hdmJhci1maXhlZC10b3AiIHJvbGU9Im5hdmlnYXRpb24iPgogICAgICA8ZGl2IGNsYXNzPSJjb250YWluZXIiPgogICAgICAgIDxkaXYgY2xhc3M9Im5hdmJhci1oZWFkZXIiPgogICAgICAgICAgPGEgY2xhc3M9Im5hdmJhci1icmFuZCIgaHJlZj0iLyI+Q29zbW9zIEZyYW1ld29yazwvYT4KICAgICAgICA8L2Rpdj4KICAgICAgICA8ZGl2IGNsYXNzPSJuYXZiYXItY29sbGFwc2UgY29sbGFwc2UiPgogICAgICAgICAgPHVsIGNsYXNzPSJuYXYgbmF2YmFyLW5hdiI+CiAgICAgICAgICAgIDxsaSBjbGFzcz0iYWN0aXZlIj48YSBocmVmPSIvIj5Ib21lPC9hPjwvbGk+CiAgICAgICAgICAgIDxsaSBjbGFzcz0iZHJvcGRvd24iIG5nLXNob3c9ImxvZ2dlZEluKCkiPgogICAgICAgICAgICAgIDxhIGhyZWY9IiMiIGNsYXNzPSJkcm9wZG93bi10b2dnbGUiIGRhdGEtdG9nZ2xlPSJkcm9wZG93biI+PHNwYW4gbmctYmluZD0idXNlck5hbWUiPjwvc3Bhbj48YiBjbGFzcz0iY2FyZXQiPjwvYj48L2E+CiAgICAgICAgICAgICAgPHVsIGNsYXNzPSJkcm9wZG93bi1tZW51Ij4KICAgICAgICAgICAgICAgIDxsaT48YSBocmVmPSIvbG9nb3V0LyI+TG9nb3V0PC9hPjwvbGk+CiAgICAgICAgICAgICAgICA8bGkgY2xhc3M9ImRpdmlkZXIiPjwvbGk+CiAgICAgICAgICAgICAgICA8bGk+PGEgaHJlZj0iIyI+U2V0dGluZ3M8L2E+PC9saT4KICAgICAgICAgICAgICA8L3VsPgogICAgICAgICAgICA8L2xpPgogICAgICAgICAgICA8bGkgbmctc2hvdz0iIWxvZ2dlZEluKCkiPjxhIGhyZWY9Ii8jL2xvZ2luLyI+TG9naW48L2E+PC9saT4KICAgICAgICAgIDwvdWw+CiAgICAgICAgPC9kaXY+PCEtLS8ubmF2LWNvbGxhcHNlIC0tPgogICAgICA8L2Rpdj4KICAgIDwvZGl2PgoKICAgIDxkaXYgY2xhc3M9ImNvbnRhaW5lciB0aGVtZS1zaG93Y2FzZSIgcm9sZT0ibWFpbiI+CiAgICAgICAgICA8ZGl2IGNsYXNzPSJqdW1ib3Ryb24iPgogICAgICAgICAgIDxoMz5PcGVyYXRpb25zPC9oMz4KICAgICAgICAgICAgPGZvcm0gcm9sZT0iZm9ybSI+CiAgICAgICAgICAgICAgICA8ZGl2IGNsYXNzPSJmb3JtLWdyb3VwIj4KICAgICAgICAgICAgICAgICAgICA8bGFiZWw+U2VydmljZSBVUkwgKHJlbGF0aXZlKSAqPC9sYWJlbD4KICAgICAgICAgICAgICAgICAgICA8aW5wdXQgY2xhc3M9ImZvcm0tY29udHJvbCIgaWQ9Im5hbWUiIHR5cGU9InRleHQiIG5hbWU9Im5hbWUiIHZhbHVlPSIvc2VydmljZS8iIC8+CiAgICAgICAgICAgICAgICA8L2Rpdj4KICAgICAgICAgICAgICAgIDxkaXYgY2xhc3M9ImZvcm0tZ3JvdXAiPgogICAgICAgICAgICAgICAgICAgIDxsYWJlbD5Db2x1bW5zIFtHRVRdPC9sYWJlbD4KICAgICAgICAgICAgICAgICAgICA8aW5wdXQgY2xhc3M9ImZvcm0tY29udHJvbCIgaWQ9ImNvbHVtbnMiIHR5cGU9InRleHQiIG5hbWU9ImNvbHVtbnMiICAvPgogICAgICAgICAgICAgICAgPC9kaXY+CiAgICAgICAgICAgICAgICA8ZGl2IGNsYXNzPSJmb3JtLWdyb3VwIj4KICAgICAgICAgICAgICAgICAgICA8bGFiZWw+RmlsdGVyIFtHRVRdPC9sYWJlbD4KICAgICAgICAgICAgICAgICAgICA8aW5wdXQgY2xhc3M9ImZvcm0tY29udHJvbCIgaWQ9ImZpbHRlciIgdHlwZT0idGV4dCIgbmFtZT0idmlsdGVyIiAvPgogICAgICAgICAgICAgICAgPC9kaXY+CiAgICAgICAgICAgICAgICA8ZGl2IGNsYXNzPSJmb3JtLWdyb3VwIj4KICAgICAgICAgICAgICAgICAgICA8bGFiZWw+RGF0YSBbKiBQT1NUfFBVVF08L2xhYmVsPgogICAgICAgICAgICAgICAgICAgIDx0ZXh0YXJlYSBjbGFzcz0iZm9ybS1jb250cm9sIiBpZD0iZGF0YSIgbmFtZT0iZGF0YSI+PC90ZXh0YXJlYT4KICAgICAgICAgICAgICAgIDwvZGl2PgoKICAgICAgICAgICAgICAgIDxkaXYgY2xhc3M9ImZvcm0tZ3JvdXAiPgogICAgICAgICAgICAgICAgICAgIDxsYWJlbD5SZXN1bHQ8L2xhYmVsPgogICAgICAgICAgICAgICAgICAgIDx0ZXh0YXJlYSBjbGFzcz0iZm9ybS1jb250cm9sIiBpZD0icmVzdWx0IiBuYW1lPSJyZXN1bHQiPjwvdGV4dGFyZWE+CiAgICAgICAgICAgICAgICA8L2Rpdj4KICAgICAgICAgICAgICAgIDxkaXYgY2xhc3M9ImZvcm0tZ3JvdXAiPgogICAgICAgICAgICAgICAgICAgIDxidXR0b24gY2xhc3M9ImJ0biBidG4tcHJpbWFyeSIgb25jbGljaz0ibG9hZEl0ZW0oKSI+ICBHRVQgPC9idXR0b24+CiAgICAgICAgICAgICAgICAgICAgPGJ1dHRvbiBjbGFzcz0iYnRuIGJ0bi1wcmltYXJ5IiBvbmNsaWNrPSJpbnNlcnRTZXJ2aWNlKCkiPiBQT1NUIDwvYnV0dG9uPgogICAgICAgICAgICAgICAgICAgIDxidXR0b24gY2xhc3M9ImJ0biBidG4taW5mbyIgb25jbGljaz0idXBkYXRlSXRlbSgpIj4gIFBVVCA8L2J1dHRvbj4KICAgICAgICAgICAgICAgICAgICA8YnV0dG9uIGNsYXNzPSJi
dG4gYnRuLWRhbmdlciIgb25jbGljaz0iZGVsZXRlSXRlbSgpIj5ERUxFVEU8L2J1dHRvbj4KICAgICAgICAgICAgICAgICA8L2Rpdj4KICAgICAgICAgICAgPC9mb3JtPgogICAgICAgICAgICA8ZGl2IGlkPSJzdGF0dXMiPjwvZGl2PgogICAgICAgICAgPC9kaXY+CiAgICA8L2Rpdj4KCiAgICA8c2NyaXB0IHNyYz0iLy9hamF4Lmdvb2dsZWFwaXMuY29tL2FqYXgvbGlicy9qcXVlcnkvMS4xMS4xL2pxdWVyeS5taW4uanMiPjwvc2NyaXB0PgogICAgPHNjcmlwdCBzcmM9Ii8vYWpheC5nb29nbGVhcGlzLmNvbS9hamF4L2xpYnMvYW5ndWxhcmpzLzEuMi4xNy9hbmd1bGFyLm1pbi5qcyI+PC9zY3JpcHQ+CiAgICA8c2NyaXB0IHNyYz0iLy9uZXRkbmEuYm9vdHN0cmFwY2RuLmNvbS9ib290c3RyYXAvMy4xLjEvanMvYm9vdHN0cmFwLm1pbi5qcyI+PC9zY3JpcHQ+CiAgICA8c2NyaXB0IHNyYz0iL3NlcnZpY2VhcGkuanMiPjwvc2NyaXB0PgogICAgPHNjcmlwdCBzcmM9Ii9jb250cm9sbGVycy5qcyI+PC9zY3JpcHQ+CiAgICA8bGluayByZWw9InN0eWxlc2hlZXQiIGhyZWY9Ii8vbmV0ZG5hLmJvb3RzdHJhcGNkbi5jb20vYm9vdHN0cmFwLzMuMS4xL2Nzcy9ib290c3RyYXAubWluLmNzcyI+CjwvYm9keT4KPC9odG1sPg==') }]
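# --- Editor's illustrative sketch (assumption, not part of the generated module) ---
# `file_data_list` above pairs target paths with base64-decoded bytes. How the
# generator's consumer uses it is not shown here; one plausible way to
# materialize the entries onto disk would be:
import os


def write_file_data(entries, dest_root="."):
    for entry in entries:
        # entry["name"] looks like an absolute path ('/app/index.html'); strip
        # the leading separator so everything lands under dest_root.
        target = os.path.join(dest_root, entry["name"].lstrip("/"))
        os.makedirs(os.path.dirname(target), exist_ok=True)
        with open(target, "wb") as handle:
            handle.write(entry["data"])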
py
1a590e9618a568740b0ac7ef2d12c5f577bb1434
from app import create_app
from app.cli import cli

if __name__ == "__main__":
    app = create_app()
    cli()
py
1a590ec72b4af2535054b66816e4ac93c04af43e
from __future__ import unicode_literals import hashlib import itertools import json import re from ..compat import compat_HTTPError, compat_str from ..utils import (ExtractorError, float_or_none, get_element_by_attribute, int_or_none, lowercase_escape, std_headers, try_get, url_or_none) from .common import InfoExtractor class InstagramIE(InfoExtractor): _VALID_URL = ( r"(?P<url>https?://(?:www\.)?instagram\.com/(?:p|tv|reel)/(?P<id>[^/?#&]+))" ) _TESTS = [ { "url": "https://instagram.com/p/aye83DjauH/?foo=bar#abc", "md5": "0d2da106a9d2631273e192b372806516", "info_dict": { "id": "aye83DjauH", "ext": "mp4", "title": "Video by naomipq", "description": "md5:1f17f0ab29bd6fe2bfad705f58de3cb8", "thumbnail": r"re:^https?://.*\.jpg", "duration": 0, "timestamp": 1371748545, "upload_date": "20130620", "uploader_id": "naomipq", "uploader": "B E A U T Y F O R A S H E S", "like_count": int, "comment_count": int, "comments": list, }, }, { # missing description "url": "https://www.instagram.com/p/BA-pQFBG8HZ/?taken-by=britneyspears", "info_dict": { "id": "BA-pQFBG8HZ", "ext": "mp4", "title": "Video by britneyspears", "thumbnail": r"re:^https?://.*\.jpg", "duration": 0, "timestamp": 1453760977, "upload_date": "20160125", "uploader_id": "britneyspears", "uploader": "Britney Spears", "like_count": int, "comment_count": int, "comments": list, }, "params": { "skip_download": True, }, }, { # multi video post "url": "https://www.instagram.com/p/BQ0eAlwhDrw/", "playlist": [ { "info_dict": { "id": "BQ0dSaohpPW", "ext": "mp4", "title": "Video 1", }, }, { "info_dict": { "id": "BQ0dTpOhuHT", "ext": "mp4", "title": "Video 2", }, }, { "info_dict": { "id": "BQ0dT7RBFeF", "ext": "mp4", "title": "Video 3", }, }, ], "info_dict": { "id": "BQ0eAlwhDrw", "title": "Post by instagram", "description": "md5:0f9203fc6a2ce4d228da5754bcf54957", }, }, { # IGTV "url": "https://www.instagram.com/tv/BkfuX9UB-eK/", "info_dict": { "id": "BkfuX9UB-eK", "ext": "mp4", "title": "Fingerboarding Tricks with @cass.fb", "thumbnail": r"re:^https?://.*\.jpg", "duration": 53.83, "timestamp": 1530032919, "upload_date": "20180626", "uploader_id": "instagram", "uploader": "Instagram", "like_count": int, "comment_count": int, "comments": list, "description": "Meet Cass Hirst (@cass.fb), a fingerboarding pro who can perform tiny ollies and kickflips while blindfolded.", }, }, { "url": "https://instagram.com/p/-Cmh1cukG2/", "only_matching": True, }, { "url": "http://instagram.com/p/9o6LshA7zy/embed/", "only_matching": True, }, { "url": "https://www.instagram.com/tv/aye83DjauH/", "only_matching": True, }, { "url": "https://www.instagram.com/reel/CDUMkliABpa/", "only_matching": True, }, ] @staticmethod def _extract_embed_url(webpage): mobj = re.search( r'<iframe[^>]+src=(["\'])(?P<url>(?:https?:)?//(?:www\.)?instagram\.com/p/[^/]+/embed.*?)\1', webpage, ) if mobj: return mobj.group("url") blockquote_el = get_element_by_attribute("class", "instagram-media", webpage) if blockquote_el is None: return mobj = re.search(r'<a[^>]+href=([\'"])(?P<link>[^\'"]+)\1', blockquote_el) if mobj: return mobj.group("link") def _real_extract(self, url): mobj = re.match(self._VALID_URL, url) video_id = mobj.group("id") url = mobj.group("url") webpage = self._download_webpage(url, video_id) ( media, video_url, description, thumbnail, timestamp, uploader, uploader_id, like_count, comment_count, comments, height, width, ) = [None] * 12 shared_data = self._parse_json( self._search_regex( r"window\._sharedData\s*=\s*({.+?});", webpage, "shared data", default="{}", ), video_id, 
fatal=False, ) if shared_data: media = try_get( shared_data, ( lambda x: x["entry_data"]["PostPage"][0]["graphql"][ "shortcode_media" ], lambda x: x["entry_data"]["PostPage"][0]["media"], ), dict, ) # _sharedData.entry_data.PostPage is empty when authenticated (see # https://github.com/nextdl/nextdl/pull/22880) if not media: additional_data = self._parse_json( self._search_regex( r"window\.__additionalDataLoaded\s*\(\s*[^,]+,\s*({.+?})\s*\)\s*;", webpage, "additional data", default="{}", ), video_id, fatal=False, ) if additional_data: media = try_get( additional_data, lambda x: x["graphql"]["shortcode_media"], dict ) if media: video_url = media.get("video_url") height = int_or_none(media.get("dimensions", {}).get("height")) width = int_or_none(media.get("dimensions", {}).get("width")) description = try_get( media, lambda x: x["edge_media_to_caption"]["edges"][0]["node"]["text"], compat_str, ) or media.get("caption") title = media.get("title") thumbnail = media.get("display_src") or media.get("display_url") duration = float_or_none(media.get("video_duration")) timestamp = int_or_none( media.get("taken_at_timestamp") or media.get("date") ) uploader = media.get("owner", {}).get("full_name") uploader_id = media.get("owner", {}).get("username") def get_count(keys, kind): if not isinstance(keys, (list, tuple)): keys = [keys] for key in keys: count = int_or_none( try_get( media, ( lambda x: x["edge_media_%s" % key]["count"], lambda x: x["%ss" % kind]["count"], ), ) ) if count is not None: return count like_count = get_count("preview_like", "like") comment_count = get_count( ("preview_comment", "to_comment", "to_parent_comment"), "comment" ) comments = [ { "author": comment.get("user", {}).get("username"), "author_id": comment.get("user", {}).get("id"), "id": comment.get("id"), "text": comment.get("text"), "timestamp": int_or_none(comment.get("created_at")), } for comment in media.get("comments", {}).get("nodes", []) if comment.get("text") ] if not video_url: edges = ( try_get( media, lambda x: x["edge_sidecar_to_children"]["edges"], list ) or [] ) if edges: entries = [] for edge_num, edge in enumerate(edges, start=1): node = try_get(edge, lambda x: x["node"], dict) if not node: continue node_video_url = url_or_none(node.get("video_url")) if not node_video_url: continue entries.append( { "id": node.get("shortcode") or node["id"], "title": node.get("title") or "Video %d" % edge_num, "url": node_video_url, "thumbnail": node.get("display_url"), "duration": float_or_none(node.get("video_duration")), "width": int_or_none( try_get(node, lambda x: x["dimensions"]["width"]) ), "height": int_or_none( try_get(node, lambda x: x["dimensions"]["height"]) ), "view_count": int_or_none(node.get("video_view_count")), } ) return self.playlist_result( entries, video_id, "Post by %s" % uploader_id if uploader_id else None, description, ) if not video_url: video_url = self._og_search_video_url(webpage, secure=False) formats = [ { "url": video_url, "width": width, "height": height, } ] if not uploader_id: uploader_id = self._search_regex( r'"owner"\s*:\s*{\s*"username"\s*:\s*"(.+?)"', webpage, "uploader id", fatal=False, ) if not description: description = self._search_regex( r'"caption"\s*:\s*"(.+?)"', webpage, "description", default=None ) if description is not None: description = lowercase_escape(description) if not thumbnail: thumbnail = self._og_search_thumbnail(webpage) return { "id": video_id, "formats": formats, "ext": "mp4", "title": title or "Video by %s" % uploader_id, "description": description, "duration": 
duration, "thumbnail": thumbnail, "timestamp": timestamp, "uploader_id": uploader_id, "uploader": uploader, "like_count": like_count, "comment_count": comment_count, "comments": comments, } class InstagramPlaylistIE(InfoExtractor): # A superclass for handling any kind of query based on GraphQL which # results in a playlist. _gis_tmpl = None # used to cache GIS request type def _parse_graphql(self, webpage, item_id): # Reads a webpage and returns its GraphQL data. return self._parse_json( self._search_regex( r"sharedData\s*=\s*({.+?})\s*;\s*[<\n]", webpage, "data" ), item_id, ) def _extract_graphql(self, data, url): # Parses GraphQL queries containing videos and generates a playlist. def get_count(suffix): return int_or_none( try_get(node, lambda x: x["edge_media_" + suffix]["count"]) ) uploader_id = self._match_id(url) csrf_token = data["config"]["csrf_token"] rhx_gis = data.get("rhx_gis") or "3c7ca9dcefcf966d11dacf1f151335e8" cursor = "" for page_num in itertools.count(1): variables = { "first": 12, "after": cursor, } variables.update(self._query_vars_for(data)) variables = json.dumps(variables) if self._gis_tmpl: gis_tmpls = [self._gis_tmpl] else: gis_tmpls = [ "%s" % rhx_gis, "", "%s:%s" % (rhx_gis, csrf_token), "%s:%s:%s" % (rhx_gis, csrf_token, std_headers["User-Agent"]), ] # try all of the ways to generate a GIS query, and not only use the # first one that works, but cache it for future requests for gis_tmpl in gis_tmpls: try: json_data = self._download_json( "https://www.instagram.com/graphql/query/", uploader_id, "Downloading JSON page %d" % page_num, headers={ "X-Requested-With": "XMLHttpRequest", "X-Instagram-GIS": hashlib.md5( ("%s:%s" % (gis_tmpl, variables)).encode("utf-8") ).hexdigest(), }, query={ "query_hash": self._QUERY_HASH, "variables": variables, }, ) media = self._parse_timeline_from(json_data) self._gis_tmpl = gis_tmpl break except ExtractorError as e: # if it's an error caused by a bad query, and there are # more GIS templates to try, ignore it and keep trying if isinstance(e.cause, compat_HTTPError) and e.cause.code == 403: if gis_tmpl != gis_tmpls[-1]: continue raise edges = media.get("edges") if not edges or not isinstance(edges, list): break for edge in edges: node = edge.get("node") if not node or not isinstance(node, dict): continue if ( node.get("__typename") != "GraphVideo" and node.get("is_video") is not True ): continue video_id = node.get("shortcode") if not video_id: continue info = self.url_result( "https://instagram.com/p/%s/" % video_id, ie=InstagramIE.ie_key(), video_id=video_id, ) description = try_get( node, lambda x: x["edge_media_to_caption"]["edges"][0]["node"]["text"], compat_str, ) thumbnail = node.get("thumbnail_src") or node.get("display_src") timestamp = int_or_none(node.get("taken_at_timestamp")) comment_count = get_count("to_comment") like_count = get_count("preview_like") view_count = int_or_none(node.get("video_view_count")) info.update( { "description": description, "thumbnail": thumbnail, "timestamp": timestamp, "comment_count": comment_count, "like_count": like_count, "view_count": view_count, } ) yield info page_info = media.get("page_info") if not page_info or not isinstance(page_info, dict): break has_next_page = page_info.get("has_next_page") if not has_next_page: break cursor = page_info.get("end_cursor") if not cursor or not isinstance(cursor, compat_str): break def _real_extract(self, url): user_or_tag = self._match_id(url) webpage = self._download_webpage(url, user_or_tag) data = self._parse_graphql(webpage, user_or_tag) 
self._set_cookie("instagram.com", "ig_pr", "1") return self.playlist_result( self._extract_graphql(data, url), user_or_tag, user_or_tag ) class InstagramUserIE(InstagramPlaylistIE): _VALID_URL = r"https?://(?:www\.)?instagram\.com/(?P<id>[^/]{2,})/?(?:$|[?#])" IE_DESC = "Instagram user profile" IE_NAME = "instagram:user" _TEST = { "url": "https://instagram.com/porsche", "info_dict": { "id": "porsche", "title": "porsche", }, "playlist_count": 5, "params": { "extract_flat": True, "skip_download": True, "playlistend": 5, }, } _QUERY_HASH = ("42323d64886122307be10013ad2dcc44",) @staticmethod def _parse_timeline_from(data): # extracts the media timeline data from a GraphQL result return data["data"]["user"]["edge_owner_to_timeline_media"] @staticmethod def _query_vars_for(data): # returns a dictionary of variables to add to the timeline query based # on the GraphQL of the original page return {"id": data["entry_data"]["ProfilePage"][0]["graphql"]["user"]["id"]} class InstagramTagIE(InstagramPlaylistIE): _VALID_URL = r"https?://(?:www\.)?instagram\.com/explore/tags/(?P<id>[^/]+)" IE_DESC = "Instagram hashtag search" IE_NAME = "instagram:tag" _TEST = { "url": "https://instagram.com/explore/tags/lolcats", "info_dict": { "id": "lolcats", "title": "lolcats", }, "playlist_count": 50, "params": { "extract_flat": True, "skip_download": True, "playlistend": 50, }, } _QUERY_HASH = ("f92f56d47dc7a55b606908374b43a314",) @staticmethod def _parse_timeline_from(data): # extracts the media timeline data from a GraphQL result return data["data"]["hashtag"]["edge_hashtag_to_media"] @staticmethod def _query_vars_for(data): # returns a dictionary of variables to add to the timeline query based # on the GraphQL of the original page return { "tag_name": data["entry_data"]["TagPage"][0]["graphql"]["hashtag"]["name"] }
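# --- Added illustration (not part of the extractor above) ---
# The paging loop in InstagramPlaylistIE._extract_graphql signs every GraphQL
# request with an "X-Instagram-GIS" header: the md5 hex digest of a GIS
# template string (rhx_gis, optionally combined with the csrf token and
# User-Agent) joined to the JSON-encoded query variables with a ":".  The
# helper below is a minimal, self-contained sketch of just that signing step;
# the values in the __main__ block are hypothetical placeholders, not real
# Instagram data, and the JSON serialization must match what is actually sent
# in the "variables" query parameter.
import hashlib
import json


def sign_graphql_variables(gis_tmpl, variables_dict):
    """Return the X-Instagram-GIS header value for one GraphQL page request."""
    variables = json.dumps(variables_dict)
    return hashlib.md5(("%s:%s" % (gis_tmpl, variables)).encode("utf-8")).hexdigest()


if __name__ == "__main__":
    # Mirrors the fallback rhx_gis used above; "after" is the paging cursor.
    rhx_gis = "3c7ca9dcefcf966d11dacf1f151335e8"
    print(sign_graphql_variables(rhx_gis, {"id": "12345", "first": 12, "after": ""}))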
py
1a591005698ab1e3cc0f9e8872abf1e662a8445b
import time import inspect from functools import update_wrapper from fixate.core.common import mode_builder, unit_scale from fixate.core.exceptions import ParameterError, InstrumentError from fixate.drivers.funcgen.helper import FuncGen MODES = { ":SINusoid": { " [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}, ":CH2": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}}, }, ":SQUare": { " [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}, ":CH2": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}}, }, ":RAMP": { " [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}, ":CH2": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}}, }, ":PULSE": { " [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}, ":CH2": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}}, }, ":NOISe DEFault": {",[{amplitude}]": {",[{offset}]": {}}}, ":DC DEFault,DEFault": {",[{offset}]": {}}, ":USER": {" [{frequency}]": {",[{amplitude}]": {",[{offset}]": {}}}}, } ADV_MODES = {":SQUare:" "DCYCle"} # ----------------------------------------------------------------------------------------------------------------------- class RigolDG1022(FuncGen): REGEX_ID = "RIGOL TECHNOLOGIES,DG1022" INSTR_TYPE = "VISA" retrys_on_timeout = 3 _verify = True def __init__(self, instrument): """ The self._ values indicate the user values as entered if valid. The self.__ values are the sanitised values used internally in the system to parse between functions Limitations: The function generator switches internal relays at certain thresholds. Try to avoid these ranges in design if the function generator is loaded with a relatively low impedance Table of ranges on the same relay arrangement Min mVpp Max mVpp 4 60 60.1 199.9 200 599.9 600 2000 2001 6000 6001 20000 :param instrument: :return: """ super().__init__(instrument) self.instrument.query_delay = 0.2 self.instrument.timeout = 1000 # Rigol Restrictions self.__restr_bandwidth = {"min": unit_scale("4uHz"), "max": unit_scale("20MHz")} self.__restr_phase = {"min": -180, "max": 180} self.__restr_amplitude = { "min": unit_scale("4mVpp"), "max": unit_scale("20Vpp"), } self._amplitude = None self._store = {"ch1_duty": "50", "ch2_duty": "50"} self.api = [ # WAVEFORM SELECTION: # Channel 1: ( "channel1.waveform.sin", self.store_and_write, ("FUNC SIN", {"ch1_waveform_handler": None}), # base_str ), # handler ( "channel1.waveform.square", self.store_and_write, ( "FUNC SQU\r\nFUNC:SQU:DCYC {self._store[ch1_duty]}", {"ch1_waveform_handler": "channel1.waveform.square"}, ), ), ( "channel1.waveform.ramp", self.store_and_write, ("FUNC RAMP", {"ch1_waveform_handler": None}), ), ( "channel1.waveform.pulse", self.store_and_write, ( "FUNC PULS\r\nPULS:DCYC {self._store[ch1_duty]}", {"ch1_waveform_handler": "channel1.waveform.pulse"}, ), ), ( "channel1.waveform.arb", self.store_and_write, ("FUNC USER", {"ch1_waveform_handler": None}), ), ( "channel1.waveform.triangle", self.store_and_write, ("FUNC TRI", {"ch1_waveform_handler": None}), ), ( "channel1.waveform.noise", self.store_and_write, ("FUNC NOIS", {"ch1_waveform_handler": None}), ), ( "channel1.waveform.dc", self.store_and_write, ("FUNC DC", {"ch1_waveform_handler": None}), ), # Channel 2: ( "channel2.waveform.sin", self.store_and_write, ("FUNC:CH2 SIN", {"ch2_waveform_handler": None}), # base_str ), # handler ( "channel2.waveform.square", self.store_and_write, ( "FUNC:CH2 SQU\r\nFUNC:SQU:DCYC:CH2 {self._store[ch2_duty]}", {"ch2_waveform_handler": "channel2.waveform.square"}, ), ), ( "channel2.waveform.ramp", 
self.store_and_write, ("FUNC:CH2 RAMP", {"ch2_waveform_handler": None}), ), ( "channel2.waveform.pulse", self.store_and_write, ( "FUNC:CH2 PULS\r\nPULS:DCYC {self._store[ch2_duty]}", {"ch2_waveform_handler": "channel2.waveform.pulse"}, ), ), ( "channel2.waveform.arb", self.store_and_write, ("FUNC:CH2 USER", {"ch2_waveform_handler": None}), ), ( "channel2.waveform.triangle", self.store_and_write, ("FUNC:CH2 TRI", {"ch2_waveform_handler": None}), ), ( "channel2.waveform.noise", self.store_and_write, ("FUNC:CH2 NOIS", {"ch2_waveform_handler": None}), ), ( "channel2.waveform.dc", self.store_and_write, ("FUNC:CH2 DC", {"ch2_waveform_handler": None}), ), # CHANNEL CONFIGURATION: # Channel 1: ("channel1.vrms", self.write, "VOLT:UNIT VRMS\r\nVOLT {value}"), ("channel1.vpp", self.write, "VOLT:UNIT VPP\r\nVOLT {value}"), ("channel1.dbm", self.write, "VOLT:UNIT DBM\r\nVOLT {value}"), ("channel1.offset", self.write, "VOLT:OFFS {value}"), ("channel1.phase", self.write, "PHAS {value}"), ( "channel1.duty", self.store_and_execute, ({"ch1_duty": "{value}"}, "ch1_waveform_handler"), ), ("channel1.frequency", self.write, "FREQ {value}"), # Channel 2: ("channel2.vrms", self.write, "VOLT:UNIT:CH2 VRMS\r\nVOLT {value}"), ("channel2.vpp", self.write, "VOLT:UNIT:CH2 VPP\r\nVOLT {value}"), ("channel2.dbm", self.write, "VOLT:UNIT:CH2 DBM\r\nVOLT {value}"), ("channel2.offset", self.write, "VOLT:OFFS:CH2 {value}"), ("channel2.phase", self.write, "PHAS:CH2 {value}"), ("channel2.duty", self.store, {"ch2_duty": "{value}"}), ("channel2.frequency", self.write, "FREQ:CH2 {value}"), # CHANNEL ACTIVATION: ( "channel1._call", self.write, "OUTP {value}", ), # True won't work here needs to be ON or 1, OFF or 0 ( "channel2._call", self.write, "OUTP:CH2 {value}", ), # True won't work here needs to be ON or 1, OFF or 0 # SYNC CONFIGURATION: ("sync.polarity.normal", self.write, ""), ("sync.mode.normal", self.write, ""), ("sync.mode.source", self.write, ""), ("sync._call", self.write, "OUTP {value}"), # TRIGGER CONFIGURATION: ("trigger.immediate", self.write, "TRIG:SOUR IMM"), ("trigger.external._call", self.write, "TRIG:SOUR EXT"), ("trigger.external.rising", self.write, "TRIG:SOUR EXT\r\n TRIG1:SLOP POS"), ( "trigger.external.falling", self.write, "TRIG:SOUR EXT\r\n TRIG1:SLOP NEG", ), ("trigger.manual", self.write, "TRIG:SOUR BUS"), ("trigger.delay", self.write, "TRIG:DEL {seconds}"), ("trigger.out.off", self.write, "OUTP:TRIG OFF"), ("trigger.out._call", self.write, "OUTP:TRIG {output}"), ("trigger.out.rising", self.write, "OUTP:TRIG:SLOP POS"), ("trigger.out.falling", self.write, "OUTP:TRIG:SLOP NEG"), # Modulate # Channel 1: ( "channel1.modulate.am._call", self.store, {"ch1_modulate_state": "AM", "ch1_modulate_setting": "FREQ"}, ), ( "channel1.modulate.fm._call", self.store, {"ch1_modulate_state": "FM", "ch1_modulate_setting": "FREQ"}, ), ( "channel1.modulate.pm._call", self.store, {"ch1_modulate_state": "PM", "ch1_modulate_setting": "FREQ"}, ), ( "channel1.modulate.fsk._call", self.store, {"ch1_modulate_state": "FSK", "ch1_modulate_setting": "RATE"}, ), ( "channel1.modulate.bpsk._call", self.store, {"ch1_modulate_state": "BPSK", "ch1_modulate_setting": "RATE"}, ), ( "channel1.modulate.sum._call", self.store, {"ch1_modulate_state": "SUM", "ch1_modulate_setting": "RATE"}, ), # MODULATE SOURCES: ( "channel1.modulate.source.internal._call", self.store_and_write, ( "{self._store[ch1_modulate_state]}:SOUR INT", {"ch1_modulate_source": "INT"}, ), ), ( "channel1.modulate.source.external", self.store_and_write, ( 
"{self._store[ch1_modulate_state]}:SOUR EXT", {"ch1_modulate_source": "EXT"}, ), ), # MODULATE ACTIVATION: # Channel 1: ( "channel1.modulate._call", self.write, "{self._store[ch1_modulate_state]}:STAT {value}\r\n{self._store[ch1_modulate_state]}:SOUR" "{self._store[ch1_modulate_source]}", ), # MODULATE OPTIONS: # Channel 1: ("channel1.modulate.am.depth", self.write, "AM:DEPT {value}"), ("channel1.modulate.fm.freq_dev", self.write, "FM:DEV {value}"), ("channel1.modulate.pm.phase_dev", self.write, "PM:DEV{value}"), ("channel1.modulate.fsk.hop_freq", self.write, "FSK:FREQ {value}"), ("channel1.modulate.fsk.rate", self.write, "FSK:INT:RATE {value}"), # MODULATE SHAPES: # Channel 1: ( "channel1.modulate.source.internal.shape.sin", self.write, "{self._store[ch1_modulate_state]}:INT:FUNC SIN", ), ( "channel1.modulate.source.internal.shape.square", self.write, "{self._store[ch1_modulate_state]}:INT:FUNC SQU", ), ( "channel1.modulate.source.internal.shape.triangle", self.write, "{self._store[ch1_modulate_state]}:INT:FUNC TRI", ), ( "channel1.modulate.source.internal.shape.up_ramp", self.write, "{self._store[ch1_modulate_state]}:INT:FUNC RAMP", ), ( "channel1.modulate.source.internal.shape.down_ramp", self.write, "{self._store[ch1_modulate_state]}:INT:FUNC NRAMP", ), ( "channel1.modulate.source.internal.shape.noise", self.write, "{self._store[ch1_modulate_state]}:INT:FUNC NOIS", ), # BURST # Channel 1: ("channel1.burst.gated._call", self.write, "BURS:MODE GAT"), ("channel1.burst._call", self.write, "BURS:STAT {value}"), ("channel1.burst.ncycle._call", self.write, "BURS:MODE TRIG"), ("channel1.burst.ncycle.cycles._call", self.write, "BURS:NCYC {cycles}"), ("channel1.burst.ncycle.cycles.infinite", self.write, "BURS:NCYC INF"), ( "channel1.burst.ncycle.burst_period", self.write, "BURS:INT:PER {seconds}", ), ("channel1.burst.gated.positive", self.write, "BURS:GATE:POL NORM"), ("channel1.burst.gated.negative", self.write, "BURS:GATE:POL INV"), ("channel1.burst.phase", self.write, "BURS:PHAS {degrees}"), # Modulate Frequency ( "channel1.modulate.source.internal.frequency", self.write, "{self._store[ch1_modulate_state]}:INT:{self._store[ch1_modulate_setting]} {value}", ), # LOAD: # channel1: ("channel1.load._call", self.write, "OUTP:LOAD {ohms}"), ("channel1.load.infinite", self.write, "OUTP:LOAD INF"), # channel2: ("channel2.load._call", self.write, "OUTP:LOAD:CH2 {ohms}"), ("channel2.load.infinite", self.write, "OUTP:LOAD:CH2 INF"), ] # ----------------------------------------------------------------------------------------------------------------------- self.init_api() def sync_output(self, sync): """ :param sync: True or False :return: None """ if sync: self._write(["OUTPut:SYNC ON"]) else: self._write(["OUTPut:SYNC OFF"]) def trigger_output(self, trigger, rising=False, falling=False): """ :param sync: True or False :return: None """ if rising and falling: raise ValueError("Cannot trigger on both rising and falling edges") if trigger: if rising: self._write(["OUTPut:TRIGger:SLOPe POSitive"]) if falling: self._write(["OUTPut:TRIGger:SLOPe NEGative"]) self._write(["OUTPut:TRIGger ON"]) else: self._write(["OUTPut:TRIGger OFF"]) @property def verify_values(self): return self._verify @verify_values.setter def verify_values(self, val): if val not in [True, False]: raise ValueError("Invalid value. 
Use True or False") self._verify = val @property def amplitude_ch1(self): return self.instrument.query_ascii_values("VOLTAGE?")[0] @property def amplitude_ch2(self): return self.instrument.query_ascii_values("VOLTAGE:CH2?")[0] @amplitude_ch1.setter def amplitude_ch1(self, val): self._write("VOLTAGE {}".format(val)) @amplitude_ch2.setter def amplitude_ch2(self, val): self._write("VOLTAGE:CH2 {}".format(val)) @property def output_ch1(self): resp = self.instrument.query("OUTP?") if "OFF" in resp: return False elif "ON" in resp: return True @output_ch1.setter def output_ch1(self, val): if val not in [True, False]: raise ParameterError( "Unknown output {} value for CH1\r\nPlease select True or False".format( val ) ) if val: self._write("OUTP ON") else: self._write("OUTP OFF") @property def output_ch2(self): resp = self.instrument.query("OUTP:CH2?") if "OFF" in resp: return False elif "ON" in resp: return True @output_ch2.setter def output_ch2(self, val): if val not in [True, False]: raise ParameterError( "Unknown output {} value for CH2\nPlease select True or False".format( val ) ) if val: self._write("OUTP:CH2 ON") else: self._write("OUTP:CH2 OFF") @FuncGen.output_sync.setter def output_sync(self, val): time.sleep(0.5) if val not in [True, False]: raise ParameterError( "Unknown output {} value for SYNC\nPlease select True or False".format( val ) ) self._output_sync = val if self._output_sync: self._write("OUTP:SYNC ON") else: self._write("OUTP:SYNC OFF") def local(self): """ Gives local control back to the instrument Remote control is activated on any other commands set to the device :return: """ time.sleep(0.5) self._write("SYSTem:LOCal") def reset(self): """ Be aware that the funcgen can have a short period where it sets to 5Vpp 1kHz with the output on for a short period. This could cause issues. Ensure that setup is in a safe state to receive such a signal. :return: """ # Due to the 5Vpp 1kHz signal. Explicit call to turn output off first self.output_ch1 = False self.output_ch2 = False self._write("*RST") def function( self, waveform, channel=1, duty_cycle=None, symmetry=None, phase=None, **kwargs ): """ if parameters empty then uses previous set mode The mode and mode parameters are used in mode_build to search recursively through the MODES dictionary to build the visa string necessary for the equipment to interpret the commands. usage function('sin') parsed to visa: 'APPLy:SINusoid' function('square', channel=2, amplit=5, offset=2.5, freq='1kHz') parsed to visa: 'APPLy:SQUare:CH2 1000, 5, 2.5' corresponds to a square wave at 1kHz, where the min of the wave is at 0 and the max at 5V for more advanced functions that cannot be explained through waveform, amplitude, offset and frequency: use adv_function. 
""" if int(channel) in range(1, 3): channel = "CH{}".format(channel) else: raise ValueError( "Invalid channel {} use a number between 1-2".format(channel) ) mode = (waveform, channel) # self.reset() if duty_cycle is not None: if waveform.upper() not in "PULSE": if channel == "CH1": self._write( ["FUNCtion:{}:DCYCle {}".format(waveform.upper(), duty_cycle)] ) else: self._write( [ "FUNCtion:{}:DCYCle:{} {}".format( waveform.upper(), channel, duty_cycle ) ] ) else: if channel == "CH1": self._write(["PULSe:DCYC {}".format(duty_cycle)]) else: self._write(["PULSe:DCYC:{} {}".format(channel, duty_cycle)]) if symmetry is not None: if channel == "CH1": self._write(["FUNCtion:RAMP:SYMMetry {}".format(symmetry)]) else: self._write(["FUNCtion:RAMP:SYMMetry:{} {}".format(channel, symmetry)]) if phase is not None: if channel == "CH1": self._write(["PHASe {}".format(phase)]) else: self._write(["PHASe:CH2 {}".format(phase)]) self._write(["APPLy{}".format(mode_builder(MODES, {}, *mode, **kwargs))]) def am(self, frequency, depth, source=None, waveform="SIN"): self._write( [ "AM:SOURce INT", "AM:INT:FREQuency {frequency}".format(frequency=frequency), "AM:DEPTh {depth}".format(depth=depth), "AM:INT:FUNC {waveform}".format(waveform=waveform), "AM:STATe ON", ] ) def disable_am(self): self._write(["AM:STATe OFF"]) def enable_am(self): self._write(["AM:STATe ON"]) def adv_function(self, *mode, **mode_params): """ Exposes the advanced functionality of the function generator. Currently not implemented :param mode: :param mode_params: :return: """ raise NotImplementedError def _write(self, data): """ The DG1022 cannot respond to visa commands as quickly as some other devices A 100ms delay was found to be reliable for most commands with the exception of the *IDN? identification command. An extra 100ms should be allowed for explicit calls to *IDN? Note: The 6000 number for the sleep is derived from trial and error. The write calls don't seem to block at the rate they write. By allowing 166uS delay for each byte of data then the Funcgen doesn't choke on the next call. A flat 100ms is added to allow processing time. This is especially important for commands that write large amounts of data such as user arbitrary forms. 
""" if data: if isinstance(data, str): data = data.split("\r\n") for itm in data: self.instrument.write(itm) time.sleep(0.1 + len(itm) / 6000) else: raise ParameterError("Missing data in instrument write") self._is_error() def _check_errors(self): resp = self.instrument.query("SYST:ERR?") code, msg = resp.strip("\n").split(",") code = int(code) msg = msg.strip('"') return code, msg def _is_error(self, silent=False): errors = [] while True: code, msg = self._check_errors() if code != 0: errors.append((code, msg)) else: break if errors: if silent: return errors else: raise InstrumentError( "Error(s) Returned from FuncGen\n" + "\n".join( [ "Code: {}\nMessage:{}".format(code, msg) for code, msg in errors ] ) ) def write(self, base_str, *args, **kwargs): formatted_string = self._format_string(base_str, **kwargs) self._write(formatted_string) def _format_string(self, base_str, **kwargs): kwargs["self"] = self prev_string = base_str cur_string = "" while True: cur_string = prev_string.format(**kwargs) if cur_string == prev_string: break prev_string = cur_string return cur_string def store(self, store_dict, *args, **kwargs): """ Store a dictionary of values in TestClass :param kwargs: Dictionary containing the parameters to store :return: """ new_dict = store_dict.copy() for k, v in store_dict.items(): # I want the same function from write to set up the string before putting it in new_dict try: new_dict[k] = v.format(**kwargs) except: pass self._store.update(new_dict) def store_and_execute(self, params, *args, **kwargs): store_dict, handler_id = params self.store(store_dict, *args, **kwargs) handler_string = self._store[handler_id] if handler_string is not None: *parents, func = handler_string.split(".") parent_obj = self for parent in parents: parent_obj = getattr(parent_obj, parent) handler = getattr(parent_obj, func) handler() def store_and_write(self, params, *args, **kwargs): base_str, store_dict = params self.store(store_dict) self.write(base_str, *args, **kwargs) def init_api(self): for func_str, handler, base_str in self.api: *parents, func = func_str.split(".") parent_obj = self try: for parent in parents: parent_obj = getattr(parent_obj, parent) func_obc = getattr(parent_obj, func) except AttributeError: # print("FAILED ON:", func_str) raise setattr(parent_obj, func, self.prepare_string(func_obc, handler, base_str)) def prepare_string(self, func, handler, base_str, *args, **kwargs): def temp_func(*nargs, **nkwargs): """ Only formats using **nkwargs New Temp :param nargs: :param nkwargs: :return: """ sig = inspect.signature(func) keys = [itm[0] for itm in sig.parameters.items()] # Hard coding for RIGOL. BOOLS should be converted to "ON", "OFF" for index, param in enumerate(nargs): nkwargs[keys[index]] = param for k, v in nkwargs.items(): if sig.parameters[k].annotation == bool: if v: nkwargs[k] = "ON" else: nkwargs[k] = "OFF" return handler(base_str, **nkwargs) return update_wrapper(temp_func, func) # ------------------------------------------------------------------------------------------ def get_identity(self): """ Query ID character string of instrument, including a field separated by 4 “,”, manufactory, model, serial number and the edition number that consists of numbers and separated by “.” . :return: RIGOL TECHNOLOGIES,DG1022,DG1000000002,00.01.00.04.00 """ return self.instrument.query("*IDN?").strip()
py
1a59111f208e1020b4706767671279223ffc2d84
import logging
from pyvisdk.exceptions import InvalidArgumentError

########################################
# Automatically generated, do not edit.
########################################

log = logging.getLogger(__name__)

def HostCpuIdInfo(vim, *args, **kwargs):
    '''The CpuIdInfo data object type is used to represent the CPU features of a
    particular host or product, or to specify what the CPU feature requirements
    are for a particular virtual machine or guest operating system.

    For each register (eax,ebx,ecx,edx), the string is a bit mask of the form:

    When used to represent the features of a specific processor package (cpuPkg),
    the features common to all processors on a host (cpuFeature), or the features
    supported by a virtualization platform (supportedCpuFeature), each bit is
    either '0' or '1', or '-' for unknown/unspecified. In these feature vectors,
    the vendor field is never set. Optional values in these feature vectors
    default to '----:----:----:----:----:----:----:----'.

    When specifying the required feature set for a virtual machine or a guest
    operating system, the bits can take on the values as described below, and the
    vendor field may be set. The total feature requirements for a virtual machine
    are composed by using any requirements listed in the virtual machine's
    configuration to override the requirements listed in the descriptor for the
    virtual machine's guest OS.

    Bits used for specifying requirements:

    The values 'F' and '1' are rarely used but included for completeness. The '0'
    and '1' values do not promise a faithful virtualization of these features;
    whether the features work when forced to 0 or 1 is highly dependent on the
    guest software. Optional values in the requirements from the virtual
    machine's configuration default to '----:----:----:----:----:----:----:----'.
    Optional values in the requirements from the guest OS descriptor default to
    'xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx:xxxx'.

    Once the feature requirements for a virtual machine have been composed from
    the virtual machine's configuration and guest OS descriptor, the bit types
    above are used to identify whether or not the virtual machine can be powered
    on or be migrated with VMotion to a particular host. The rules are as
    follows:'''

    obj = vim.client.factory.create('{urn:vim25}HostCpuIdInfo')

    # do some validation checking...
    if (len(args) + len(kwargs)) < 1:
        raise IndexError('Expected at least 1 argument, got: %d' % (len(args) + len(kwargs)))

    required = [ 'level' ]
    optional = [ 'eax', 'ebx', 'ecx', 'edx', 'vendor', 'dynamicProperty', 'dynamicType' ]

    for name, arg in zip(required + optional, args):
        setattr(obj, name, arg)

    for name, value in kwargs.items():
        if name in required + optional:
            setattr(obj, name, value)
        else:
            raise InvalidArgumentError("Invalid argument: %s. Expected one of %s" % (name, ", ".join(required + optional)))

    return obj
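# --- Added usage sketch (hypothetical stub, no live vSphere connection) ---
# HostCpuIdInfo() above is a generated factory: it asks the connection's
# factory for a '{urn:vim25}HostCpuIdInfo' object and copies the accepted
# positional/keyword arguments onto it, rejecting names outside the required
# ('level') and optional lists.  The stub classes below only imitate the
# vim.client.factory.create call chain so the calling pattern can be shown;
# with pyvisdk you would pass a real connection object instead, and
# HostCpuIdInfo is assumed to be in scope (defined above).  The register
# bit-mask value is a made-up placeholder.
class _StubObject(object):
    pass


class _StubFactory(object):
    def create(self, type_name):
        obj = _StubObject()
        obj._wsdl_type = type_name  # hypothetical attribute, for illustration only
        return obj


class _StubClient(object):
    factory = _StubFactory()


class _StubVim(object):
    client = _StubClient()


if __name__ == "__main__":
    info = HostCpuIdInfo(
        _StubVim(),
        level=1,
        vendor="intel",
        eax="0000:0000:0000:0001:0000:0110:1010:0101",
    )
    print(info.level, info.vendor, info.eax)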
py
1a5911fe20928d4721b836368ad28633be0d92fd
from __future__ import unicode_literals import os import re import sys import types from django.conf import settings from django.core.urlresolvers import Resolver404, resolve from django.http import ( HttpRequest, HttpResponse, HttpResponseNotFound, build_request_repr, ) from django.template import Context, Engine, TemplateDoesNotExist from django.template.defaultfilters import force_escape, pprint from django.utils import lru_cache, six, timezone from django.utils.datastructures import MultiValueDict from django.utils.encoding import force_bytes, smart_text from django.utils.module_loading import import_string from django.utils.translation import ugettext as _ # Minimal Django templates engine to render the error templates # regardless of the project's TEMPLATES setting. DEBUG_ENGINE = Engine(debug=True) HIDDEN_SETTINGS = re.compile('API|TOKEN|KEY|SECRET|PASS|SIGNATURE') CLEANSED_SUBSTITUTE = '********************' def linebreak_iter(template_source): yield 0 p = template_source.find('\n') while p >= 0: yield p + 1 p = template_source.find('\n', p + 1) yield len(template_source) + 1 class CallableSettingWrapper(object): """ Object to wrap callable appearing in settings * Not to call in the debug page (#21345). * Not to break the debug page if the callable forbidding to set attributes (#23070). """ def __init__(self, callable_setting): self._wrapped = callable_setting def __repr__(self): return repr(self._wrapped) def cleanse_setting(key, value): """Cleanse an individual setting key/value of sensitive content. If the value is a dictionary, recursively cleanse the keys in that dictionary. """ try: if HIDDEN_SETTINGS.search(key): cleansed = CLEANSED_SUBSTITUTE else: if isinstance(value, dict): cleansed = {k: cleanse_setting(k, v) for k, v in value.items()} else: cleansed = value except TypeError: # If the key isn't regex-able, just return as-is. cleansed = value if callable(cleansed): # For fixing #21345 and #23070 cleansed = CallableSettingWrapper(cleansed) return cleansed def get_safe_settings(): "Returns a dictionary of the settings module, with sensitive settings blurred out." settings_dict = {} for k in dir(settings): if k.isupper(): settings_dict[k] = cleanse_setting(k, getattr(settings, k)) return settings_dict def technical_500_response(request, exc_type, exc_value, tb, status_code=500): """ Create a technical server error response. The last three arguments are the values returned from sys.exc_info() and friends. """ reporter = ExceptionReporter(request, exc_type, exc_value, tb) if request.is_ajax(): text = reporter.get_traceback_text() return HttpResponse(text, status=status_code, content_type='text/plain') else: html = reporter.get_traceback_html() return HttpResponse(html, status=status_code, content_type='text/html') @lru_cache.lru_cache() def get_default_exception_reporter_filter(): # Instantiate the default filter for the first time and cache it. return import_string(settings.DEFAULT_EXCEPTION_REPORTER_FILTER)() def get_exception_reporter_filter(request): default_filter = get_default_exception_reporter_filter() return getattr(request, 'exception_reporter_filter', default_filter) class ExceptionReporterFilter(object): """ Base for all exception reporter filter classes. All overridable hooks contain lenient default behaviors. 
""" def get_request_repr(self, request): if request is None: return repr(None) else: return build_request_repr(request, POST_override=self.get_post_parameters(request)) def get_post_parameters(self, request): if request is None: return {} else: return request.POST def get_traceback_frame_variables(self, request, tb_frame): return list(six.iteritems(tb_frame.f_locals)) class SafeExceptionReporterFilter(ExceptionReporterFilter): """ Use annotations made by the sensitive_post_parameters and sensitive_variables decorators to filter out sensitive information. """ def is_active(self, request): """ This filter is to add safety in production environments (i.e. DEBUG is False). If DEBUG is True then your site is not safe anyway. This hook is provided as a convenience to easily activate or deactivate the filter on a per request basis. """ return settings.DEBUG is False def get_cleansed_multivaluedict(self, request, multivaluedict): """ Replaces the keys in a MultiValueDict marked as sensitive with stars. This mitigates leaking sensitive POST parameters if something like request.POST['nonexistent_key'] throws an exception (#21098). """ sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: multivaluedict = multivaluedict.copy() for param in sensitive_post_parameters: if param in multivaluedict: multivaluedict[param] = CLEANSED_SUBSTITUTE return multivaluedict def get_post_parameters(self, request): """ Replaces the values of POST parameters marked as sensitive with stars (*********). """ if request is None: return {} else: sensitive_post_parameters = getattr(request, 'sensitive_post_parameters', []) if self.is_active(request) and sensitive_post_parameters: cleansed = request.POST.copy() if sensitive_post_parameters == '__ALL__': # Cleanse all parameters. for k, v in cleansed.items(): cleansed[k] = CLEANSED_SUBSTITUTE return cleansed else: # Cleanse only the specified parameters. for param in sensitive_post_parameters: if param in cleansed: cleansed[param] = CLEANSED_SUBSTITUTE return cleansed else: return request.POST def cleanse_special_types(self, request, value): try: # If value is lazy or a complex object of another kind, this check # might raise an exception. isinstance checks that lazy HttpRequests # or MultiValueDicts will have a return value. is_request = isinstance(value, HttpRequest) except Exception as e: return '{!r} while evaluating {!r}'.format(e, value) if is_request: # Cleanse the request's POST parameters. value = self.get_request_repr(value) elif isinstance(value, MultiValueDict): # Cleanse MultiValueDicts (request.POST is the one we usually care about) value = self.get_cleansed_multivaluedict(request, value) return value def get_traceback_frame_variables(self, request, tb_frame): """ Replaces the values of variables marked as sensitive with stars (*********). """ # Loop through the frame's callers to see if the sensitive_variables # decorator was used. current_frame = tb_frame.f_back sensitive_variables = None while current_frame is not None: if (current_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in current_frame.f_locals): # The sensitive_variables decorator was used, so we take note # of the sensitive variables' names. 
wrapper = current_frame.f_locals['sensitive_variables_wrapper'] sensitive_variables = getattr(wrapper, 'sensitive_variables', None) break current_frame = current_frame.f_back cleansed = {} if self.is_active(request) and sensitive_variables: if sensitive_variables == '__ALL__': # Cleanse all variables for name, value in tb_frame.f_locals.items(): cleansed[name] = CLEANSED_SUBSTITUTE else: # Cleanse specified variables for name, value in tb_frame.f_locals.items(): if name in sensitive_variables: value = CLEANSED_SUBSTITUTE else: value = self.cleanse_special_types(request, value) cleansed[name] = value else: # Potentially cleanse the request and any MultiValueDicts if they # are one of the frame variables. for name, value in tb_frame.f_locals.items(): cleansed[name] = self.cleanse_special_types(request, value) if (tb_frame.f_code.co_name == 'sensitive_variables_wrapper' and 'sensitive_variables_wrapper' in tb_frame.f_locals): # For good measure, obfuscate the decorated function's arguments in # the sensitive_variables decorator's frame, in case the variables # associated with those arguments were meant to be obfuscated from # the decorated function's frame. cleansed['func_args'] = CLEANSED_SUBSTITUTE cleansed['func_kwargs'] = CLEANSED_SUBSTITUTE return cleansed.items() class ExceptionReporter(object): """ A class to organize and coordinate reporting on exceptions. """ def __init__(self, request, exc_type, exc_value, tb, is_email=False): self.request = request self.filter = get_exception_reporter_filter(self.request) self.exc_type = exc_type self.exc_value = exc_value self.tb = tb self.is_email = is_email self.template_info = getattr(self.exc_value, 'template_debug', None) self.template_does_not_exist = False self.loader_debug_info = None # Handle deprecated string exceptions if isinstance(self.exc_type, six.string_types): self.exc_value = Exception('Deprecated String Exception: %r' % self.exc_type) self.exc_type = type(self.exc_value) def format_path_status(self, path): if not os.path.exists(path): return "File does not exist" return "File exists" def get_traceback_data(self): """Return a dictionary containing traceback information.""" try: default_template_engine = Engine.get_default() except Exception: # Since the debug view must never crash, catch all exceptions. # If Django can't find a default template engine, get_default() # raises ImproperlyConfigured. If some template engines fail to # load, any exception may be raised. default_template_engine = None # TODO: add support for multiple template engines (#24120). # TemplateDoesNotExist should carry all the information. # Replaying the search process isn't a good design. if self.exc_type and issubclass(self.exc_type, TemplateDoesNotExist): if default_template_engine is None: template_loaders = [] else: self.template_does_not_exist = True self.loader_debug_info = [] # If Django fails in get_template_loaders, provide an empty list # for the following loop to not fail. try: template_loaders = default_template_engine.template_loaders except Exception: template_loaders = [] for loader in template_loaders: try: source_list_func = loader.get_template_sources # NOTE: This assumes exc_value is the name of the template that # the loader attempted to load. template_list = [{ 'name': t, 'status': self.format_path_status(t), } for t in source_list_func(str(self.exc_value))] except AttributeError: template_list = [] loader_name = loader.__module__ + '.' 
+ loader.__class__.__name__ self.loader_debug_info.append({ 'loader': loader_name, 'templates': template_list, }) frames = self.get_traceback_frames() for i, frame in enumerate(frames): if 'vars' in frame: frame_vars = [] for k, v in frame['vars']: v = pprint(v) # The force_escape filter assume unicode, make sure that works if isinstance(v, six.binary_type): v = v.decode('utf-8', 'replace') # don't choke on non-utf-8 input # Trim large blobs of data if len(v) > 4096: v = '%s... <trimmed %d bytes string>' % (v[0:4096], len(v)) frame_vars.append((k, force_escape(v))) frame['vars'] = frame_vars frames[i] = frame unicode_hint = '' if self.exc_type and issubclass(self.exc_type, UnicodeError): start = getattr(self.exc_value, 'start', None) end = getattr(self.exc_value, 'end', None) if start is not None and end is not None: unicode_str = self.exc_value.args[1] unicode_hint = smart_text( unicode_str[max(start - 5, 0):min(end + 5, len(unicode_str))], 'ascii', errors='replace' ) from django import get_version c = { 'is_email': self.is_email, 'unicode_hint': unicode_hint, 'frames': frames, 'request': self.request, 'filtered_POST': self.filter.get_post_parameters(self.request), 'settings': get_safe_settings(), 'sys_executable': sys.executable, 'sys_version_info': '%d.%d.%d' % sys.version_info[0:3], 'server_time': timezone.now(), 'django_version_info': get_version(), 'sys_path': sys.path, 'template_info': self.template_info, 'template_does_not_exist': self.template_does_not_exist, 'loader_debug_info': self.loader_debug_info, } # Check whether exception info is available if self.exc_type: c['exception_type'] = self.exc_type.__name__ if self.exc_value: c['exception_value'] = smart_text(self.exc_value, errors='replace') if frames: c['lastframe'] = frames[-1] return c def get_traceback_html(self): "Return HTML version of debug 500 HTTP error page." t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEMPLATE) c = Context(self.get_traceback_data(), use_l10n=False) return t.render(c) def get_traceback_text(self): "Return plain text version of debug 500 HTTP error page." t = DEBUG_ENGINE.from_string(TECHNICAL_500_TEXT_TEMPLATE) c = Context(self.get_traceback_data(), autoescape=False, use_l10n=False) return t.render(c) def _get_lines_from_file(self, filename, lineno, context_lines, loader=None, module_name=None): """ Returns context_lines before and after lineno from file. Returns (pre_context_lineno, pre_context, context_line, post_context). """ source = None if loader is not None and hasattr(loader, "get_source"): try: source = loader.get_source(module_name) except ImportError: pass if source is not None: source = source.splitlines() if source is None: try: with open(filename, 'rb') as fp: source = fp.read().splitlines() except (OSError, IOError): pass if source is None: return None, [], None, [] # If we just read the source from a file, or if the loader did not # apply tokenize.detect_encoding to decode the source into a Unicode # string, then we should do that ourselves. if isinstance(source[0], six.binary_type): encoding = 'ascii' for line in source[:2]: # File coding may be specified. 
Match pattern from PEP-263 # (http://www.python.org/dev/peps/pep-0263/) match = re.search(br'coding[:=]\s*([-\w.]+)', line) if match: encoding = match.group(1).decode('ascii') break source = [six.text_type(sline, encoding, 'replace') for sline in source] lower_bound = max(0, lineno - context_lines) upper_bound = lineno + context_lines pre_context = source[lower_bound:lineno] context_line = source[lineno] post_context = source[lineno + 1:upper_bound] return lower_bound, pre_context, context_line, post_context def get_traceback_frames(self): def explicit_or_implicit_cause(exc_value): explicit = getattr(exc_value, '__cause__', None) implicit = getattr(exc_value, '__context__', None) return explicit or implicit # Get the exception and all its causes exceptions = [] exc_value = self.exc_value while exc_value: exceptions.append(exc_value) exc_value = explicit_or_implicit_cause(exc_value) frames = [] # No exceptions were supplied to ExceptionReporter if not exceptions: return frames # In case there's just one exception (always in Python 2, # sometimes in Python 3), take the traceback from self.tb (Python 2 # doesn't have a __traceback__ attribute on Exception) exc_value = exceptions.pop() tb = self.tb if not exceptions else exc_value.__traceback__ while tb is not None: # Support for __traceback_hide__ which is used by a few libraries # to hide internal frames. if tb.tb_frame.f_locals.get('__traceback_hide__'): tb = tb.tb_next continue filename = tb.tb_frame.f_code.co_filename function = tb.tb_frame.f_code.co_name lineno = tb.tb_lineno - 1 loader = tb.tb_frame.f_globals.get('__loader__') module_name = tb.tb_frame.f_globals.get('__name__') or '' pre_context_lineno, pre_context, context_line, post_context = self._get_lines_from_file( filename, lineno, 7, loader, module_name, ) if pre_context_lineno is not None: frames.append({ 'exc_cause': explicit_or_implicit_cause(exc_value), 'exc_cause_explicit': getattr(exc_value, '__cause__', True), 'tb': tb, 'type': 'django' if module_name.startswith('django.') else 'user', 'filename': filename, 'function': function, 'lineno': lineno + 1, 'vars': self.filter.get_traceback_frame_variables(self.request, tb.tb_frame), 'id': id(tb), 'pre_context': pre_context, 'context_line': context_line, 'post_context': post_context, 'pre_context_lineno': pre_context_lineno + 1, }) # If the traceback for current exception is consumed, try the # other exception. if not tb.tb_next and exceptions: exc_value = exceptions.pop() tb = exc_value.__traceback__ else: tb = tb.tb_next return frames def format_exception(self): """ Return the same data as from traceback.format_exception. """ import traceback frames = self.get_traceback_frames() tb = [(f['filename'], f['lineno'], f['function'], f['context_line']) for f in frames] list = ['Traceback (most recent call last):\n'] list += traceback.format_list(tb) list += traceback.format_exception_only(self.exc_type, self.exc_value) return list def technical_404_response(request, exception): "Create a technical 404 error response. The exception should be the Http404." 
try: error_url = exception.args[0]['path'] except (IndexError, TypeError, KeyError): error_url = request.path_info[1:] # Trim leading slash try: tried = exception.args[0]['tried'] except (IndexError, TypeError, KeyError): tried = [] else: if (not tried # empty URLconf or (request.path == '/' and len(tried) == 1 # default URLconf and len(tried[0]) == 1 and getattr(tried[0][0], 'app_name', '') == getattr(tried[0][0], 'namespace', '') == 'admin')): return default_urlconf(request) urlconf = getattr(request, 'urlconf', settings.ROOT_URLCONF) if isinstance(urlconf, types.ModuleType): urlconf = urlconf.__name__ caller = '' try: resolver_match = resolve(request.path) except Resolver404: pass else: obj = resolver_match.func if hasattr(obj, '__name__'): caller = obj.__name__ elif hasattr(obj, '__class__') and hasattr(obj.__class__, '__name__'): caller = obj.__class__.__name__ if hasattr(obj, '__module__'): module = obj.__module__ caller = '%s.%s' % (module, caller) t = DEBUG_ENGINE.from_string(TECHNICAL_404_TEMPLATE) c = Context({ 'urlconf': urlconf, 'root_urlconf': settings.ROOT_URLCONF, 'request_path': error_url, 'urlpatterns': tried, 'reason': force_bytes(exception, errors='replace'), 'request': request, 'settings': get_safe_settings(), 'raising_view_name': caller, }) return HttpResponseNotFound(t.render(c), content_type='text/html') def default_urlconf(request): "Create an empty URLconf 404 error response." t = DEBUG_ENGINE.from_string(DEFAULT_URLCONF_TEMPLATE) c = Context({ "title": _("Welcome to Django"), "heading": _("It worked!"), "subheading": _("Congratulations on your first Django-powered page."), "instructions": _("Of course, you haven't actually done any work yet. " "Next, start your first app by running <code>python manage.py startapp [app_label]</code>."), "explanation": _("You're seeing this message because you have <code>DEBUG = True</code> in your " "Django settings file and you haven't configured any URLs. Get to work!"), }) return HttpResponse(t.render(c), content_type='text/html') # # Templates are embedded in the file so that we know the error handler will # always work even if the template loader is broken. 
# TECHNICAL_500_TEMPLATE = (""" <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"> <title>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } code, pre { font-size: 100%; white-space: pre-wrap; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } table.vars { margin:5px 0 2px 40px; } table.vars td, table.req td { font-family:monospace; } table td.code { width:100%; } table td.code pre { overflow:hidden; } table.source th { color:#666; } table.source td { font-family:monospace; white-space:pre; border-bottom:1px solid #eee; } ul.traceback { list-style-type:none; color: #222; } ul.traceback li.frame { padding-bottom:1em; color:#666; } ul.traceback li.user { background-color:#e0e0e0; color:#000 } div.context { padding:10px 0; overflow:hidden; } div.context ol { padding-left:30px; margin:0 10px; list-style-position: inside; } div.context ol li { font-family:monospace; white-space:pre; color:#777; cursor:pointer; } div.context ol li pre { display:inline; } div.context ol.context-line li { color:#505050; background-color:#dfdfdf; } div.context ol.context-line li span { position:absolute; right:32px; } .user div.context ol.context-line li { background-color:#bbb; color:#000; } .user div.context ol li { color:#666; } div.commands { margin-left: 40px; } div.commands a { color:#555; text-decoration:none; } .user div.commands a { color: black; } #summary { background: #ffc; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #template, #template-not-exist { background:#f6f6f6; } #template-not-exist ul { margin: 0 0 0 20px; } #unicode-hint { background:#eee; } #traceback { background:#eee; } #requestinfo { background:#f6f6f6; padding-left:120px; } #summary table { border:none; background:transparent; } #requestinfo h2, #requestinfo h3 { position:relative; margin-left:-100px; } #requestinfo h3 { margin-bottom:-1em; } .error { background: #ffc; } .specific { color:#cc3300; font-weight:bold; } h2 span.commands { font-size:.7em;} span.commands a:link {color:#5E5694;} pre.exception_value { font-family: sans-serif; color: #666; font-size: 1.5em; margin: 10px 0 10px 0; } </style> {% if not is_email %} <script type="text/javascript"> //<!-- function getElementsByClassName(oElm, strTagName, strClassName){ // Written by Jonathan Snook, http://www.snook.ca/jon; Add-ons by Robert Nyman, http://www.robertnyman.com var arrElements = (strTagName == "*" && document.all)? 
document.all : oElm.getElementsByTagName(strTagName); var arrReturnElements = new Array(); strClassName = strClassName.replace(/\-/g, "\\-"); var oRegExp = new RegExp("(^|\\s)" + strClassName + "(\\s|$)"); var oElement; for(var i=0; i<arrElements.length; i++){ oElement = arrElements[i]; if(oRegExp.test(oElement.className)){ arrReturnElements.push(oElement); } } return (arrReturnElements) } function hideAll(elems) { for (var e = 0; e < elems.length; e++) { elems[e].style.display = 'none'; } } window.onload = function() { hideAll(getElementsByClassName(document, 'table', 'vars')); hideAll(getElementsByClassName(document, 'ol', 'pre-context')); hideAll(getElementsByClassName(document, 'ol', 'post-context')); hideAll(getElementsByClassName(document, 'div', 'pastebin')); } function toggle() { for (var i = 0; i < arguments.length; i++) { var e = document.getElementById(arguments[i]); if (e) { e.style.display = e.style.display == 'none' ? 'block': 'none'; } } return false; } function varToggle(link, id) { toggle('v' + id); var s = link.getElementsByTagName('span')[0]; var uarr = String.fromCharCode(0x25b6); var darr = String.fromCharCode(0x25bc); s.innerHTML = s.innerHTML == uarr ? darr : uarr; return false; } function switchPastebinFriendly(link) { s1 = "Switch to copy-and-paste view"; s2 = "Switch back to interactive view"; link.innerHTML = link.innerHTML == s1 ? s2: s1; toggle('browserTraceback', 'pastebinTraceback'); return false; } //--> </script> {% endif %} </head> <body> <div id="summary"> <h1>{% if exception_type %}{{ exception_type }}{% else %}Report{% endif %}""" """{% if request %} at {{ request.path_info|escape }}{% endif %}</h1> <pre class="exception_value">""" """{% if exception_value %}{{ exception_value|force_escape }}{% else %}No exception message supplied{% endif %}""" """</pre> <table class="meta"> {% if request %} <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.build_absolute_uri|escape }}</td> </tr> {% endif %} <tr> <th>Django Version:</th> <td>{{ django_version_info }}</td> </tr> {% if exception_type %} <tr> <th>Exception Type:</th> <td>{{ exception_type }}</td> </tr> {% endif %} {% if exception_type and exception_value %} <tr> <th>Exception Value:</th> <td><pre>{{ exception_value|force_escape }}</pre></td> </tr> {% endif %} {% if lastframe %} <tr> <th>Exception Location:</th> <td>{{ lastframe.filename|escape }} in {{ lastframe.function|escape }}, line {{ lastframe.lineno }}</td> </tr> {% endif %} <tr> <th>Python Executable:</th> <td>{{ sys_executable|escape }}</td> </tr> <tr> <th>Python Version:</th> <td>{{ sys_version_info }}</td> </tr> <tr> <th>Python Path:</th> <td><pre>{{ sys_path|pprint }}</pre></td> </tr> <tr> <th>Server time:</th> <td>{{server_time|date:"r"}}</td> </tr> </table> </div> {% if unicode_hint %} <div id="unicode-hint"> <h2>Unicode error hint</h2> <p>The string that could not be encoded/decoded was: <strong>{{ unicode_hint|force_escape }}</strong></p> </div> {% endif %} {% if template_does_not_exist %} <div id="template-not-exist"> <h2>Template-loader postmortem</h2> {% if loader_debug_info %} <p>Django tried loading these templates, in this order:</p> <ul> {% for loader in loader_debug_info %} <li>Using loader <code>{{ loader.loader }}</code>: <ul> {% for t in loader.templates %}<li><code>{{ t.name }}</code> ({{ t.status }})</li>{% endfor %} </ul> </li> {% endfor %} </ul> {% else %} <p>Django couldn't find any templates because your <code>'loaders'</code> option is empty!</p> {% 
endif %} </div> {% endif %} {% if template_info %} <div id="template"> <h2>Error during template rendering</h2> <p>In template <code>{{ template_info.name }}</code>, error at line <strong>{{ template_info.line }}</strong></p> <h3>{{ template_info.message }}</h3> <table class="source{% if template_info.top %} cut-top{% endif %} {% ifnotequal template_info.bottom template_info.total %} cut-bottom{% endifnotequal %}"> {% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} <tr class="error"><th>{{ source_line.0 }}</th> <td> {{ template_info.before }} <span class="specific">{{ template_info.during }}</span> {{ template_info.after }} </td> </tr> {% else %} <tr><th>{{ source_line.0 }}</th> <td>{{ source_line.1 }}</td></tr> {% endifequal %} {% endfor %} </table> </div> {% endif %} {% if frames %} <div id="traceback"> <h2>Traceback <span class="commands">{% if not is_email %}<a href="#" onclick="return switchPastebinFriendly(this);"> Switch to copy-and-paste view</a></span>{% endif %} </h2> {% autoescape off %} <div id="browserTraceback"> <ul class="traceback"> {% for frame in frames %} {% ifchanged frame.exc_cause %}{% if frame.exc_cause %} <li><h3> {% if frame.exc_cause_explicit %} The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception: {% else %} During handling of the above exception ({{ frame.exc_cause }}), another exception occurred: {% endif %} </h3></li> {% endif %}{% endifchanged %} <li class="frame {{ frame.type }}"> <code>{{ frame.filename|escape }}</code> in <code>{{ frame.function|escape }}</code> {% if frame.context_line %} <div class="context" id="c{{ frame.id }}"> {% if frame.pre_context and not is_email %} <ol start="{{ frame.pre_context_lineno }}" class="pre-context" id="pre{{ frame.id }}"> {% for line in frame.pre_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} <ol start="{{ frame.lineno }}" class="context-line"> <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre> {{ frame.context_line|escape }}</pre>{% if not is_email %} <span>...</span>{% endif %}</li></ol> {% if frame.post_context and not is_email %} <ol start='{{ frame.lineno|add:"1" }}' class="post-context" id="post{{ frame.id }}"> {% for line in frame.post_context %} <li onclick="toggle('pre{{ frame.id }}', 'post{{ frame.id }}')"><pre>{{ line|escape }}</pre></li> {% endfor %} </ol> {% endif %} </div> {% endif %} {% if frame.vars %} <div class="commands"> {% if is_email %} <h2>Local Vars</h2> {% else %} <a href="#" onclick="return varToggle(this, '{{ frame.id }}')"><span>&#x25b6;</span> Local vars</a> {% endif %} </div> <table class="vars" id="v{{ frame.id }}"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in frame.vars|dictsort:"0" %} <tr> <td>{{ var.0|force_escape }}</td> <td class="code"><pre>{{ var.1 }}</pre></td> </tr> {% endfor %} </tbody> </table> {% endif %} </li> {% endfor %} </ul> </div> {% endautoescape %} <form action="http://dpaste.com/" name="pasteform" id="pasteform" method="post"> {% if not is_email %} <div id="pastebinTraceback" class="pastebin"> <input type="hidden" name="language" value="PythonConsole"> <input type="hidden" name="title" value="{{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %}"> <input type="hidden" name="source" value="Django Dpaste Agent"> <input type="hidden" name="poster" value="Django"> <textarea name="content" 
id="traceback_area" cols="140" rows="25"> Environment: {% if request %} Request Method: {{ request.META.REQUEST_METHOD }} Request URL: {{ request.build_absolute_uri|escape }} {% endif %} Django Version: {{ django_version_info }} Python Version: {{ sys_version_info }} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template Loader Error: {% if loader_debug_info %}Django tried loading these templates, in this order: {% for loader in loader_debug_info %}Using loader {{ loader.loader }}: {% for t in loader.templates %}{{ t.name }} ({{ t.status }}) {% endfor %}{% endfor %} {% else %}Django couldn't find any templates because your 'loaders' option is empty! {% endif %} {% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }}{% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }} {% else %} {{ source_line.0 }} : {{ source_line.1 }} {% endifequal %}{% endfor %}{% endif %} Traceback: {% for frame in frames %}File "{{ frame.filename|escape }}" in {{ frame.function|escape }} {% if frame.context_line %} {{ frame.lineno }}. {{ frame.context_line|escape }}{% endif %} {% endfor %} Exception Type: {{ exception_type|escape }}{% if request %} at {{ request.path_info|escape }}{% endif %} Exception Value: {{ exception_value|force_escape }} </textarea> <br><br> <input type="submit" value="Share this traceback on a public Web site"> </div> </form> </div> {% endif %} {% endif %} <div id="requestinfo"> <h2>Request information</h2> {% if request %} <h3 id="get-info">GET</h3> {% if request.GET %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.GET.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No GET data</p> {% endif %} <h3 id="post-info">POST</h3> {% if filtered_POST %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in filtered_POST.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No POST data</p> {% endif %} <h3 id="files-info">FILES</h3> {% if request.FILES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.FILES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No FILES data</p> {% endif %} <h3 id="cookie-info">COOKIES</h3> {% if request.COOKIES %} <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.COOKIES.items %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>No cookie data</p> {% endif %} <h3 id="meta-info">META</h3> <table class="req"> <thead> <tr> <th>Variable</th> <th>Value</th> </tr> </thead> <tbody> {% for var in request.META.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> {% else %} <p>Request data not supplied</p> {% endif %} <h3 id="settings-info">Settings</h3> <h4>Using settings module 
<code>{{ settings.SETTINGS_MODULE }}</code></h4> <table class="req"> <thead> <tr> <th>Setting</th> <th>Value</th> </tr> </thead> <tbody> {% for var in settings.items|dictsort:"0" %} <tr> <td>{{ var.0 }}</td> <td class="code"><pre>{{ var.1|pprint }}</pre></td> </tr> {% endfor %} </tbody> </table> </div> {% if not is_email %} <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and Django will display a standard page generated by the handler for this status code. </p> </div> {% endif %} </body> </html> """) TECHNICAL_500_TEXT_TEMPLATE = """{% firstof exception_type 'Report' %}{% if request %} at {{ request.path_info }}{% endif %} {% firstof exception_value 'No exception message supplied' %} {% if request %} Request Method: {{ request.META.REQUEST_METHOD }} Request URL: {{ request.build_absolute_uri }}{% endif %} Django Version: {{ django_version_info }} Python Executable: {{ sys_executable }} Python Version: {{ sys_version_info }} Python Path: {{ sys_path }} Server time: {{server_time|date:"r"}} Installed Applications: {{ settings.INSTALLED_APPS|pprint }} Installed Middleware: {{ settings.MIDDLEWARE_CLASSES|pprint }} {% if template_does_not_exist %}Template loader Error: {% if loader_debug_info %}Django tried loading these templates, in this order: {% for loader in loader_debug_info %}Using loader {{ loader.loader }}: {% for t in loader.templates %}{{ t.name }} ({{ t.status }}) {% endfor %}{% endfor %} {% else %}Django couldn't find any templates because your 'loaders' option is empty! {% endif %} {% endif %}{% if template_info %} Template error: In template {{ template_info.name }}, error at line {{ template_info.line }} {{ template_info.message }}{% for source_line in template_info.source_lines %} {% ifequal source_line.0 template_info.line %} {{ source_line.0 }} : {{ template_info.before }} {{ template_info.during }} {{ template_info.after }} {% else %} {{ source_line.0 }} : {{ source_line.1 }} {% endifequal %}{% endfor %}{% endif %}{% if frames %} Traceback: {% for frame in frames %} {% ifchanged frame.exc_cause %} {% if frame.exc_cause %} {% if frame.exc_cause_explicit %} The above exception ({{ frame.exc_cause }}) was the direct cause of the following exception: {% else %} During handling of the above exception ({{ frame.exc_cause }}), another exception occurred: {% endif %} {% endif %} {% endifchanged %} File "{{ frame.filename }}" in {{ frame.function }} {% if frame.context_line %} {{ frame.lineno }}. 
{{ frame.context_line }}{% endif %} {% endfor %} {% if exception_type %}Exception Type: {{ exception_type }}{% if request %} at {{ request.path_info }}{% endif %} {% if exception_value %}Exception Value: {{ exception_value }}{% endif %}{% endif %}{% endif %} {% if request %}Request information: GET:{% for k, v in request.GET.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No GET data{% endfor %} POST:{% for k, v in filtered_POST.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No POST data{% endfor %} FILES:{% for k, v in request.FILES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No FILES data{% endfor %} COOKIES:{% for k, v in request.COOKIES.items %} {{ k }} = {{ v|stringformat:"r" }}{% empty %} No cookie data{% endfor %} META:{% for k, v in request.META.items|dictsort:"0" %} {{ k }} = {{ v|stringformat:"r" }}{% endfor %} {% else %}Request data not supplied {% endif %} Settings: Using settings module {{ settings.SETTINGS_MODULE }}{% for k, v in settings.items|dictsort:"0" %} {{ k }} = {{ v|stringformat:"r" }}{% endfor %} You're seeing this error because you have DEBUG = True in your Django settings file. Change that to False, and Django will display a standard page generated by the handler for this status code. """ TECHNICAL_404_TEMPLATE = """ <!DOCTYPE html> <html lang="en"> <head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <title>Page not found at {{ request.path_info|escape }}</title> <meta name="robots" content="NONE,NOARCHIVE"> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; background:#eee; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; margin-bottom:.4em; } h1 span { font-size:60%; color:#666; font-weight:normal; } table { border:none; border-collapse: collapse; width:100%; } td, th { vertical-align:top; padding:2px 3px; } th { width:12em; text-align:right; color:#666; padding-right:.5em; } #info { background:#f6f6f6; } #info ol { margin: 0.5em 4em; } #info ol li { font-family: monospace; } #summary { background: #ffc; } #explanation { background:#eee; border-bottom: 0px none; } </style> </head> <body> <div id="summary"> <h1>Page not found <span>(404)</span></h1> <table class="meta"> <tr> <th>Request Method:</th> <td>{{ request.META.REQUEST_METHOD }}</td> </tr> <tr> <th>Request URL:</th> <td>{{ request.build_absolute_uri|escape }}</td> </tr> {% if raising_view_name %} <tr> <th>Raised by:</th> <td>{{ raising_view_name }}</td> </tr> {% endif %} </table> </div> <div id="info"> {% if urlpatterns %} <p> Using the URLconf defined in <code>{{ urlconf }}</code>, Django tried these URL patterns, in this order: </p> <ol> {% for pattern in urlpatterns %} <li> {% for pat in pattern %} {{ pat.regex.pattern }} {% if forloop.last and pat.name %}[name='{{ pat.name }}']{% endif %} {% endfor %} </li> {% endfor %} </ol> <p>The current URL, <code>{{ request_path|escape }}</code>, didn't match any of these.</p> {% else %} <p>{{ reason }}</p> {% endif %} </div> <div id="explanation"> <p> You're seeing this error because you have <code>DEBUG = True</code> in your Django settings file. Change that to <code>False</code>, and Django will display a standard 404 page. 
</p> </div> </body> </html> """ DEFAULT_URLCONF_TEMPLATE = """ <!DOCTYPE html> <html lang="en"><head> <meta http-equiv="content-type" content="text/html; charset=utf-8"> <meta name="robots" content="NONE,NOARCHIVE"><title>{{ title }}</title> <style type="text/css"> html * { padding:0; margin:0; } body * { padding:10px 20px; } body * * { padding:0; } body { font:small sans-serif; } body>div { border-bottom:1px solid #ddd; } h1 { font-weight:normal; } h2 { margin-bottom:.8em; } h2 span { font-size:80%; color:#666; font-weight:normal; } h3 { margin:1em 0 .5em 0; } h4 { margin:0 0 .5em 0; font-weight: normal; } table { border:1px solid #ccc; border-collapse: collapse; width:100%; background:white; } tbody td, tbody th { vertical-align:top; padding:2px 3px; } thead th { padding:1px 6px 1px 3px; background:#fefefe; text-align:left; font-weight:normal; font-size:11px; border:1px solid #ddd; } tbody th { width:12em; text-align:right; color:#666; padding-right:.5em; } #summary { background: #e0ebff; } #summary h2 { font-weight: normal; color: #666; } #explanation { background:#eee; } #instructions { background:#f6f6f6; } #summary table { border:none; background:transparent; } </style> </head> <body> <div id="summary"> <h1>{{ heading }}</h1> <h2>{{ subheading }}</h2> </div> <div id="instructions"> <p> {{ instructions|safe }} </p> </div> <div id="explanation"> <p> {{ explanation|safe }} </p> </div> </body></html> """
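The constants above are plain Django template source rendered by the debug views. As a brief illustration only, the sketch below shows one way a string such as DEFAULT_URLCONF_TEMPLATE can be rendered standalone with Django's template Engine and Context; the context keys come from the placeholders in the template, while the function name and the literal values are assumptions for this example, not code from this module.

from django.template import Context, Engine


def render_default_urlconf_page():
    # Hypothetical helper: build a Context matching the placeholders used in
    # DEFAULT_URLCONF_TEMPLATE ({{ title }}, {{ heading }}, ...) and render
    # it with a standalone Engine instance.
    template = Engine().from_string(DEFAULT_URLCONF_TEMPLATE)
    context = Context({
        'title': 'Welcome to Django',
        'heading': 'It worked!',
        'subheading': 'Congratulations on your first Django-powered page.',
        'instructions': 'Edit your URLconf to start building pages.',
        'explanation': 'You are seeing this page because DEBUG is True.',
    })
    return template.render(context)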
py
1a591276b25283c3070b793dfc83cd4429f72915
if True:
    foo = 42
else:
    foo = None
py
1a5912d80d3b3b9981711368250d6b6dd3410a88
def get_filter_wordlist():
    # Read the filter wordlist into a set of stripped lines; the context
    # manager ensures the file handle is closed after reading.
    with open('transskribilo/data/filter_wordlist.txt', 'r') as f:
        s = set()
        for line in f:
            line = line.strip()
            s.add(line)
    return s
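A small usage sketch (the helper below is hypothetical, not part of this module): the returned set is typically used for fast membership tests when filtering tokens.

def filter_tokens(tokens):
    # Hypothetical example: drop any token that appears in the filter wordlist.
    wordlist = get_filter_wordlist()
    return [token for token in tokens if token not in wordlist]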
py
1a59133c13af73681d639b1c62a38b0d64730b8c
EPSILON = 1e-5 DICT_ALIASES_CORE = { 'node': 'NODE', 'displacement': 'DISPLACEMENT', 'disp': 'DISPLACEMENT', 'nodal_stress': 'NodalSTRESS', 'nodal_strain': 'NodalSTRAIN', 'nodal_mises': 'NodalMISES', 't_init': 'INITIAL_TEMPERATURE', 't_cnt': 'CNT_TEMPERATURE', 'reac': 'REACTION_FORCE', 'elemental_stress': 'ElementalSTRESS', 'elemental_strain': 'ElementalSTRAIN', 'elemental_mises': 'ElementalMISES', 'modulus': 'Young_modulus', 'poisson_ratio': 'Poisson_ratio', 'density': 'density', 'lte': 'linear_thermal_expansion_coefficient', 'lte_full': 'linear_thermal_expansion_coefficient_full', 'specific_heat': 'specific_heat', 'thermal_conductivity': 'thermal_conductivity', 'orient': 'ORIENTATION', 'boundary': 'boundary', 'cload': 'cload', 'fixtemp': 'fixtemp', 'istrain1': 'GaussSTRAIN1', 'istrain2': 'GaussSTRAIN2', 'istrain3': 'GaussSTRAIN3', 'istrain4': 'GaussSTRAIN4', 'istrain5': 'GaussSTRAIN5', 'istrain6': 'GaussSTRAIN6', 'istrain7': 'GaussSTRAIN7', 'istrain8': 'GaussSTRAIN8', 'vf': 'VF', 'pressure_start_shrinkage': 'pressure_start_shrinkage', 'specific_volume_start_shrinkage': 'specific_volume_start_shrinkage', 'average_temperature_start_shrinkage': 'average_temperature_start_shrinkage', 'time_start_shrinkage': 'time_start_shrinkage', 'shrinkage': 'shrinkage', 'gradient_temperature_mold': 'gradient_temperature_mold', 'shrinkage_mold': 'shrinkage_mold', 'pressure': 'pressure', 'specific_volume': 'specific_volume', 'average_temperature': 'average_temperature', 'max_temperature': 'max_temperature', 'thickness_flow_layer': 'thickness_flow_layer', 'viscosity': 'viscosity', 'shear_velocity': 'shear_velocity', 'shear_stress': 'shear_stress', 'flow_velocity': 'flow_velocity', 'fiber_orientation_tensor': 'fiber_orientation_tensor', 'fiber_orientation_vector': 'fiber_orientation_vector', 'fiber_velocity': 'fiber_velocity', 'skin_fiber_orientation_vector': 'skin_fiber_orientation_vector', 'inflow_gate': 'inflow_gate', 'flow_length': 'flow_length', 'flow_length_by_thickness': 'flow_length_by_thickness', 'temperature_difference': 'temperature_difference', 'flow_front_time': 'flow_front_time', 'normal': 'normal', 'area': 'area', } DICT_ALIASES = dict(DICT_ALIASES_CORE) DICT_ALIASES.update({ v: v for v in DICT_ALIASES_CORE.values()}) DICT_INVERSE_ALIASES = {v: k for k, v in DICT_ALIASES_CORE.items()} DICT_INVERSE_ALIASES.update({ v: v for v in DICT_INVERSE_ALIASES.values()}) LIST_NODAL = [ 'node', 'displacement', 'disp', 'nodal_mises', 'nodal_stress', 'nodal_strain', 'reac', 't_cnt', 't_init', 'pressure_start_shrinkage', 'specific_volume_start_shrinkage', 'average_temperature_start_shrinkage', 'time_start_shrinkage', 'shrinkage', 'gradient_temperature_mold', 'shrinkage_mold', 'pressure', 'specific_volume', 'average_temperature', 'max_temperature', 'thickness_flow_layer', 'temperature_difference', 'flow_front_time', ] LIST_ELEMENTAL = [ 'density', 'elemental_mises', 'elemental_strain', 'elemental_stress', 'istrain1', 'istrain2', 'istrain3', 'istrain4', 'istrain5', 'istrain6', 'istrain7', 'istrain8', 'lte', 'lte_full', 'modulus', 'orient', 'poisson_ratio', 'vf', 'viscosity', 'shear_velocity', 'shear_stress', 'flow_velocity', 'fiber_orientation_tensor', 'fiber_orientation_vector', 'fiber_velocity', 'skin_fiber_orientation_vector', ] LIST_CONSTRAINTS = [ 'boundary', 'cload', 'fixtemp', ] LIST_MATERIALS = [ 'modulus', 'poisson_ratio', 'density', 'lte', 'lte_full', 'specific_heat', 'thermal_conductivity', 'linear_thermal_expansion_coefficient', 'linear_thermal_expansion_coefficient_full', ] LINE_ELEMENT_NAMES = [ 
'line', 'line2', ] SHELL_ELEMENT_NAMES = [ 'tri', 'tri2', 'quad', 'quad2', ] SOLID_ELEMENT_NAMES = [ 'line', 'line2', 'tet', 'tet2', 'pyr', 'pyr2', 'prism', 'prism2', 'hex', 'hex2', ] DICT_FEMIO_ELEMENT_TO_MESHIO_ELEMENT = { 'pt': 'vertex', 'line': 'line', 'line2': 'line3', 'tri': 'triangle', 'tri2': 'triangle6', 'quad': 'quad', 'quad2': 'quad8', 'tet': 'tetra', 'tet2': 'tetra10', 'pyr': 'pyramid', 'pyr2': 'pyramid13', 'prism': 'wedge', 'prism2': 'wedge15', 'hex': 'hexahedron', 'hex2': 'hexahedron27', 'hexprism': 'hexa_prism', } DICT_MESHIO_ELEMENT_TO_FEMIO_ELEMENT = { v: k for k, v in DICT_FEMIO_ELEMENT_TO_MESHIO_ELEMENT.items()} DICT_EXT = { 'fistr': '', 'obj': 'obj', 'stl': 'stl', 'ucd': 'inp', 'vtk': 'vtk', }
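A short illustration of how these lookup tables are typically consumed; the helper functions below are hypothetical and only restate what the dictionaries already encode (alias resolution and femio-to-meshio element translation).

def canonical_name(alias):
    # e.g. 'disp' -> 'DISPLACEMENT'; canonical names also map to themselves.
    return DICT_ALIASES[alias]


def to_meshio_element(femio_element_type):
    # e.g. 'tet2' -> 'tetra10', 'hex' -> 'hexahedron'
    return DICT_FEMIO_ELEMENT_TO_MESHIO_ELEMENT[femio_element_type]


assert canonical_name('disp') == 'DISPLACEMENT'
assert to_meshio_element('hex') == 'hexahedron'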
py
1a5913591a572844f6f307ae95b2310e5bb226d8
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright 2015, Perceivon Hosting Inc. # Copyright 2021, Vladimir Botka <[email protected]> # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # THIS SOFTWARE IS PROVIDED BY [COPYRIGHT HOLDER] AND CONTRIBUTORS "AS IS" AND # ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED # WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL [COPYRIGHT HOLDER] OR CONTRIBUTORS BE LIABLE FOR # ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES # (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; # LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON # ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS # SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. from __future__ import (absolute_import, division, print_function) __metaclass__ = type DOCUMENTATION = r''' --- module: iocage short_description: FreeBSD iocage jail handling description: - The M(iocage) module allows several iocage commands to be executed through ansible. - document use-cases here options: state: description: - I(state) of the desired result. type: str choices: [basejail, thickjail, template, present, cloned, started, stopped, restarted, fetched, exec, pkg, exists, absent, set, facts] default: facts name: description: - I(name) of the jail (former uuid). type: str pkglist: description: - Path to a JSON file containing packages to install. Only applicable when creating a jail. type: path properties: description: - I(properties) of the jail. type: dict args: description: - Additional arguments. type: dict user: description: - I(user) who runs the command I(cmd). type: str default: root cmd: description: - Execute the command I(cmd) inside the specified jail I(name). type: str clone_from: description: - Clone the jail I(clone_from) to I(name). Use I(properties) to configure the clone. type: str release: description: - Specify which RELEASE to fetch, update, or create a jail. type: str update: description: - Update the fetch to the latest patch level. type: bool default: False components: description: - Uses a local file directory for the root directory instead of HTTP to downloads and/or updates releases. type: list elements: path aliases: [files, component] requirements: - lang/python >= 3.6 - sysutils/iocage notes: - Supports C(check_mode). - The module always creates facts B(iocage_releases), B(iocage_templates), and B(iocage_jails) - There is no mandatory option. 
- Returns B(module_args) when debugging is set B(ANSIBLE_DEBUG=true) seealso: - name: iocage - A FreeBSD Jail Manager description: iocage 1.2 documentation link: https://iocage.readthedocs.io/en/latest/ - name: iocage -- jail manager using ZFS and VNET description: FreeBSD System Manager's Manual link: https://www.freebsd.org/cgi/man.cgi?query=iocage author: - Johannes Meixner (@xmj) - dgeo (@dgeo) - Berend de Boer (@berenddeboer) - Dr Josef Karthauser (@Infiniverse) - Kevin P. Fleming (@kpfleming) - Ross Williams (@overhacked) - david8001 (@david8001) - luto (@luto) - Keve Müller (@kevemueller) - Mårten Lindblad (@martenlindblad) - Vladimir Botka (@vbotka) ''' EXAMPLES = r''' - name: Create all iocage_* ansible_facts iocage: - name: Display lists of bases, names of templates, and names of jails debug: msg: |- {{ iocage_releases }} {{ iocage_templates.keys()|list }} {{ iocage_jails.keys()|list }} - name: Create jail without cloning iocage: name: foo state: present pkglist: /path/to/pkglist.json properties: ip4_addr: 'lo1|10.1.0.5' boot: true allow_sysvipc: true defaultrouter: '10.1.0.1' - name: Create template iocage: name: tplfoo state: template pkglist: /path/to/pkglist.json properties: ip4_addr: 'lo1|10.1.0.5' boot: true allow_sysvipc: true defaultrouter: '10.1.0.1' - name: Create a cloned jail. Creates basejail if needed. iocage: name: foo state: present clone_from: tplfoo pkglist: /path/to/pkglist.json properties: ip4_addr: 'lo1|10.1.0.5' boot: true allow_sysvipc: true defaultrouter: '10.1.0.1' - name: Start existing jail iocage: name: foo state: started - name: Stop existing jail iocage: name: foo state: stopped - name: Restart existing jail iocage: name: foo state: restarted - name: Execute command in running jail iocage: name: foo state: exec cmd: service sshd start - name: Destroy jail iocage: name: foo state: absent ''' RETURN = r''' ansible_facts: description: Facts to add to ansible_facts. returned: always type: dict contains: iocage_releases: description: List of all bases. returned: always type: list elements: str sample: ['13.0-RELEASE'] iocage_templates: description: Dictionary of all templates. returned: always type: dict sample: {} iocage_jails: description: Dictionary of all jails. returned: always type: dict sample: {} module_args: description: Information on how the module was invoked. 
returned: debug type: dict ''' import json import re from ansible.module_utils.basic import AnsibleModule from ansible.module_utils._text import to_bytes def _command_fail(module, label, cmd, rc, stdout, stderr): module.fail_json(msg=f"{label}\ncmd: '{cmd}' return: {rc}\nstdout: '{stdout}'\nstderr: '{stderr}'") def _get_iocage_facts(module, iocage_path, argument="all", name=None): opt = dict(jails="list -hl", templates="list -hlt", releases="list -hr", init="list -h") if argument == "all": # _init = _get_iocage_facts(module, iocage_path, "init") _jails = _get_iocage_facts(module, iocage_path, "jails") _templates = _get_iocage_facts(module, iocage_path, "templates") _releases = _get_iocage_facts(module, iocage_path, "releases") return dict(iocage_jails=_jails, iocage_templates=_templates, iocage_releases=_releases) elif argument in opt: cmd = f"{iocage_path} {opt[argument]}" else: module.fail_json(msg=f"_get_iocage_facts({argument}): argument not understood") rc, state, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if rc != 0 and argument != "init": _command_fail(module, "_get_iocage_facts()", cmd, rc, state, err) elif argument == "init": return {} if argument == 'releases': _releases = [] for line in state.split('\n'): if re.match(r'\s*\d', line): _releases.append(line.strip()) return _releases _jails = {} try: for line in state.split('\n'): if line == "": continue _jid = line.split('\t')[0] if _jid == '---': # non-iocage jails: skip all break elif re.match(r'(\d+|-)', _jid): _fragments = line.split('\t') if len(_fragments) == 10: (_jid, _name, _boot, _state, _type, _release, _ip4, _ip6, _template, _basejail) = _fragments else: (_jid, _name, _boot, _state, _type, _release, _ip4, _ip6, _template) = _fragments if _name != "": _properties = _jail_get_properties(module, iocage_path, _name) _jails[_name] = {"jid": _jid, "name": _name, "state": _state, "properties": _properties} else: module.fail_json(msg=f"_get_iocage_facts():\nUnreadable stdout line from cmd '{cmd}': '{line}'") except ValueError: module.fail_json(msg=f"unable to parse {state}") if name is not None: if name in _jails: return _jails[name] else: return {} return _jails def _jail_started(module, iocage_path, name): cmd = f"{iocage_path} list -h" rc, state, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if rc != 0: _command_fail(module, f"jail_started({name})", cmd, rc, state, err) st = None for line in state.split('\n'): u = line.split('\t')[1] if u == name: s = line.split('\t')[2] if s == 'up': st = True break elif s == 'down': st = False break else: module.fail_json(msg=f"Jail {name} unknown state: {line}") return st def jail_exists(module, iocage_path, argument=None, assume_absent=False): cmd = f"{iocage_path} get host_hostuuid {argument}" rc, name, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: name = "" # local variable '_msg' is assigned to but never used [F841] # _msg = "" if name != "" and assume_absent: module.fail_json(msg=f"Jail {argument} exists.") return name.strip() def jail_start(module, iocage_path, name): cmd = f"{iocage_path} start {name}" rc = 1 out = "" _msg = "" _changed = True if not module.check_mode: rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"Jail {name} could not be started.", cmd, rc, out, err) _msg = f"Jail {name} was 
started.\n{out}" else: _msg = f"Jail {name} would have been started." return _changed, _msg def _props_to_str(props): argstr = "" # local variable 'minargs' is assigned to but never used [F841] # minargs = "" for _prop in props: _val = props[_prop] if _val == '-' or _val == '' or not _val: continue if _val in ['yes', 'on', True]: argstr += f"{_prop}=1 " elif _val in ['no', 'off', False]: argstr += f"{_prop}=0 " elif _val in ['-', 'none']: argstr += f"{_prop}={_val} " else: argstr += f"{_prop}={str(_val)} " return argstr def release_fetch(module, iocage_path, update=False, release="NO-RELEASE", components=None, args=""): if not module.check_mode: if update: args += " -U" if components is not None: for _component in components: if _component != "": args += f" -F {_component}" cmd = f"{iocage_path} fetch -r {release} {args}" rc = 1 rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"Release {release} could not be fetched.", cmd, rc, out, err) _changed = True if update: _msg = f"Release {release} was successfully updated." else: _msg = f"Release {release} was successfully fetched." else: _changed = True _msg = f"Release {release} would have been fetched." return release, _changed, _msg def jail_restart(module, iocage_path, name): cmd = f"{iocage_path} restart {name}" rc = 1 out = "" _msg = "" _changed = True if not module.check_mode: rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"Jail {name} could not be restarted.", cmd, rc, out, err) _msg = f"Jail {name} was restarted.\n{rc}" else: _msg = f"Jail {name} would have been restarted." return _changed, _msg def jail_stop(module, iocage_path, name): cmd = f"{iocage_path} stop {name}" _changed = False rc = 1 out = "" _msg = "" if not module.check_mode: rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"Jail {name} could not be stopped.", cmd, rc, out, err) _msg = f"Jail {name} was stopped.\n" else: _msg = f"Jail {name} would have been stopped" return _changed, _msg def jail_exec(module, iocage_path, name, user="root", _cmd='/usr/bin/true'): rc = 1 out = "" err = "" _msg = "" _changed = True if not module.check_mode: cmd = f"{iocage_path} exec -u {user} {name} -- {_cmd}" rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"Command '{_cmd}' could not be executed in jail '{name}'.", cmd, rc, out, err) _msg = (f"Command '{cmd}' was executed in jail '{name}'.\nrc: {rc}\nstdout:\n{out}\nstderr:\n{err}") else: _msg = f"Command '{_cmd}' would have been executed in jail '{name}'." return _changed, _msg, out, err def jail_pkg(module, iocage_path, name, _cmd='info'): rc = 1 out = "" err = "" _msg = "" _changed = True if not module.check_mode: cmd = f"{iocage_path} pkg {name} {_cmd}" rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"pkg '{_cmd}' could not be executed in jail '{name}'.", cmd, rc, out, err) _msg = (f"pkg '{_cmd}' was executed in jail '{name}'.\nstdout:\n{out}\nstderr:\n{err}") else: _msg = f"pkg '{_cmd}' would have been executed in jail '{name}'." 
return _changed, _msg, out, err def _jail_get_properties(module, iocage_path, name): rc = 1 out = "" if name is not None and name != "": properties = {} cmd = f"{iocage_path} get all {name}" rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if rc == 0: _properties = [line.strip() for line in out.strip().split('\n')] for p in _properties: for _property in [p.split(':', 1)]: if len(_property) == 2: properties[_property[0]] = _property[1] else: module.fail_json(msg=f"error parsing property {p} from {str(properties)}") else: _command_fail(module, f"_jail_get_properties({name})", cmd, rc, out, err) elif module.check_mode and name == "CHECK_MODE_FAKE_UUID": properties = {"CHECK_NEW_JAIL": True} else: module.fail_json(msg=f"jail {name} not found.") return properties def jail_set(module, iocage_path, name, properties=None): if properties is None: properties = {} rc = 1 out = "" _msg = "" _changed = False cmd = "" _existing_props = _jail_get_properties(module, iocage_path, name) _props_to_be_changed = {} for _property in properties: if _property not in _existing_props: continue if _existing_props[_property] == '-' and not properties[_property]: continue if _property == "template": continue propval = None _val = properties[_property] _oval = _existing_props[_property] if _val in [0, 'no', 'off', False]: propval = 0 elif _val in [1, 'yes', 'on', True]: propval = 1 elif isinstance(_oval, str): if _val == '': propval = 'none' else: propval = f'{_val}' else: module.fail_json(msg="Unable to set attribute {0} to {1} for jail {2}" .format(_property, str(_val).replace("'", "'\\''"), name)) if 'CHECK_NEW_JAIL' in _existing_props or \ (_property in _existing_props.keys() and str(_existing_props[_property]) != str(propval)) and \ propval is not None: _props_to_be_changed[_property] = propval if len(_props_to_be_changed) > 0: need_restart = False for p in _props_to_be_changed.keys(): if p in ['ip4_addr', 'ip6_addr', 'template', 'interfaces', 'vnet', 'host_hostname']: need_restart = _jail_started(module, iocage_path, name) cmd = f"{iocage_path} set {_props_to_str(_props_to_be_changed)} {name}" if not module.check_mode: if need_restart: jail_stop(module, iocage_path, name) rc, out, err = module.run_command(cmd) if need_restart: jail_start(module, iocage_path, name) if not rc == 0 or (rc == 1 and "is already a jail!" in err): _command_fail(module, f"Attributes could not be set on jail '{name}'.", cmd, rc, out, err) _msg = f"properties {str(_props_to_be_changed.keys())} were set on jail '{name}' with cmd={cmd}." 
else: _msg = f"properties {str(_props_to_be_changed.keys())} would have been changed for jail {name} with command {cmd}" _msg += str(_props_to_be_changed) _changed = True else: _changed = False _msg = f"properties {properties.keys()} already set for jail {name}" return _changed, _msg def jail_create(module, iocage_path, name=None, properties=None, clone_from_name=None, clone_from_template=None, release=None, basejail=False, thickjail=False, pkglist=None): if properties is None: properties = {} rc = 1 out = "" _msg = "" if clone_from_name is None and clone_from_template is None: if basejail: cmd = f"{iocage_path} create -b -n {name} -r {release}" elif thickjail: cmd = f"{iocage_path} create -T -n {name} -r {release} {_props_to_str(properties)}" else: cmd = f"{iocage_path} create -n {name} -r {release} {_props_to_str(properties)}" if pkglist: cmd += " --pkglist=" + pkglist elif clone_from_name: cmd = f"{iocage_path} clone {clone_from_name} -n {name} {_props_to_str(properties)}" elif clone_from_template: cmd = f"{iocage_path} create -t {clone_from_template} -n {name} {_props_to_str(properties)}" if not module.check_mode: rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"Jail '{name}' could not be created.", cmd, rc, out, err) _msg += f"Jail '{name}' was created with properties {str(properties)}.\n\n{cmd}" name = jail_exists(module, iocage_path, name) if not name: module.fail_json(msg=f"Jail '{name}' not created ???\ncmd: {cmd}\nstdout:\n{out}\nstderr:\n{err}") else: _msg += f"Jail {name} would be created with command:\n{cmd}\n" name = f"CHECK_MODE_FAKE_UUID_FOR_{name}" return name, True, _msg def jail_update(module, iocage_path, name): rc = 1 out = "" _msg = "" _changed = False cmd = f"{iocage_path} update {name}" if not module.check_mode: rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"Jail '{name}' not updated.", cmd, rc, out, err) if "No updates needed" in out: _changed = False elif "updating to" in out: nv = re.search(r' ([^ ]*):$', filter((lambda x: 'updating to' in x), out.split('\n'))[0]).group(1) _msg = f"jail {name} updated to {nv}" _changed = True else: _msg = "Unable to check for updates in check_mode" return _changed, _msg def jail_destroy(module, iocage_path, name): rc = 1 out = "" _msg = "" _changed = True if not module.check_mode: cmd = f"{iocage_path} destroy -f {name}" rc, out, err = module.run_command(to_bytes(cmd, errors='surrogate_or_strict'), errors='surrogate_or_strict') if not rc == 0: _command_fail(module, f"Jail '{name}' could not be destroyed.", cmd, rc, out, err) _msg = f"Jail '{name}' was destroyed." jail_exists(module, iocage_path, name, True) else: _msg = f"Jail {name} would have been destroyed." 
return name, _changed, _msg def run_module(): module_args = dict( state=dict(type='str', default="facts", choices=["basejail", "thickjail", "template", "present", "cloned", "started", "stopped", "restarted", "fetched", "exec", "pkg", "exists", "absent", "set", "facts"],), name=dict(type='str'), pkglist=dict(type='path'), properties=dict(type='dict'), args=dict(type='dict'), user=dict(type='str', default="root"), cmd=dict(type='str'), clone_from=dict(type='str'), release=dict(type='str'), update=dict(type='bool', default=False,), components=dict(type='list', elements='path', aliases=["files", "component"],),) module = AnsibleModule(argument_spec=module_args, supports_check_mode=True) iocage_path = module.get_bin_path('iocage', True) if not iocage_path: module.fail_json(msg='Utility iocage not found!') p = module.params name = p["name"] properties = p["properties"] cmd = p["cmd"] args = p["args"] clone_from = p["clone_from"] user = p["user"] release = p["release"] update = p["update"] components = p["components"] pkglist = p["pkglist"] msgs = [] changed = False out = "" err = "" facts = _get_iocage_facts(module, iocage_path, "all") jails = {} for u in facts["iocage_jails"]: jails[u] = facts["iocage_jails"][u] for u in facts["iocage_templates"]: jails[u] = facts["iocage_templates"][u] if p["state"] == "facts": result = dict(changed=changed, msg=", ".join(msgs), ansible_facts=facts, stdout=out, stderr=err, ) if module._debug: result['module_args'] = f"{(json.dumps(module.params, indent=4))}" module.exit_json(**result) # Input validation # states that need name of jail if name is None and p["state"] in ["started", "stopped", "restarted", "exists", "set", "exec", "pkg", "absent"]: module.fail_json(msg=f"name needed for state {p['state']}") # states that need release defined if p["state"] in ["basejail", "thickjail", "template", "fetched", "present"] or p["update"]: if release is None or release == "": # if name and not (upgrade): # _jail_props = _jail_get_properties(module, iocage_path, name) # release = _jail_props["release"] # else: rc, out, err = module.run_command("uname -r") if rc != 0: module.fail_json(msg="Unable to run uname -r ???") matches = re.match(r'(\d+\.\d+)\-(RELEASE|RC\d+).*', out.strip()) if matches is not None: release = matches.group(1) + "-RELEASE" else: module.fail_json(msg=f"Release not recognised: {out}") # need existing jail if p["state"] in ["started", "stopped", "restarted", "set", "exec", "pkg", "exists"]: if name not in jails: module.fail_json(msg=f"Jail '{name}' doesn't exist") # states that need running jail if p["state"] in ["exec", "pkg"] and jails[name]["state"] != "up": module.fail_json(msg=f"Jail '{name}' not running") if p["state"] == "started": if jails[name]["state"] != "up": changed, _msg = jail_start(module, iocage_path, name) msgs.append(_msg) jails[name] = _get_iocage_facts(module, iocage_path, "jails", name) if jails[name]["state"] != "up" and not module.check_mode: module.fail_json(msg=f"Starting jail {name} failed with {_msg}") else: msgs.append(f"Jail {name} already started") elif p["state"] == "stopped": if jails[name]["state"] == "up": changed, _msg = jail_stop(module, iocage_path, name) msgs.append(_msg) if not module.check_mode: jails[name] = _get_iocage_facts(module, iocage_path, "jails", name) if jails[name]["state"] != "down": module.fail_json(msg=f"Stopping jail {name} failed with {_msg}") else: msgs.append(f"Jail {name} already stopped") elif p["state"] == "restarted": changed, _msg = jail_restart(module, iocage_path, name) jails[name] = 
_get_iocage_facts(module, iocage_path, "jails", name) if jails[name]["state"] != "up": module.fail_json(msg=f"Restarting jail {name} failed with {_msg}") msgs.append(_msg) elif p["state"] == "exec": changed, _msg, out, err = jail_exec(module, iocage_path, name, user, cmd) msgs.append(_msg) elif p["state"] == "pkg": changed, _msg, out, err = jail_pkg(module, iocage_path, name, cmd) msgs.append(_msg) elif p["state"] == "exists": msgs.append(f"Jail {name} exists") elif p["state"] == "fetched": if update or release not in facts["iocage_releases"]: rel, changed, _msg = release_fetch(module, iocage_path, update, release, components, args) msgs.append(_msg) facts["iocage_releases"] = _get_iocage_facts(module, iocage_path, "releases") if release not in facts["iocage_releases"] or update: module.fail_json(msg=f"Fetching release {release} failed with {_msg}") else: msgs.append(f"Release {release} already fetched") elif p["state"] == "set": changed, _msg = jail_set(module, iocage_path, name, properties) msgs.append(_msg) jails[name] = _get_iocage_facts(module, iocage_path, "jails", name) elif p["state"] in ["present", "cloned", "template", "basejail", "thickjail"]: do_basejail = False do_thickjail = False clone_from_name = None clone_from_template = None # local variable 'jail_exists' is assigned to but never used [F841] # jail_exists = False if p["state"] != "cloned" and release not in facts["iocage_releases"]: release, _release_changed, _release_msg = release_fetch(module, iocage_path, update, release, components, args) if _release_changed: facts["iocage_releases"] = _get_iocage_facts(module, iocage_path, "releases") msgs.append(_release_msg) if p["state"] == "template": if properties is None: properties = {} properties["template"] = "true" properties["boot"] = "false" if name in facts["iocage_templates"]: # local variable 'jail_exists' is assigned to but never used [F841] # jail_exists = True pass elif p["state"] == "basejail": properties = {} do_basejail = True elif p["state"] == "thickjail": do_thickjail = True elif clone_from: if clone_from in facts["iocage_jails"]: clone_from_name = clone_from elif clone_from in facts["iocage_templates"]: clone_from_template = clone_from else: if module.check_mode: # todo: use facts to check if basejail would have been created before msgs.append(f"Jail {name} would have been cloned from (nonexisting) jail or template {clone_from}") else: module.fail_json(msg=f"unable to create jail {name}\nbasejail {clone_from} doesn't exist") if name not in facts["iocage_templates"] and name not in facts["iocage_jails"]: name, changed, _msg = jail_create(module, iocage_path, name, properties, clone_from_name, clone_from_template, release, do_basejail, do_thickjail, pkglist) msgs.append(_msg) else: changed, _msg = jail_set(module, iocage_path, name, properties) msgs.append("%s already exists" % (name)) if changed: msgs.append(_msg) if p["update"]: if release not in facts["iocage_releases"]: release, _release_changed, _release_msg = release_fetch(module, iocage_path, update, release, components, args) if _release_changed: _msg += _release_msg facts["iocage_releases"] = _get_iocage_facts(module, iocage_path, "releases") release, changed, _msg = jail_update(module, iocage_path, name, release) msgs.append(_msg) # # re-set properties (iocage missing them on creation - iocage-sh bug) # if len(p["properties"]) > 0: # changed, _msg = jail_set(module, iocage_path, name, properties) # if changed: # msgs.append(_msg) if changed: if p["state"] == "template": 
facts["iocage_templates"][name] = _get_iocage_facts(module, iocage_path, "templates", name) else: facts["iocage_jails"][name] = _get_iocage_facts(module, iocage_path, "jails", name) elif p["state"] == "absent": if name in jails: if jails[name]['state'] == "up": changed, _msg = jail_stop(module, iocage_path, name) msgs.append(_msg) name, changed, _msg = jail_destroy(module, iocage_path, name) msgs.append(_msg) del(jails[name]) else: _msg = f"Jail {name} is already absent." msgs.append(_msg) if name in facts["iocage_jails"]: del(facts["iocage_jails"][name]) _msg = f"Jail {name} removed from iocage_jails." msgs.append(_msg) if name in facts["iocage_templates"]: del(facts["iocage_templates"][name]) _msg = f"Jail {name} removed from iocage_templates." msgs.append(_msg) result = dict(changed=changed, msg=", ".join(msgs), ansible_facts=facts, stdout=out, stderr=err, ) if module._debug: result['module_args'] = f"{(json.dumps(module.params, indent=4))}" module.exit_json(**result) def main(): run_module() if __name__ == '__main__': main()
py
1a5913b9f5c21e4fe8635becab498368fe77c13f
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------

from .tracked_resource import TrackedResource


class ApplicationResourceDescription(TrackedResource):
    """This type describes an application resource.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar id: Fully qualified identifier for the resource. Ex -
     /subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/{resourceProviderNamespace}/{resourceType}/{resourceName}
    :vartype id: str
    :ivar name: The name of the resource
    :vartype name: str
    :ivar type: The type of the resource. Ex-
     Microsoft.Compute/virtualMachines or Microsoft.Storage/storageAccounts.
    :vartype type: str
    :param location: The geo-location where the resource lives
    :type location: str
    :param tags: Resource tags.
    :type tags: dict[str, str]
    :ivar provisioning_state: State of the resource.
    :vartype provisioning_state: str
    :param description: User readable description of the application.
    :type description: str
    :param debug_params: Internal use.
    :type debug_params: str
    :param services: Describes the services in the application.
    :type services:
     list[~azure.mgmt.servicefabricmesh.models.ServiceResourceDescription]
    :ivar health_state: Describes the health state of an application
     resource. Possible values include: 'Invalid', 'Ok', 'Warning', 'Error',
     'Unknown'
    :vartype health_state: str or
     ~azure.mgmt.servicefabricmesh.models.HealthState
    :ivar unhealthy_evaluation: When the application's health state is not
     'Ok', this gives additional details from the Service Fabric Health
     Manager explaining why the application is marked unhealthy.
    :vartype unhealthy_evaluation: str
    :ivar status: Status of the application resource. Possible values
     include: 'Invalid', 'Ready', 'Upgrading', 'Creating', 'Deleting',
     'Failed'
    :vartype status: str or
     ~azure.mgmt.servicefabricmesh.models.ApplicationResourceStatus
    :ivar status_details: Gives additional information about the current
     status of the application deployment.
    :vartype status_details: str
    :ivar service_names: Names of the services in the application.
    :vartype service_names: list[str]
    :param diagnostics: Describes the diagnostics definition and usage for
     an application resource.
    :type diagnostics:
     ~azure.mgmt.servicefabricmesh.models.DiagnosticsDescription
    """

    _validation = {
        'id': {'readonly': True},
        'name': {'readonly': True},
        'type': {'readonly': True},
        'provisioning_state': {'readonly': True},
        'health_state': {'readonly': True},
        'unhealthy_evaluation': {'readonly': True},
        'status': {'readonly': True},
        'status_details': {'readonly': True},
        'service_names': {'readonly': True},
    }

    _attribute_map = {
        'id': {'key': 'id', 'type': 'str'},
        'name': {'key': 'name', 'type': 'str'},
        'type': {'key': 'type', 'type': 'str'},
        'location': {'key': 'location', 'type': 'str'},
        'tags': {'key': 'tags', 'type': '{str}'},
        'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
        'description': {'key': 'properties.description', 'type': 'str'},
        'debug_params': {'key': 'properties.debugParams', 'type': 'str'},
        'services': {'key': 'properties.services', 'type': '[ServiceResourceDescription]'},
        'health_state': {'key': 'properties.healthState', 'type': 'str'},
        'unhealthy_evaluation': {'key': 'properties.unhealthyEvaluation', 'type': 'str'},
        'status': {'key': 'properties.status', 'type': 'str'},
        'status_details': {'key': 'properties.statusDetails', 'type': 'str'},
        'service_names': {'key': 'properties.serviceNames', 'type': '[str]'},
        'diagnostics': {'key': 'properties.diagnostics', 'type': 'DiagnosticsDescription'},
    }

    def __init__(self, location=None, tags=None, description=None,
                 debug_params=None, services=None, diagnostics=None):
        super(ApplicationResourceDescription, self).__init__(location=location, tags=tags)
        self.provisioning_state = None
        self.description = description
        self.debug_params = debug_params
        self.services = services
        self.health_state = None
        self.unhealthy_evaluation = None
        self.status = None
        self.status_details = None
        self.service_names = None
        self.diagnostics = diagnostics
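A brief, hypothetical usage sketch (the import path assumes the generated azure-mgmt-servicefabricmesh package): only the writable parameters are passed to the constructor, while server-populated attributes such as provisioning_state and status remain None until the service returns them.

from azure.mgmt.servicefabricmesh.models import ApplicationResourceDescription

app = ApplicationResourceDescription(
    location='eastus',
    tags={'env': 'dev'},
    description='Demo application resource',
)
assert app.provisioning_state is None  # read-only; populated by the server
assert app.status is None              # read-only; populated by the server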
py
1a5914e8e8d318077494d19e7039c4430fcc81f7
import numpy as np import scipy.stats as stats from sira.modelling.structural import Base from sira.modelling.structural import Element as _Element from sira.modelling.structural import Info class Algorithm: @staticmethod def factory(response_params): function_name = response_params["function_name"] funcname_nocase = str(function_name).casefold() if funcname_nocase in [ "stepfunc", "step_func", "stepfunction", "step_function"]: return StepFunc(**response_params) elif funcname_nocase in [ "lognormal", "lognormalcdf", "lognormal_cdf"]: return LogNormalCDF(**response_params) elif funcname_nocase in [ "normal", "normalcdf", "normal_cdf"]: return NormalCDF(**response_params) elif funcname_nocase in [ "rayleigh", "rayleighcdf", "rayleigh_cdf"]: return RayleighCDF(**response_params) elif funcname_nocase in [ "ConstantFunction".lower(), "constant_function"]: return ConstantFunction(**response_params) elif funcname_nocase in [ "Level0Response".lower(), "Level0Recovery".lower()]: return Level0Response(**response_params) elif funcname_nocase in [ "PiecewiseFunction".lower(), "piecewise_function"]: return PiecewiseFunction(**response_params) raise ValueError("No response model matches {}".format(function_name)) class Level0Response(Base): """ Standard response for no damage. """ mode = 1 damage_ratio = 0.0 functionality = 1.0 beta = 0.0 median = 1.0 lower_limit = _Element( 'float', 'lower limit of function if part of piecewise function', None, [lambda x: float(x) > 0.]) upper_limit = _Element( 'float', 'upper limit of function if part of piecewise function', None, [lambda x: float(x) > 0.]) def __call__(self, hazard_level): return 0.0 class RayleighCDF(Base): """ The Rayliegh CDF response model for components. """ scale = _Element( 'float', 'Scale parameter for Rayleigh CDF.', _Element.NO_DEFAULT, validators=[lambda x: float(x) > 0.]) loc = _Element( 'float', 'Location parameter for Rayleigh CDF.', default=0, validators=[lambda x: float(x) >= 0.]) def __call__(self, x): """ SciPy implementation of Rayleigh CDF: loc = shift parameter scale = scaling parameter """ return stats.rayleigh.cdf(x, loc=self.loc, scale=self.scale) class LogNormalCDF(Base): """ The lognormal CDF response model for components. 
""" median = _Element('float', 'Median of the log normal CDF.', _Element.NO_DEFAULT, [lambda x: float(x) > 0.]) beta = _Element('float', 'Log standard deviation of the log normal CDF', _Element.NO_DEFAULT, [lambda x: float(x) > 0.]) location = _Element('float', 'Location parameter of the log normal CDF', 0.0, [lambda x: float(x) > 0.]) lower_limit = _Element( 'float', 'lower limit of function if part of piecewise function', None, [lambda x: float(x) > 0.]) upper_limit = _Element( 'float', 'upper limit of function if part of piecewise function', None, [lambda x: float(x) > 0.]) def __call__(self, data_point): """ SciPy implementation of LogNormal CDF: scipy.stats.lognorm.cdf(x, s, loc=0, scale=1) where, s = sigma # or beta or standard deviation (shape parameter) scale = exp(mean) = median loc is used to shift the distribution and commonly not used """ return stats.lognorm.cdf( data_point, self.beta, loc=self.location, scale=self.median) class NormalCDF(Base): """ The normal CDF response model for components """ # ----------------------------------------------- mean = _Element( 'float', 'Mean of the normal or Gaussian CDF', _Element.NO_DEFAULT, [lambda x: float(x) >= 0.]) stddev = _Element( 'float', 'Standard deviation of the normal CDF', _Element.NO_DEFAULT, [lambda x: float(x) > 0.]) # ----------------------------------------------- lower_limit = _Element( 'float', 'lower limit of function if part of piecewise function', -np.inf, [lambda x: float(x) > 0.]) upper_limit = _Element( 'float', 'upper limit of function if part of piecewise function', np.inf, [lambda x: float(x) > 0.]) # ----------------------------------------------- def __call__(self, data_point, inverse=False): """ SciPy implementation of Normal CDF: scipy.stats.norm.cdf(x, loc=0, scale=1) where, loc = Mean scale = Standard Deviation i.e. square root of Variance """ if not inverse: return stats.norm.cdf(data_point, loc=self.mean, scale=self.stddev) elif inverse: return stats.norm.ppf(data_point, loc=self.mean, scale=self.stddev) class ConstantFunction(Base): """ A function for defining a constant amplitude for a given range """ amplitude = _Element( 'float', 'Constant amplitude of function', _Element.NO_DEFAULT, [lambda x: float(x) >= 0.]) lower_limit = _Element( 'float', 'lower limit of function if part of piecewise function', None, [lambda x: float(x) >= 0.]) upper_limit = _Element( 'float', 'upper limit of function if part of piecewise function', None, [lambda x: float(x) >= 0]) def __call__(self, hazard_intensity): return self.amplitude class StepFunc(Base): """ A response model that does not have a cumulative distribution function, rather a series of steps for damage. """ xys = _Element( 'XYPairs', 'A list of X, Y pairs.', list, [lambda xy: [(float(x), float(y)) for x, y in xy]]) lower_limit = _Element( 'float', 'lower limit of function if part of piecewise function', None, [lambda x: float(x) > 0.]) upper_limit = _Element( 'float', 'upper limit of function if part of piecewise function', None, [lambda x: float(x) > 0.]) def __call__(self, hazard_intensity): """ Note that intervals are closed on the right. """ for x, y in self.xys: # noqa: E1133 if hazard_intensity < x: return y raise ValueError('value is greater than all xs!') class XYPairs(object): """ A list of float values that implement a step function. """ description = Info("The (x, f(x)) pairs defining a step function.") def __init__(self, pairs): """ Create the tuple list containing the float values. 
:param pairs: An iterable container of tuples containing floats """ self.pairs = pairs def __iter__(self): """ Return the XYPairs :return: iterator over the XYPairs """ return iter(self.pairs) class PiecewiseFunction(object): """ This class builds a piecwise function defined by algorithm constructor data of a specified format. This data is part of the defined attributes of a system Component. Each dict in the list contains: - the parameters required to construct an algorithm, and - the conditions where that algorithm will be applicable """ piecewise_function_constructor = None def __init__(self, **kwargs): """ input: a list of dicts. Dict name must be 'piecewise_function_constructor' """ for k, v in kwargs.items(): setattr(self, k, v) self.functions = [] self.validranges = [] for param_dict in self.piecewise_function_constructor: # noqa: E1133 lo = self.check_limit(param_dict['lower_limit'], which_lim='lower') hi = self.check_limit(param_dict['upper_limit'], which_lim='upper') self.functions.append(Algorithm.factory(param_dict)) self.validranges.append((lo, hi)) def check_limit(self, val, which_lim): if which_lim == 'lower': inf, infstr = -np.inf, ['-np.inf', '-inf'] else: inf, infstr = np.inf, ['np.inf', '+np.inf', 'inf', '+inf'] if (val is None) or str(val) in ['', 'NA', *infstr]: val = inf else: try: val = float(val) except ValueError: print(f"Invalid value passed for {which_lim} limit of function.") exit(1) return val def condfunc(self, x, func_lims): return (x >= func_lims[0]) & (x < func_lims[1]) def pwfunc(self, x): x = np.asarray(x) y = np.zeros(x.shape) for i, func in enumerate(self.functions): func_lims = self.validranges[i] y += self.condfunc(x, func_lims) * func(x) # noqa: W0123 return y def __call__(self, hazard_intensity): """ input: hazard intensity value output: probability of a response (linked to a damage state) """ vectorized_pwf = np.vectorize(self.pwfunc) return vectorized_pwf(hazard_intensity)
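The docstrings above describe how each response model wraps a SciPy distribution. As a small check of the LogNormalCDF mapping (a sketch using SciPy directly rather than the sira class machinery), the probability at a given hazard intensity is scipy.stats.lognorm.cdf with s=beta and scale=median:

import scipy.stats as stats

median, beta, location = 0.5, 0.4, 0.0      # illustrative parameter values
hazard_intensity = 0.5
p_exceedance = stats.lognorm.cdf(
    hazard_intensity, beta, loc=location, scale=median)
# At the median intensity the lognormal CDF evaluates to exactly 0.5.
assert abs(p_exceedance - 0.5) < 1e-9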
py
1a591505e0bedb18ff05e07f5429e25ba7c0a83c
# Copyright (c) 2014 Intel Corporation. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. # # The contents of this file are mainly copied from cm_api sources, # released by Cloudera. Codes not used by Sahara CDH plugin are removed. # You can find the original codes at # # https://github.com/cloudera/cm_api/tree/master/python/src/cm_api # # To satisfy the pep8 and python3 tests, we did some changes to the codes. # We also change some importings to use Sahara inherited classes. import copy import datetime import time from oslo_serialization import jsonutils as json from oslo_utils import reflection import six from sahara.plugins import context from sahara_plugin_cdh.i18n import _ from sahara_plugin_cdh.plugins.cdh import exceptions as ex class Attr(object): """Base Attribute Encapsulates information about an attribute in the JSON encoding of the object. It identifies properties of the attribute such as whether it's read-only, its type, etc. """ DATE_FMT = "%Y-%m-%dT%H:%M:%S.%fZ" def __init__(self, atype=None, rw=True, is_api_list=False): self._atype = atype self._is_api_list = is_api_list self.rw = rw def to_json(self, value, preserve_ro): """Returns the JSON encoding of the given attribute value If the value has a 'to_json_dict' object, that method is called. Otherwise, the following values are returned for each input type: - datetime.datetime: string with the API representation of a date. - dictionary: if 'atype' is ApiConfig, a list of ApiConfig objects. - python list: python list (or ApiList) with JSON encoding of items - the raw value otherwise """ if hasattr(value, 'to_json_dict'): return value.to_json_dict(preserve_ro) elif isinstance(value, dict) and self._atype == ApiConfig: return config_to_api_list(value) elif isinstance(value, datetime.datetime): return value.strftime(self.DATE_FMT) elif isinstance(value, list) or isinstance(value, tuple): if self._is_api_list: return ApiList(value).to_json_dict() else: return [self.to_json(x, preserve_ro) for x in value] else: return value def from_json(self, resource_root, data): """Parses the given JSON value into an appropriate python object This means: - a datetime.datetime if 'atype' is datetime.datetime - a converted config dictionary or config list if 'atype' is ApiConfig - if the attr is an API list, an ApiList with instances of 'atype' - an instance of 'atype' if it has a 'from_json_dict' method - a python list with decoded versions of the member objects if the input is a python list. - the raw value otherwise """ if data is None: return None if self._atype == datetime.datetime: return datetime.datetime.strptime(data, self.DATE_FMT) elif self._atype == ApiConfig: # ApiConfig is special. We want a python dictionary for summary # views, but an ApiList for full views. Try to detect each case # from the JSON data. 
if not data['items']: return {} first = data['items'][0] return json_to_config(data, len(first) == 2) elif self._is_api_list: return ApiList.from_json_dict(data, resource_root, self._atype) elif isinstance(data, list): return [self.from_json(resource_root, x) for x in data] elif hasattr(self._atype, 'from_json_dict'): return self._atype.from_json_dict(data, resource_root) else: return data class ROAttr(Attr): """Subclass that just defines the attribute as read-only.""" def __init__(self, atype=None, is_api_list=False): Attr.__init__(self, atype=atype, rw=False, is_api_list=is_api_list) def check_api_version(resource_root, min_version): """Check API version Checks if the resource_root's API version it at least the given minimum version. """ if resource_root.version < min_version: raise ex.CMApiVersionError( _("API version %(minv)s is required but %(acv)s is in use.") % {'minv': min_version, 'acv': resource_root.version}) def call(method, path, ret_type, ret_is_list=False, data=None, params=None, api_version=1): """Call a resource method Generic function for calling a resource method and automatically dealing with serialization of parameters and deserialization of return values. :param method: method to call (must be bound to a resource; e.g., "resource_root.get"). :param path: the full path of the API method to call. :param ret_type: return type of the call. :param ret_is_list: whether the return type is an ApiList. :param data: Optional data to send as payload to the call. :param params: Optional query parameters for the call. :param api_version: minimum API version for the call. """ check_api_version(method.__self__, api_version) if data is not None: data = json.dumps(Attr(is_api_list=True).to_json(data, False)) ret = method(path, data=data, params=params) else: ret = method(path, params=params) if ret_type is None: return elif ret_is_list: return ApiList.from_json_dict(ret, method.__self__, ret_type) elif isinstance(ret, list): return [ret_type.from_json_dict(x, method.__self__) for x in ret] else: return ret_type.from_json_dict(ret, method.__self__) class BaseApiObject(object): """The BaseApiObject helps with (de)serialization from/to JSON The derived class has two ways of defining custom attributes: - Overwriting the '_ATTRIBUTES' field with the attribute dictionary - Override the _get_attributes() method, in case static initialization of the above field is not possible. It's recommended that the _get_attributes() implementation do caching to avoid computing the dictionary on every invocation. The derived class's constructor must call the base class's init() static method. All constructor arguments (aside from self and resource_root) must be keywords arguments with default values (typically None), or from_json_dict() will not work. """ _ATTRIBUTES = {} _WHITELIST = ('_resource_root', '_attributes') @classmethod def _get_attributes(cls): """Get an attribute dictionary Returns a map of property names to attr instances (or None for default attribute behavior) describing the properties of the object. By default, this method will return the class's _ATTRIBUTES field. Classes can override this method to do custom initialization of the attributes when needed. """ return cls._ATTRIBUTES @staticmethod def init(obj, resource_root, attrs=None): """Wraper of real constructor Wraper around the real constructor to avoid issues with the 'self' argument. 
Call like this, from a subclass's constructor: - BaseApiObject.init(self, locals()) """ # This works around http://bugs.python.org/issue2646 # We use unicode strings as keys in kwargs. str_attrs = {} if attrs: for k, v in six.iteritems(attrs): if k not in ('self', 'resource_root'): str_attrs[k] = v BaseApiObject.__init__(obj, resource_root, **str_attrs) def __init__(self, resource_root, **attrs): """Init method Initializes internal state and sets all known writable properties of the object to None. Then initializes the properties given in the provided attributes dictionary. :param resource_root: API resource object. :param attrs: optional dictionary of attributes to set. This should only contain r/w attributes. """ self._resource_root = resource_root for name, attr in six.iteritems(self._get_attributes()): object.__setattr__(self, name, None) if attrs: self._set_attrs(attrs, from_json=False) def _set_attrs(self, attrs, allow_ro=False, from_json=True): """Set attributes from dictionary Sets all the attributes in the dictionary. Optionally, allows setting read-only attributes (e.g. when deserializing from JSON) and skipping JSON deserialization of values. """ for k, v in six.iteritems(attrs): attr = self._check_attr(k, allow_ro) if attr and from_json: v = attr.from_json(self._get_resource_root(), v) object.__setattr__(self, k, v) def __setattr__(self, name, val): if name not in BaseApiObject._WHITELIST: self._check_attr(name, False) object.__setattr__(self, name, val) def _check_attr(self, name, allow_ro): cls_name = reflection.get_class_name(self, fully_qualified=False) if name not in self._get_attributes(): raise ex.CMApiAttributeError( _('Invalid property %(attname)s for class %(classname)s.') % {'attname': name, 'classname': cls_name}) attr = self._get_attributes()[name] if not allow_ro and attr and not attr.rw: raise ex.CMApiAttributeError( _('Attribute %(attname)s of class %(classname)s ' 'is read only.') % {'attname': name, 'classname': cls_name}) return attr def _get_resource_root(self): return self._resource_root def _update(self, api_obj): """Copy state from api_obj to this object.""" if not isinstance(self, api_obj.__class__): raise ex.CMApiValueError( _("Class %(class1)s does not derive from %(class2)s; " "cannot update attributes.") % {'class1': self.__class__, 'class2': api_obj.__class__}) for name in self._get_attributes().keys(): try: val = getattr(api_obj, name) setattr(self, name, val) except AttributeError: pass def to_json_dict(self, preserve_ro=False): dic = {} for name, attr in six.iteritems(self._get_attributes()): if not preserve_ro and attr and not attr.rw: continue try: value = getattr(self, name) if value is not None: if attr: dic[name] = attr.to_json(value, preserve_ro) else: dic[name] = value except AttributeError: pass return dic def __str__(self): """Give a printable format of an attribute Default implementation of __str__. Uses the type name and the first attribute retrieved from the attribute map to create the string. """ cls_name = reflection.get_class_name(self, fully_qualified=False) name = list(self._get_attributes().keys())[0] value = getattr(self, name, None) return "<%s>: %s = %s" % (cls_name, name, value) @classmethod def from_json_dict(cls, dic, resource_root): obj = cls(resource_root) obj._set_attrs(dic, allow_ro=True) return obj class BaseApiResource(BaseApiObject): """Base ApiResource A specialization of BaseApiObject that provides some utility methods for resources. 
This class allows easier serialization / deserialization of parameters and return values. """ def _api_version(self): """Get API version Returns the minimum API version for this resource. Defaults to 1. """ return 1 def _path(self): """Get resource path Returns the path to the resource. e.g., for a service 'foo' in cluster 'bar', this should return '/clusters/bar/services/foo'. """ raise NotImplementedError def _require_min_api_version(self, version): """Check minimum version requirement Raise an exception if the version of the api is less than the given version. :param version: The minimum required version. """ actual_version = self._get_resource_root().version version = max(version, self._api_version()) if actual_version < version: raise ex.CMApiVersionError( _("API version %(minv)s is required but %(acv)s is in use.") % {'minv': version, 'acv': actual_version}) def _cmd(self, command, data=None, params=None, api_version=1): """Invoke a command on the resource Invokes a command on the resource. Commands are expected to be under the "commands/" sub-resource. """ return self._post("commands/" + command, ApiCommand, data=data, params=params, api_version=api_version) def _get_config(self, rel_path, view, api_version=1): """Get resource configurations Retrieves an ApiConfig list from the given relative path. """ self._require_min_api_version(api_version) params = dict(view=view) if view else None resp = self._get_resource_root().get(self._path() + '/' + rel_path, params=params) return json_to_config(resp, view == 'full') def _update_config(self, rel_path, config, api_version=1): self._require_min_api_version(api_version) resp = self._get_resource_root().put(self._path() + '/' + rel_path, data=config_to_json(config)) return json_to_config(resp, False) def _delete(self, rel_path, ret_type, ret_is_list=False, params=None, api_version=1): return self._call('delete', rel_path, ret_type, ret_is_list, None, params, api_version) def _get(self, rel_path, ret_type, ret_is_list=False, params=None, api_version=1): return self._call('get', rel_path, ret_type, ret_is_list, None, params, api_version) def _post(self, rel_path, ret_type, ret_is_list=False, data=None, params=None, api_version=1): return self._call('post', rel_path, ret_type, ret_is_list, data, params, api_version) def _put(self, rel_path, ret_type, ret_is_list=False, data=None, params=None, api_version=1): return self._call('put', rel_path, ret_type, ret_is_list, data, params, api_version) def _call(self, method, rel_path, ret_type, ret_is_list=False, data=None, params=None, api_version=1): path = self._path() if rel_path: path += '/' + rel_path return call(getattr(self._get_resource_root(), method), path, ret_type, ret_is_list, data, params, api_version) class ApiList(BaseApiObject): """A list of some api object""" LIST_KEY = "items" def __init__(self, objects, resource_root=None, **attrs): BaseApiObject.__init__(self, resource_root, **attrs) # Bypass checks in BaseApiObject.__setattr__ object.__setattr__(self, 'objects', objects) def __str__(self): return ("<ApiList>(%d): [%s]" % (len(self.objects), ", ".join([str(item) for item in self.objects]))) def to_json_dict(self, preserve_ro=False): ret = BaseApiObject.to_json_dict(self, preserve_ro) attr = Attr() ret[ApiList.LIST_KEY] = [attr.to_json(x, preserve_ro) for x in self.objects] return ret def __len__(self): return self.objects.__len__() def __iter__(self): return self.objects.__iter__() def __getitem__(self, i): return self.objects.__getitem__(i) def __getslice__(self, i, j): return 
self.objects.__getslice__(i, j) @classmethod def from_json_dict(cls, dic, resource_root, member_cls=None): if not member_cls: member_cls = cls._MEMBER_CLASS attr = Attr(atype=member_cls) items = [] if ApiList.LIST_KEY in dic: items = [attr.from_json(resource_root, x) for x in dic[ApiList.LIST_KEY]] ret = cls(items) # If the class declares custom attributes, populate them based on the # input dict. The check avoids extra overhead for the common case, # where we just have a plain list. _set_attrs() also does not # understand the "items" attribute, so it can't be in the input data. if cls._ATTRIBUTES: if ApiList.LIST_KEY in dic: dic = copy.copy(dic) del dic[ApiList.LIST_KEY] ret._set_attrs(dic, allow_ro=True) return ret class ApiHostRef(BaseApiObject): _ATTRIBUTES = { 'hostId': None, } def __init__(self, resource_root, hostId=None): BaseApiObject.init(self, resource_root, locals()) def __str__(self): return "<ApiHostRef>: %s" % (self.hostId) class ApiServiceRef(BaseApiObject): _ATTRIBUTES = { 'clusterName': None, 'serviceName': None, 'peerName': None, } def __init__(self, resource_root, serviceName=None, clusterName=None, peerName=None): BaseApiObject.init(self, resource_root, locals()) class ApiClusterRef(BaseApiObject): _ATTRIBUTES = { 'clusterName': None, } def __init__(self, resource_root, clusterName=None): BaseApiObject.init(self, resource_root, locals()) class ApiRoleRef(BaseApiObject): _ATTRIBUTES = { 'clusterName': None, 'serviceName': None, 'roleName': None, } def __init__(self, resource_root, serviceName=None, roleName=None, clusterName=None): BaseApiObject.init(self, resource_root, locals()) class ApiRoleConfigGroupRef(BaseApiObject): _ATTRIBUTES = { 'roleConfigGroupName': None, } def __init__(self, resource_root, roleConfigGroupName=None): BaseApiObject.init(self, resource_root, locals()) class ApiCommand(BaseApiObject): SYNCHRONOUS_COMMAND_ID = -1 @classmethod def _get_attributes(cls): if not ('_ATTRIBUTES' in cls.__dict__): cls._ATTRIBUTES = { 'id': ROAttr(), 'name': ROAttr(), 'startTime': ROAttr(datetime.datetime), 'endTime': ROAttr(datetime.datetime), 'active': ROAttr(), 'success': ROAttr(), 'resultMessage': ROAttr(), 'clusterRef': ROAttr(ApiClusterRef), 'serviceRef': ROAttr(ApiServiceRef), 'roleRef': ROAttr(ApiRoleRef), 'hostRef': ROAttr(ApiHostRef), 'children': ROAttr(ApiCommand, is_api_list=True), 'parent': ROAttr(ApiCommand), 'resultDataUrl': ROAttr(), 'canRetry': ROAttr(), } return cls._ATTRIBUTES def __str__(self): return ("<ApiCommand>: '%s' (id: %s; active: %s; success: %s)" % (self.name, self.id, self.active, self.success)) def _path(self): return '/commands/%d' % self.id def fetch(self): """Retrieve updated data about the command from the server :return: A new ApiCommand object. """ if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID: return self resp = self._get_resource_root().get(self._path()) return ApiCommand.from_json_dict(resp, self._get_resource_root()) def wait(self, timeout=None): """Wait for command to finish :param timeout: (Optional) Max amount of time (in seconds) to wait. Wait forever by default. :return: The final ApiCommand object, containing the last known state. The command may still be running in case of timeout. 
""" if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID: return self SLEEP_SEC = 5 if timeout is None: deadline = None else: deadline = time.time() + timeout while True: cmd = self.fetch() if not cmd.active: return cmd if deadline is not None: now = time.time() if deadline < now: return cmd else: context.sleep(min(SLEEP_SEC, deadline - now)) else: context.sleep(SLEEP_SEC) def abort(self): """Abort a running command :return: A new ApiCommand object with the updated information. """ if self.id == ApiCommand.SYNCHRONOUS_COMMAND_ID: return self path = self._path() + '/abort' resp = self._get_resource_root().post(path) return ApiCommand.from_json_dict(resp, self._get_resource_root()) class ApiBulkCommandList(ApiList): _ATTRIBUTES = { 'errors': ROAttr(), } _MEMBER_CLASS = ApiCommand # # Configuration helpers. # class ApiConfig(BaseApiObject): _ATTRIBUTES = { 'name': None, 'value': None, 'required': ROAttr(), 'default': ROAttr(), 'displayName': ROAttr(), 'description': ROAttr(), 'relatedName': ROAttr(), 'validationState': ROAttr(), 'validationMessage': ROAttr(), } def __init__(self, resource_root, name=None, value=None): BaseApiObject.init(self, resource_root, locals()) def __str__(self): return "<ApiConfig>: %s = %s" % (self.name, self.value) def config_to_api_list(dic): """Convert a python dictionary into an ApiConfig list Converts a python dictionary into a list containing the proper ApiConfig encoding for configuration data. :param dic: Key-value pairs to convert. :return: JSON dictionary of an ApiConfig list (*not* an ApiList). """ config = [] for k, v in six.iteritems(dic): config.append({'name': k, 'value': v}) return {ApiList.LIST_KEY: config} def config_to_json(dic): """Converts a python dictionary into a JSON payload The payload matches the expected "apiConfig list" type used to update configuration parameters using the API. :param dic: Key-value pairs to convert. :return: String with the JSON-encoded data. """ return json.dumps(config_to_api_list(dic)) def json_to_config(dic, full=False): """Converts a JSON-decoded config dictionary to a python dictionary When materializing the full view, the values in the dictionary will be instances of ApiConfig, instead of strings. :param dic: JSON-decoded config dictionary. :param full: Whether to materialize the full view of the config data. :return: Python dictionary with config data. """ config = {} for entry in dic['items']: k = entry['name'] if full: config[k] = ApiConfig.from_json_dict(entry, None) else: config[k] = entry.get('value') return config
py
1a59165bb0a7bd4d5bd7dbae1e9dc77302d4423c
import re import textwrap from ast import literal_eval from inspect import cleandoc from weakref import WeakKeyDictionary from parso.python import tree from parso.cache import parser_cache from parso import split_lines _EXECUTE_NODES = {'funcdef', 'classdef', 'import_from', 'import_name', 'test', 'or_test', 'and_test', 'not_test', 'comparison', 'expr', 'xor_expr', 'and_expr', 'shift_expr', 'arith_expr', 'atom_expr', 'term', 'factor', 'power', 'atom'} _FLOW_KEYWORDS = ( 'try', 'except', 'finally', 'else', 'if', 'elif', 'with', 'for', 'while' ) def get_executable_nodes(node, last_added=False): """ For static analysis. """ result = [] typ = node.type if typ == 'name': next_leaf = node.get_next_leaf() if last_added is False and node.parent.type != 'param' and next_leaf != '=': result.append(node) elif typ == 'expr_stmt': # I think inferring the statement (and possibly returned arrays), # should be enough for static analysis. result.append(node) for child in node.children: result += get_executable_nodes(child, last_added=True) elif typ == 'decorator': # decorator if node.children[-2] == ')': node = node.children[-3] if node != '(': result += get_executable_nodes(node) else: try: children = node.children except AttributeError: pass else: if node.type in _EXECUTE_NODES and not last_added: result.append(node) for child in children: result += get_executable_nodes(child, last_added) return result def get_sync_comp_fors(comp_for): yield comp_for last = comp_for.children[-1] while True: if last.type == 'comp_for': yield last.children[1] # Ignore the async. elif last.type == 'sync_comp_for': yield last elif not last.type == 'comp_if': break last = last.children[-1] def for_stmt_defines_one_name(for_stmt): """ Returns True if only one name is returned: ``for x in y``. Returns False if the for loop is more complicated: ``for x, z in y``. :returns: bool """ return for_stmt.children[1].type == 'name' def get_flow_branch_keyword(flow_node, node): start_pos = node.start_pos if not (flow_node.start_pos < start_pos <= flow_node.end_pos): raise ValueError('The node is not part of the flow.') keyword = None for i, child in enumerate(flow_node.children): if start_pos < child.start_pos: return keyword first_leaf = child.get_first_leaf() if first_leaf in _FLOW_KEYWORDS: keyword = first_leaf return None def clean_scope_docstring(scope_node): """ Returns a cleaned version of the docstring token. """ node = scope_node.get_doc_node() if node is not None: # TODO We have to check next leaves until there are no new # leaves anymore that might be part of the docstring. A # docstring can also look like this: ``'foo' 'bar' # Returns a literal cleaned version of the ``Token``. return cleandoc(safe_literal_eval(node.value)) return '' def find_statement_documentation(tree_node): if tree_node.type == 'expr_stmt': tree_node = tree_node.parent # simple_stmt maybe_string = tree_node.get_next_sibling() if maybe_string is not None: if maybe_string.type == 'simple_stmt': maybe_string = maybe_string.children[0] if maybe_string.type == 'string': return cleandoc(safe_literal_eval(maybe_string.value)) return '' def safe_literal_eval(value): first_two = value[:2].lower() if first_two[0] == 'f' or first_two in ('fr', 'rf'): # literal_eval is not able to resovle f literals. We have to do that # manually, but that's right now not implemented. return '' return literal_eval(value) def get_signature(funcdef, width=72, call_string=None, omit_first_param=False, omit_return_annotation=False): """ Generate a string signature of a function. 
:param width: Fold lines if a line is longer than this value. :type width: int :arg func_name: Override function name when given. :type func_name: str :rtype: str """ # Lambdas have no name. if call_string is None: if funcdef.type == 'lambdef': call_string = '<lambda>' else: call_string = funcdef.name.value params = funcdef.get_params() if omit_first_param: params = params[1:] p = '(' + ''.join(param.get_code() for param in params).strip() + ')' # TODO this is pretty bad, we should probably just normalize. p = re.sub(r'\s+', ' ', p) if funcdef.annotation and not omit_return_annotation: rtype = " ->" + funcdef.annotation.get_code() else: rtype = "" code = call_string + p + rtype return '\n'.join(textwrap.wrap(code, width)) def move(node, line_offset): """ Move the `Node` start_pos. """ try: children = node.children except AttributeError: node.line += line_offset else: for c in children: move(c, line_offset) def get_following_comment_same_line(node): """ returns (as string) any comment that appears on the same line, after the node, including the # """ try: if node.type == 'for_stmt': whitespace = node.children[5].get_first_leaf().prefix elif node.type == 'with_stmt': whitespace = node.children[3].get_first_leaf().prefix elif node.type == 'funcdef': # actually on the next line whitespace = node.children[4].get_first_leaf().get_next_leaf().prefix else: whitespace = node.get_last_leaf().get_next_leaf().prefix except AttributeError: return None except ValueError: # TODO in some particular cases, the tree doesn't seem to be linked # correctly return None if "#" not in whitespace: return None comment = whitespace[whitespace.index("#"):] if "\r" in comment: comment = comment[:comment.index("\r")] if "\n" in comment: comment = comment[:comment.index("\n")] return comment def is_scope(node): t = node.type if t == 'comp_for': # Starting with Python 3.8, async is outside of the statement. return node.children[1].type != 'sync_comp_for' return t in ('file_input', 'classdef', 'funcdef', 'lambdef', 'sync_comp_for') def _get_parent_scope_cache(func): cache = WeakKeyDictionary() def wrapper(used_names, node, include_flows=False): try: for_module = cache[used_names] except KeyError: for_module = cache[used_names] = {} try: return for_module[node] except KeyError: result = for_module[node] = func(node, include_flows) return result return wrapper def get_parent_scope(node, include_flows=False): """ Returns the underlying scope. """ scope = node.parent if scope is None: return None # It's a module already. while True: if is_scope(scope): if scope.type in ('classdef', 'funcdef', 'lambdef'): index = scope.children.index(':') if scope.children[index].start_pos >= node.start_pos: if node.parent.type == 'param' and node.parent.name == node: pass elif node.parent.type == 'tfpdef' and node.parent.children[0] == node: pass else: scope = scope.parent continue return scope elif include_flows and isinstance(scope, tree.Flow): # The cursor might be on `if foo`, so the parent scope will not be # the if, but the parent of the if. if not (scope.type == 'if_stmt' and any(n.start_pos <= node.start_pos < n.end_pos for n in scope.get_test_nodes())): return scope scope = scope.parent get_cached_parent_scope = _get_parent_scope_cache(get_parent_scope) def get_cached_code_lines(grammar, path): """ Basically access the cached code lines in parso. This is not the nicest way to do this, but we avoid splitting all the lines again. 
""" return parser_cache[grammar._hashed][path].lines def cut_value_at_position(leaf, position): """ Cuts of the value of the leaf at position """ lines = split_lines(leaf.value, keepends=True)[:position[0] - leaf.line + 1] column = position[1] if leaf.line == position[0]: column -= leaf.column if not lines: return '' lines[-1] = lines[-1][:column] return ''.join(lines) def expr_is_dotted(node): """ Checks if a path looks like `name` or `name.foo.bar` and not `name()`. """ if node.type == 'atom': if len(node.children) == 3 and node.children[0] == '(': return expr_is_dotted(node.children[1]) return False if node.type == 'atom_expr': children = node.children if children[0] == 'await': return False if not expr_is_dotted(children[0]): return False # Check trailers return all(c.children[0] == '.' for c in children[1:]) return node.type == 'name' def _function_is_x_method(*method_names): def wrapper(function_node): """ This is a heuristic. It will not hold ALL the times, but it will be correct pretty much for anyone that doesn't try to beat it. staticmethod/classmethod are builtins and unless overwritten, this will be correct. """ for decorator in function_node.get_decorators(): dotted_name = decorator.children[1] if dotted_name.get_code() in method_names: return True return False return wrapper function_is_staticmethod = _function_is_x_method('staticmethod') function_is_classmethod = _function_is_x_method('classmethod') function_is_property = _function_is_x_method('property', 'cached_property')
py
1a59178fd1f03d24d394dca8580702a9da9642e4
#!/usr/bin/env python

"""
Copyright (C) 2006 Google Inc.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

     http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""

"""
$URL: https://regionator.googlecode.com/svn/trunk/scripts/mkregionboxes.py $
$Revision: 251 $
$Date: 2007-03-10 12:27:26 -0800 (Sat, 10 Mar 2007) $
"""

import os
import sys

import kml.smalltiles

if len(sys.argv) != 5:
    app = os.path.basename(sys.argv[0])
    print 'usage: %s url.kml level name output.html' % app
    print '   level 0 is finest grain'
    print '   name is used for each box + number'
    sys.exit(1)

inputkml = sys.argv[1]
level = int(sys.argv[2])
name = sys.argv[3]
output = sys.argv[4]

kml.smalltiles.FindTiles(inputkml, level, name, output)
py
1a5919160e9f5c0ccf0f78c6d22040bebca334aa
# -*- coding: utf-8 -*-
"""
Functions for Integration

Taken from the book "A primer on Scientific Programming using Python by Springer"
"""


class Integrator:
    def __init__(self, a, b, n):
        self.a, self.b, self.n = a, b, n
        self.points, self.weights = self.construct_method()

    def construct_method(self):
        raise NotImplementedError('no rule in class %s' % self.__class__.__name__)

    def integrate(self, f):
        s = 0
        for i in range(len(self.weights)):
            s += self.weights[i] * f(self.points[i])
        return s
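Below is a small illustrative subclass (my own sketch, not taken from the book excerpt above) showing how a concrete rule supplies construct_method; the weights follow the standard composite trapezoidal rule.

class Trapezoidal(Integrator):
    """Composite trapezoidal rule: n equally spaced points on [a, b]."""

    def construct_method(self):
        h = (self.b - self.a) / float(self.n - 1)
        points = [self.a + i * h for i in range(self.n)]
        weights = [h] * self.n
        weights[0] *= 0.5
        weights[-1] *= 0.5
        return points, weights


trap = Trapezoidal(0, 2, 101)
print(trap.integrate(lambda x: x ** 2))   # approximately 8/3 ~= 2.6667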
py
1a591930f9a963e0fd4d0894dd48abb98ad9feb5
""" WSGI config for britecore_test project. It exposes the WSGI callable as a module-level variable named ``application``. For more information on this file, see https://docs.djangoproject.com/en/1.11/howto/deployment/wsgi/ """ import os from django.core.wsgi import get_wsgi_application os.environ.setdefault("DJANGO_SETTINGS_MODULE", "britecore_test.settings") application = get_wsgi_application()
py
1a5919daf5ac023969eced4fad3717f89a3159f0
import pytest

import cdx_toolkit


class MockResp:
    def __init__(self, thing):
        self.thing = thing

    def json(self):
        return self.thing


def test_showNumPages():
    j_cc = MockResp({'blocks': 3})
    assert cdx_toolkit.showNumPages(j_cc) == 3

    j_ia = MockResp(3)
    assert cdx_toolkit.showNumPages(j_ia) == 3

    with pytest.raises(ValueError):
        j_bad = MockResp('3')
        assert cdx_toolkit.showNumPages(j_bad) == 3


def test_args():
    with pytest.raises(ValueError):
        cdx = cdx_toolkit.CDXFetcher(wb='foo', warc_prefix='foo')

    with pytest.raises(ValueError):
        cdx = cdx_toolkit.CDXFetcher(source='asdf')

    with pytest.raises(ValueError):
        cdx = cdx_toolkit.CDXFetcher(source='cc', wb='foo')
py
1a591ad15fc8693e2783ceb46d064fab97fcc594
#!/usr/bin/python # -*- coding: utf-8 -*- # Copyright: (c) 2021, Phillipe Smith <[email protected]> # GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) from __future__ import absolute_import, division, print_function __metaclass__ = type DOCUMENTATION = ''' --- module: rundeck_job_run short_description: Run a Rundeck job description: - This module runs a Rundeck job specified by ID. author: "Phillipe Smith (@phsmith)" version_added: 3.8.0 options: job_id: type: str description: - The job unique ID. required: true job_options: type: dict description: - The job options for the steps. - Numeric values must be quoted. filter_nodes: type: str description: - Filter the nodes where the jobs must run. - See U(https://docs.rundeck.com/docs/manual/11-node-filters.html#node-filter-syntax). run_at_time: type: str description: - Schedule the job execution to run at specific date and time. - ISO-8601 date and time format like C(2021-10-05T15:45:00-03:00). loglevel: type: str description: - Log level configuration. choices: [debug, verbose, info, warn, error] default: info wait_execution: type: bool description: - Wait until the job finished the execution. default: true wait_execution_delay: type: int description: - Delay, in seconds, between job execution status check requests. default: 5 wait_execution_timeout: type: int description: - Job execution wait timeout in seconds. - If the timeout is reached, the job will be aborted. - Keep in mind that there is a sleep based on I(wait_execution_delay) after each job status check. default: 120 abort_on_timeout: type: bool description: - Send a job abort request if exceeded the I(wait_execution_timeout) specified. default: false extends_documentation_fragment: - community.general.rundeck - url ''' EXAMPLES = ''' - name: Run a Rundeck job community.general.rundeck_job_run: url: "https://rundeck.example.org" api_version: 39 api_token: "mytoken" job_id: "xxxxxxxxxxxxxxxxx" register: rundeck_job_run - name: Show execution info ansible.builtin.debug: var: rundeck_job_run.execution_info - name: Run a Rundeck job with options community.general.rundeck_job_run: url: "https://rundeck.example.org" api_version: 39 api_token: "mytoken" job_id: "xxxxxxxxxxxxxxxxx" job_options: option_1: "value_1" option_2: "value_3" option_3: "value_3" register: rundeck_job_run - name: Run a Rundeck job with timeout, delay between status check and abort on timeout community.general.rundeck_job_run: url: "https://rundeck.example.org" api_version: 39 api_token: "mytoken" job_id: "xxxxxxxxxxxxxxxxx" wait_execution_timeout: 30 wait_execution_delay: 10 abort_on_timeout: true register: rundeck_job_run - name: Schedule a Rundeck job community.general.rundeck_job_run: url: "https://rundeck.example.org" api_version: 39 api_token: "mytoken" job_id: "xxxxxxxxxxxxxxxxx" run_at_time: "2021-10-05T15:45:00-03:00" register: rundeck_job_schedule - name: Fire-and-forget a Rundeck job community.general.rundeck_job_run: url: "https://rundeck.example.org" api_version: 39 api_token: "mytoken" job_id: "xxxxxxxxxxxxxxxxx" wait_execution: false register: rundeck_job_run ''' RETURN = ''' execution_info: description: Rundeck job execution metadata. 
returned: always type: dict sample: { "msg": "Job execution succeeded!", "execution_info": { "id": 1, "href": "https://rundeck.example.org/api/39/execution/1", "permalink": "https://rundeck.example.org/project/myproject/execution/show/1", "status": "succeeded", "project": "myproject", "executionType": "user", "user": "admin", "date-started": { "unixtime": 1633449020784, "date": "2021-10-05T15:50:20Z" }, "date-ended": { "unixtime": 1633449026358, "date": "2021-10-05T15:50:26Z" }, "job": { "id": "697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", "averageDuration": 4917, "name": "Test", "group": "", "project": "myproject", "description": "", "options": { "exit_code": "0" }, "href": "https://rundeck.example.org/api/39/job/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a", "permalink": "https://rundeck.example.org/project/myproject/job/show/697af0c4-72d3-4c15-86a3-b5bfe3c6cb6a" }, "description": "sleep 5 && echo 'Test!' && exit ${option.exit_code}", "argstring": "-exit_code 0", "serverUUID": "5b9a1438-fa3a-457e-b254-8f3d70338068", "successfulNodes": [ "localhost" ], "output": "Test!" } } ''' # Modules import import json from datetime import datetime, timedelta from time import sleep from ansible.module_utils.basic import AnsibleModule from ansible.module_utils.common.text.converters import to_native from ansible.module_utils.six.moves.urllib.parse import quote from ansible_collections.community.general.plugins.module_utils.rundeck import ( api_argument_spec, api_request ) class RundeckJobRun(object): def __init__(self, module): self.module = module self.url = self.module.params["url"] self.api_version = self.module.params["api_version"] self.job_id = self.module.params["job_id"] self.job_options = self.module.params["job_options"] or {} self.filter_nodes = self.module.params["filter_nodes"] or "" self.run_at_time = self.module.params["run_at_time"] or "" self.loglevel = self.module.params["loglevel"].upper() self.wait_execution = self.module.params['wait_execution'] self.wait_execution_delay = self.module.params['wait_execution_delay'] self.wait_execution_timeout = self.module.params['wait_execution_timeout'] self.abort_on_timeout = self.module.params['abort_on_timeout'] for k, v in self.job_options.items(): if not isinstance(v, str): self.module.exit_json( msg="Job option '%s' value must be a string" % k, execution_info={} ) def job_status_check(self, execution_id): response = dict() timeout = False due = datetime.now() + timedelta(seconds=self.wait_execution_timeout) while not timeout: endpoint = "execution/%d" % execution_id response = api_request(module=self.module, endpoint=endpoint)[0] output = api_request(module=self.module, endpoint="execution/%d/output" % execution_id) log_output = "\n".join([x["log"] for x in output[0]["entries"]]) response.update({"output": log_output}) if response["status"] == "aborted": break elif response["status"] == "scheduled": self.module.exit_json(msg="Job scheduled to run at %s" % self.run_at_time, execution_info=response, changed=True) elif response["status"] == "failed": self.module.fail_json(msg="Job execution failed", execution_info=response) elif response["status"] == "succeeded": self.module.exit_json(msg="Job execution succeeded!", execution_info=response) if datetime.now() >= due: timeout = True break # Wait for 5s before continue sleep(self.wait_execution_delay) response.update({"timed_out": timeout}) return response def job_run(self): response, info = api_request( module=self.module, endpoint="job/%s/run" % quote(self.job_id), method="POST", data={ "loglevel": 
self.loglevel, "options": self.job_options, "runAtTime": self.run_at_time, "filter": self.filter_nodes } ) if info["status"] != 200: self.module.fail_json(msg=info["msg"]) if not self.wait_execution: self.module.exit_json(msg="Job run send successfully!", execution_info=response) job_status = self.job_status_check(response["id"]) if job_status["timed_out"]: if self.abort_on_timeout: api_request( module=self.module, endpoint="execution/%s/abort" % response['id'], method="GET" ) abort_status = self.job_status_check(response["id"]) self.module.fail_json(msg="Job execution aborted due the timeout specified", execution_info=abort_status) self.module.fail_json(msg="Job execution timed out", execution_info=job_status) def main(): argument_spec = api_argument_spec() argument_spec.update(dict( job_id=dict(required=True, type="str"), job_options=dict(type="dict"), filter_nodes=dict(type="str"), run_at_time=dict(type="str"), wait_execution=dict(type="bool", default=True), wait_execution_delay=dict(type="int", default=5), wait_execution_timeout=dict(type="int", default=120), abort_on_timeout=dict(type="bool", default=False), loglevel=dict( type="str", choices=["debug", "verbose", "info", "warn", "error"], default="info" ) )) module = AnsibleModule( argument_spec=argument_spec, supports_check_mode=False ) if module.params["api_version"] < 14: module.fail_json(msg="API version should be at least 14") rundeck = RundeckJobRun(module) rundeck.job_run() if __name__ == "__main__": main()
py
1a591c250bbeb6a206e444e545478c5ec7267cf2
# Copyright (C) 2019 Project AGI # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Model base class.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import abc from sklearn.model_selection import learning_curve class Model: """ The base class for building a classifcation models and running inference on them. """ __metaclass__ = abc.ABCMeta def __init__(self, hparams, batch_size=None, num_classes=None, summary_dir=None, verbose=False): """ Initializes the model parameters. Args: hparams: The hyperparameters for the model as tf.contrib.training.HParams. batch_size: An integer, the number of samples in a batch. num_classes: An integer, the number of classes. summary_dir: The output directory for the results. verbose: A boolean to enable verbose logging. """ self._model = None self._hparams = hparams self._verbose = verbose self._batch_size = batch_size self._num_classes = num_classes self._summary_dir = summary_dir @abc.abstractstaticmethod def default_hparams(): """Builds an HParam object with default hyperparameters.""" raise NotImplementedError('Not implemented') @abc.abstractmethod def train(self, features, labels, seed=None): """ Setup the model with specified hyperparameters and train the model. Args: features: A numpy n-dimensional array containing the features in the shape of samples x features. labels: A numpy array containing the label for each sample. seed: An integer used to specify the randomness seed. """ raise NotImplementedError('Not implemented') @abc.abstractmethod def evaluate(self, features, labels): """ Evaluates the trained model using the specified features and labels. Args: features: A numpy n-dimensional array containing the features in the shape of samples x features. labels: A numpy array containing the label for each sample. Returns: accuracy: The accuracy score of the model. predictions: The labels predicted by the model for each sample. """ raise NotImplementedError('Not implemented') def learning_curve(self, features, labels): """Simple wrapper around sklearn's learning curve module""" return learning_curve(self._model, features, labels)
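A hypothetical concrete subclass (not part of the project) showing how the abstract hooks are typically filled in with a scikit-learn estimator; the class name and the plain-dict hyperparameter handling are illustrative assumptions.

from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score


class LogisticRegressionModel(Model):
    """Illustrative subclass wrapping a scikit-learn classifier."""

    @staticmethod
    def default_hparams():
        # The project uses tf.contrib.training.HParams; a plain dict stands in
        # here to keep the sketch dependency-free.
        return {'C': 1.0}

    def train(self, features, labels, seed=None):
        c = self._hparams.get('C', 1.0) if self._hparams else 1.0
        self._model = LogisticRegression(C=c, random_state=seed)
        self._model.fit(features, labels)

    def evaluate(self, features, labels):
        predictions = self._model.predict(features)
        accuracy = accuracy_score(labels, predictions)
        return accuracy, predictions


# Usage sketch:
# model = LogisticRegressionModel(hparams=LogisticRegressionModel.default_hparams())
# model.train(train_features, train_labels, seed=42)
# accuracy, predictions = model.evaluate(test_features, test_labels)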
py
1a591cecd9ebb408be733b641e6bd37ddf86e8a9
"""train finetune""" # Copyright 2021 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ import os from mindspore import context from mindspore.context import ParallelMode import mindspore.dataset as ds from mindspore.train.serialization import load_checkpoint, load_param_into_net from mindspore.communication.management import init, get_rank, get_group_size from mindspore.common import set_seed from src.args import args from src.data.imagenet import ImgData from src.data.srdata import SRData from src.data.div2k import DIV2K from src.data.bicubic import bicubic from src.ipt_model import IPT from src.utils import Trainer def train_net(distribute, imagenet): """Train net with finetune""" set_seed(1) device_id = int(os.getenv('DEVICE_ID', '0')) context.set_context(mode=context.GRAPH_MODE, device_target="Ascend", save_graphs=False, device_id=device_id) if imagenet == 1: train_dataset = ImgData(args) elif not args.derain: train_dataset = DIV2K(args, name=args.data_train, train=True, benchmark=False) train_dataset.set_scale(args.task_id) else: train_dataset = SRData(args, name=args.data_train, train=True, benchmark=False) train_dataset.set_scale(args.task_id) if distribute: init() rank_id = get_rank() rank_size = get_group_size() parallel_mode = ParallelMode.DATA_PARALLEL context.set_auto_parallel_context(parallel_mode=parallel_mode, device_num=rank_size, gradients_mean=True) print('Rank {}, group_size {}'.format(rank_id, rank_size)) if imagenet == 1: train_de_dataset = ds.GeneratorDataset(train_dataset, ["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"], num_shards=rank_size, shard_id=rank_id, shuffle=True) else: train_de_dataset = ds.GeneratorDataset(train_dataset, ["LR", "HR", "idx", "filename"], num_shards=rank_size, shard_id=rank_id, shuffle=True) else: if imagenet == 1: train_de_dataset = ds.GeneratorDataset(train_dataset, ["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"], shuffle=True) else: train_de_dataset = ds.GeneratorDataset(train_dataset, ["LR", "HR", "idx", "filename"], shuffle=True) if args.imagenet == 1: resize_fuc = bicubic() train_de_dataset = train_de_dataset.batch( args.batch_size, input_columns=["HR", "Rain", "LRx2", "LRx3", "LRx4", "scales", "filename"], output_columns=["LR", "HR", "idx", "filename"], drop_remainder=True, per_batch_map=resize_fuc.forward) else: train_de_dataset = train_de_dataset.batch(args.batch_size, drop_remainder=True) train_loader = train_de_dataset.create_dict_iterator(output_numpy=True) net_m = IPT(args) print("Init net weights successfully") if args.pth_path: param_dict = load_checkpoint(args.pth_path) load_param_into_net(net_m, param_dict) print("Load net weight successfully") train_func = Trainer(args, train_loader, net_m) for epoch in range(0, args.epochs): train_func.update_learning_rate(epoch) train_func.train() if __name__ == "__main__": train_net(distribute=args.distribute, imagenet=args.imagenet)
py
1a591cf7eae5c9833df722104b609d2f88d045cf
# coding: utf-8 from __future__ import absolute_import from bitmovin_api_sdk.common import BaseApi, BitmovinApiLoggerBase from bitmovin_api_sdk.common.poscheck import poscheck_except from bitmovin_api_sdk.models.audio_volume_filter import AudioVolumeFilter from bitmovin_api_sdk.models.bitmovin_response import BitmovinResponse from bitmovin_api_sdk.models.response_envelope import ResponseEnvelope from bitmovin_api_sdk.models.response_error import ResponseError from bitmovin_api_sdk.encoding.filters.audio_volume.customdata.customdata_api import CustomdataApi from bitmovin_api_sdk.encoding.filters.audio_volume.audio_volume_filter_list_query_params import AudioVolumeFilterListQueryParams class AudioVolumeApi(BaseApi): @poscheck_except(2) def __init__(self, api_key, tenant_org_id=None, base_url=None, logger=None): # type: (str, str, str, BitmovinApiLoggerBase) -> None super(AudioVolumeApi, self).__init__( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) self.customdata = CustomdataApi( api_key=api_key, tenant_org_id=tenant_org_id, base_url=base_url, logger=logger ) def create(self, audio_volume_filter, **kwargs): # type: (AudioVolumeFilter, dict) -> AudioVolumeFilter """Create Audio Volume Filter :param audio_volume_filter: The Audio Volume Filter to be created :type audio_volume_filter: AudioVolumeFilter, required :return: Audio volume details :rtype: AudioVolumeFilter """ return self.api_client.post( '/encoding/filters/audio-volume', audio_volume_filter, type=AudioVolumeFilter, **kwargs ) def delete(self, filter_id, **kwargs): # type: (string_types, dict) -> BitmovinResponse """Delete Audio Volume Filter :param filter_id: Id of the Audio volume configuration. :type filter_id: string_types, required :return: Id of the Audio volume. :rtype: BitmovinResponse """ return self.api_client.delete( '/encoding/filters/audio-volume/{filter_id}', path_params={'filter_id': filter_id}, type=BitmovinResponse, **kwargs ) def get(self, filter_id, **kwargs): # type: (string_types, dict) -> AudioVolumeFilter """Audio Volume Filter Details :param filter_id: Id of the audio volume configuration. :type filter_id: string_types, required :return: Audio volume details :rtype: AudioVolumeFilter """ return self.api_client.get( '/encoding/filters/audio-volume/{filter_id}', path_params={'filter_id': filter_id}, type=AudioVolumeFilter, **kwargs ) def list(self, query_params=None, **kwargs): # type: (AudioVolumeFilterListQueryParams, dict) -> AudioVolumeFilter """List Audio Volume Filters :param query_params: Query parameters :type query_params: AudioVolumeFilterListQueryParams :return: List of Audio volume ids :rtype: AudioVolumeFilter """ return self.api_client.get( '/encoding/filters/audio-volume', query_params=query_params, pagination_response=True, type=AudioVolumeFilter, **kwargs )
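A brief usage sketch (not taken from the SDK documentation): the API key and filter id are placeholders, and only calls defined in this class are used.

# Placeholders only: substitute a real API key and filter id.
audio_volume_api = AudioVolumeApi(api_key='<API_KEY>')

# List existing Audio Volume filters (query_params is optional).
volume_filters = audio_volume_api.list()

# Retrieve, then delete, a specific filter by its id.
details = audio_volume_api.get(filter_id='<FILTER_ID>')
deleted = audio_volume_api.delete(filter_id='<FILTER_ID>')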
py
1a591d2aa1881c5be9905a6dfd559d8e91cac866
# -*- coding: utf-8 -*-
"""
    DWX_ZMQ_Execution.py
    --
    @author: Darwinex Labs (www.darwinex.com)

    Copyright (c) 2019 onwards, Darwinex. All rights reserved.

    Licensed under the BSD 3-Clause License, you may not use this file except
    in compliance with the License.

    You may obtain a copy of the License at:
    https://opensource.org/licenses/BSD-3-Clause
"""

from pandas import to_datetime
from time import sleep


class DWX_ZMQ_Execution():

    def __init__(self, _zmq):
        self._zmq = _zmq

    ##########################################################################

    def _execute_(self, _exec_dict, _verbose=False, _delay=0.1, _wbreak=10):

        _check = ''

        # Reset thread data output
        self._zmq._set_response_(None)

        # OPEN TRADE
        if _exec_dict['_action'] == 'OPEN':
            _check = '_action'
            self._zmq._DWX_MTX_NEW_TRADE_(_order=_exec_dict)

        # CLOSE TRADE
        elif _exec_dict['_action'] == 'CLOSE':
            _check = '_response_value'
            self._zmq._DWX_MTX_CLOSE_TRADE_BY_TICKET_(_exec_dict['_ticket'])

        if _verbose:
            print('\n[{}] {} -> MetaTrader'.format(_exec_dict['_comment'],
                                                   str(_exec_dict)))

        # While loop start time reference
        _ws = to_datetime('now')

        # While data not received, sleep until timeout
        while self._zmq._valid_response_('zmq') == False:

            sleep(_delay)

            if (to_datetime('now') - _ws).total_seconds() > (_delay * _wbreak):
                break

        # If data received, return DataFrame
        if self._zmq._valid_response_('zmq'):

            if _check in self._zmq._get_response_().keys():
                return self._zmq._get_response_()

        # Default
        return None

    ##########################################################################
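An illustrative call sequence (not from the original repository). Only _action, _ticket and _comment are read by this class; the remaining order fields are assumptions about what the connector's _DWX_MTX_NEW_TRADE_ expects and may differ between connector versions.

# 'connector' is assumed to be an already-initialised DWX ZeroMQ connector object.
executor = DWX_ZMQ_Execution(_zmq=connector)

open_order = {
    '_action': 'OPEN',            # read by _execute_
    '_comment': 'example_trade',  # used for verbose logging
    '_symbol': 'EURUSD',          # assumption: passed through to the connector
    '_type': 0,                   # assumption: 0 = buy market order
    '_lots': 0.01,                # assumption
}
response = executor._execute_(open_order, _verbose=True)

close_order = {'_action': 'CLOSE', '_ticket': 123456, '_comment': 'example_close'}
executor._execute_(close_order, _verbose=True)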
py
1a591d960b8061f5af062ac30ab38866b6a247d9
n = int(input())

unique_names = set()

for _ in range(n):
    name = input()
    unique_names.add(name)

for current_name in unique_names:
    print(current_name)
py
1a591df53eab1293676e8bfe07a3caa2fae1bd28
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors and The HugginFace Inc. team. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Tokenization classes.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import collections import unicodedata import os import logging from .file_utils import cached_path logger = logging.getLogger(__name__) PRETRAINED_VOCAB_ARCHIVE_MAP = { 'bert-base-uncased': "/media/disk2/jennybae/bert-base-uncased-vocab.txt", 'bert-base-cased': "/media/disk2/jennybae/bert-base-cased-vocab.txt", 'bert-large-uncased': "/media/disk2/jennybae/bert-large-uncased-vocab.txt", # 'bert-base-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-uncased-vocab.txt", # 'bert-large-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-uncased-vocab.txt", # 'bert-base-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-cased-vocab.txt", 'bert-large-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-large-cased-vocab.txt", 'bert-base-multilingual-uncased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-uncased-vocab.txt", 'bert-base-multilingual-cased': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-multilingual-cased-vocab.txt", 'bert-base-chinese': "https://s3.amazonaws.com/models.huggingface.co/bert/bert-base-chinese-vocab.txt", } PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP = { 'bert-base-uncased': 512, 'bert-large-uncased': 512, 'bert-base-cased': 512, 'bert-large-cased': 512, 'bert-base-multilingual-uncased': 512, 'bert-base-multilingual-cased': 512, 'bert-base-chinese': 512, } VOCAB_NAME = 'vocab.txt' def load_vocab(vocab_file): """Loads a vocabulary file into a dictionary.""" vocab = collections.OrderedDict() index = 0 with open(vocab_file, "r", encoding="utf-8") as reader: while True: token = reader.readline() if not token: break token = token.strip() vocab[token] = index index += 1 return vocab def whitespace_tokenize(text): """Runs basic whitespace cleaning and splitting on a peice of text.""" text = text.strip() if not text: return [] tokens = text.split() return tokens class BertTokenizer(object): """Runs end-to-end tokenization: punctuation splitting + wordpiece""" def __init__(self, vocab_file, do_lower_case=True, max_len=None): if not os.path.isfile(vocab_file): raise ValueError( "Can't find a vocabulary file at path '{}'. 
To load the vocabulary from a Google pretrained " "model use `tokenizer = BertTokenizer.from_pretrained(PRETRAINED_MODEL_NAME)`".format(vocab_file)) self.vocab = load_vocab(vocab_file) self.ids_to_tokens = collections.OrderedDict( [(ids, tok) for tok, ids in self.vocab.items()]) self.basic_tokenizer = BasicTokenizer(do_lower_case=do_lower_case) self.wordpiece_tokenizer = WordpieceTokenizer(vocab=self.vocab) self.max_len = max_len if max_len is not None else int(1e12) def tokenize(self, text): split_tokens = [] for token in self.basic_tokenizer.tokenize(text): for sub_token in self.wordpiece_tokenizer.tokenize(token): split_tokens.append(sub_token) return split_tokens def convert_tokens_to_ids(self, tokens): """Converts a sequence of tokens into ids using the vocab.""" ids = [] for token in tokens: ids.append(self.vocab[token]) if len(ids) > self.max_len: raise ValueError( "Token indices sequence length is longer than the specified maximum " " sequence length for this BERT model ({} > {}). Running this" " sequence through BERT will result in indexing errors".format(len(ids), self.max_len) ) return ids def convert_ids_to_tokens(self, ids): """Converts a sequence of ids in wordpiece tokens using the vocab.""" tokens = [] for i in ids: tokens.append(self.ids_to_tokens[i]) return tokens @classmethod def from_pretrained(cls, pretrained_model_name, cache_dir=None, *inputs, **kwargs): """ Instantiate a PreTrainedBertModel from a pre-trained model file. Download and cache the pre-trained model file if needed. """ if pretrained_model_name in PRETRAINED_VOCAB_ARCHIVE_MAP: vocab_file = PRETRAINED_VOCAB_ARCHIVE_MAP[pretrained_model_name] else: vocab_file = pretrained_model_name if os.path.isdir(vocab_file): vocab_file = os.path.join(vocab_file, VOCAB_NAME) # redirect to the cache, if necessary try: resolved_vocab_file = cached_path(vocab_file, cache_dir=cache_dir) except FileNotFoundError: logger.error( "Model name '{}' was not found in model name list ({}). " "We assumed '{}' was a path or url but couldn't find any file " "associated to this path or url.".format( pretrained_model_name, ', '.join(PRETRAINED_VOCAB_ARCHIVE_MAP.keys()), vocab_file)) return None if resolved_vocab_file == vocab_file: logger.info("loading vocabulary file {}".format(vocab_file)) else: logger.info("loading vocabulary file {} from cache at {}".format( vocab_file, resolved_vocab_file)) if pretrained_model_name in PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP: # if we're using a pretrained model, ensure the tokenizer wont index sequences longer # than the number of positional embeddings max_len = PRETRAINED_VOCAB_POSITIONAL_EMBEDDINGS_SIZE_MAP[pretrained_model_name] kwargs['max_len'] = min(kwargs.get('max_len', int(1e12)), max_len) # Instantiate tokenizer. tokenizer = cls(resolved_vocab_file, *inputs, **kwargs) return tokenizer class BasicTokenizer(object): """Runs basic tokenization (punctuation splitting, lower casing, etc.).""" def __init__(self, do_lower_case=True): """Constructs a BasicTokenizer. Args: do_lower_case: Whether to lower case the input. """ self.do_lower_case = do_lower_case def tokenize(self, text): """Tokenizes a piece of text.""" text = self._clean_text(text) # This was added on November 1st, 2018 for the multilingual and Chinese # models. 
This is also applied to the English models now, but it doesn't # matter since the English models were not trained on any Chinese data # and generally don't have any Chinese data in them (there are Chinese # characters in the vocabulary because Wikipedia does have some Chinese # words in the English Wikipedia.). text = self._tokenize_chinese_chars(text) orig_tokens = whitespace_tokenize(text) split_tokens = [] for token in orig_tokens: if self.do_lower_case: token = token.lower() token = self._run_strip_accents(token) split_tokens.extend(self._run_split_on_punc(token)) output_tokens = whitespace_tokenize(" ".join(split_tokens)) return output_tokens def _run_strip_accents(self, text): """Strips accents from a piece of text.""" text = unicodedata.normalize("NFD", text) output = [] for char in text: cat = unicodedata.category(char) if cat == "Mn": continue output.append(char) return "".join(output) def _run_split_on_punc(self, text): """Splits punctuation on a piece of text.""" chars = list(text) i = 0 start_new_word = True output = [] while i < len(chars): char = chars[i] if _is_punctuation(char): output.append([char]) start_new_word = True else: if start_new_word: output.append([]) start_new_word = False output[-1].append(char) i += 1 return ["".join(x) for x in output] def _tokenize_chinese_chars(self, text): """Adds whitespace around any CJK character.""" output = [] for char in text: cp = ord(char) if self._is_chinese_char(cp): output.append(" ") output.append(char) output.append(" ") else: output.append(char) return "".join(output) def _is_chinese_char(self, cp): """Checks whether CP is the codepoint of a CJK character.""" # This defines a "chinese character" as anything in the CJK Unicode block: # https://en.wikipedia.org/wiki/CJK_Unified_Ideographs_(Unicode_block) # # Note that the CJK Unicode block is NOT all Japanese and Korean characters, # despite its name. The modern Korean Hangul alphabet is a different block, # as is Japanese Hiragana and Katakana. Those alphabets are used to write # space-separated words, so they are not treated specially and handled # like the all of the other languages. if ((cp >= 0x4E00 and cp <= 0x9FFF) or # (cp >= 0x3400 and cp <= 0x4DBF) or # (cp >= 0x20000 and cp <= 0x2A6DF) or # (cp >= 0x2A700 and cp <= 0x2B73F) or # (cp >= 0x2B740 and cp <= 0x2B81F) or # (cp >= 0x2B820 and cp <= 0x2CEAF) or (cp >= 0xF900 and cp <= 0xFAFF) or # (cp >= 0x2F800 and cp <= 0x2FA1F)): # return True return False def _clean_text(self, text): """Performs invalid character removal and whitespace cleanup on text.""" output = [] for char in text: cp = ord(char) if cp == 0 or cp == 0xfffd or _is_control(char): continue if _is_whitespace(char): output.append(" ") else: output.append(char) return "".join(output) class WordpieceTokenizer(object): """Runs WordPiece tokenization.""" def __init__(self, vocab, unk_token="[UNK]", max_input_chars_per_word=100): self.vocab = vocab self.unk_token = unk_token self.max_input_chars_per_word = max_input_chars_per_word def tokenize(self, text): """Tokenizes a piece of text into its word pieces. This uses a greedy longest-match-first algorithm to perform tokenization using the given vocabulary. For example: input = "unaffable" output = ["un", "##aff", "##able"] Args: text: A single token or whitespace separated tokens. This should have already been passed through `BasicTokenizer`. Returns: A list of wordpiece tokens. 
""" output_tokens = [] for token in whitespace_tokenize(text): chars = list(token) if len(chars) > self.max_input_chars_per_word: output_tokens.append(self.unk_token) continue is_bad = False start = 0 sub_tokens = [] while start < len(chars): end = len(chars) cur_substr = None while start < end: substr = "".join(chars[start:end]) if start > 0: substr = "##" + substr if substr in self.vocab: cur_substr = substr break end -= 1 if cur_substr is None: is_bad = True break sub_tokens.append(cur_substr) start = end if is_bad: output_tokens.append(self.unk_token) else: output_tokens.extend(sub_tokens) return output_tokens def _is_whitespace(char): """Checks whether `chars` is a whitespace character.""" # \t, \n, and \r are technically contorl characters but we treat them # as whitespace since they are generally considered as such. if char == " " or char == "\t" or char == "\n" or char == "\r": return True cat = unicodedata.category(char) if cat == "Zs": return True return False def _is_control(char): """Checks whether `chars` is a control character.""" # These are technically control characters but we count them as whitespace # characters. if char == "\t" or char == "\n" or char == "\r": return False cat = unicodedata.category(char) if cat.startswith("C"): return True return False def _is_punctuation(char): """Checks whether `chars` is a punctuation character.""" cp = ord(char) # We treat all non-letter/number ASCII as punctuation. # Characters such as "^", "$", and "`" are not in the Unicode # Punctuation class but we treat them as punctuation anyways, for # consistency. if ((cp >= 33 and cp <= 47) or (cp >= 58 and cp <= 64) or (cp >= 91 and cp <= 96) or (cp >= 123 and cp <= 126)): return True cat = unicodedata.category(char) if cat.startswith("P"): return True return False
py
1a591e46c189b69cf6e1f956f0a4f5066fdd75fb
import os, tempfile, subprocess
from string import Template

from PuzzleLib import Config
from PuzzleLib.Compiler.JIT import getCacheDir, computeHash, FileLock

from PuzzleLib.Cuda.SourceModule import SourceModule, ElementwiseKernel, ElementHalf2Kernel, ReductionKernel
from PuzzleLib.Cuda.SourceModule import eltwiseTest, reductionTest

from PuzzleLib.Hip import Driver as HipDriver


hipWarpBit, hipBlockBit = 6, 8
hipWarpSize, hipBlockSize = 1 << hipWarpBit, 1 << hipBlockBit


class HipSourceModule(SourceModule):
    Driver = HipDriver

    runtimeHeader = """
#include <hip/hip_runtime.h>

#define __shfl_xor_sync(mask, value, laneMask, ...) __shfl_xor(value, laneMask, __VA_ARGS__)
#define __shfl_up_sync(mask, value, delta, ...) __shfl_up(value, delta, __VA_ARGS__)
"""

    def __init__(self, source, options=None, includes=None, externC=False, verbose=True, debug=False,
                 recompile=False, name=None):
        super().__init__(source, options, includes, externC, verbose, debug, name)
        self.recompile = recompile
        self.includes = [] if self.includes is None else self.includes

    def build(self):
        source = self.source.replace("cuda_fp16.h", "hip/hip_fp16.h")
        source = ("%sextern \"C\"\n{\n%s\n}\n" if self.externC else "%s%s") % (self.runtimeHeader, source)

        cachedir = getCacheDir(os.path.join(Config.libname, Config.Backend.hip.name))

        with FileLock(cachedir):
            try:
                codename = self.tryBuild(source, cachedir)

            except subprocess.CalledProcessError as e:
                log = e.output.decode()

                text = log if self.debug else "%s\nSource:\n%s" % (
                    log,
                    "\n".join("%-4s %s" % (i + 1, line) for i, line in enumerate(source.splitlines(keepends=False)))
                )

                raise self.Driver.RtcError(text)

            with open(codename, mode="rb") as f:
                hsaco = f.read()

        self.cumod = self.Driver.Module(hsaco)

    def tryBuild(self, source, cachedir):
        options, includes = self.options, self.includes
        hashsum = computeHash(source, *options, *includes)

        codepath = os.path.join(cachedir, hashsum)
        name, srcext = "module" if self.name is None else self.name, ".hip.cpp"

        codename = os.path.join(codepath, "%s.code" % name)
        sourcename = os.path.join(codepath, "%s%s" % (name, srcext))

        if not os.path.exists(codename) or self.recompile:
            os.makedirs(codepath, exist_ok=True)

            args = ["hipcc", "--genco"] + options + ["-o", codename]
            stderr = subprocess.STDOUT if self.verbose else subprocess.DEVNULL

            Config.getLogger().debug("No cache found for HIP extension '%s', performing compilation ...", name)

            if not self.debug:
                f = tempfile.NamedTemporaryFile(mode="w", encoding="utf-8", suffix=srcext, delete=False)

                try:
                    with f:
                        f.write(source)

                    subprocess.check_output(args + [f.name], stderr=stderr)

                finally:
                    os.remove(f.name)

            else:
                with open(sourcename, mode="w", encoding="utf-8") as f:
                    f.write(source)

                subprocess.check_output(args + [sourcename], stderr=stderr)

        else:
            Config.getLogger().debug("Found cached compilation for HIP extension '%s', skipping compilation ...", name)

        return codename

    @classmethod
    def getDefaultOptions(cls):
        deviceIdx = cls.Driver.Device.getCurrent()
        return ["--targets gfx%s" % cls.Driver.Device(deviceIdx).getArch()]


class HipEltwiseKernel(ElementwiseKernel):
    Driver = HipDriver
    SourceModule = HipSourceModule

    warpBit, warpSize = hipWarpBit, hipWarpSize
    blockBit, blockSize = hipBlockBit, hipBlockSize


class HipEltHalf2Kernel(ElementHalf2Kernel):
    Driver = HipDriver
    SourceModule = HipSourceModule

    warpBit, warpSize = hipWarpBit, hipWarpSize
    blockBit, blockSize = hipBlockBit, hipBlockSize


class HipReductionKernel(ReductionKernel):
    Driver = HipDriver
    SourceModule = HipSourceModule

    warpBit, warpSize = hipWarpBit, hipWarpSize
    blockBit, blockSize = hipBlockBit, hipBlockSize

    reduceTmpl = Template("""
#undef READ_AND_MAP
#undef REDUCE

#define READ_AND_MAP(i) ($mapExpr)
#define REDUCE(a, b) ($reduceExpr)

extern "C" __global__ void $name($arguments, $T *partials, int size)
{
    __shared__ $T sdata[$warpSize];

    int tid = threadIdx.x;
    int gid = tid + blockIdx.x * $NT;

    $T acc = $neutral;

    for (int i = gid; i < size; i += $NT * gridDim.x)
        acc = REDUCE(acc, READ_AND_MAP(i));

    for (int mask = $warpSize / 2; mask > 0; mask /= 2)
    {
        $T upval = __shfl_xor(acc, mask, $warpSize);
        acc = REDUCE(acc, upval);
    }

    if (tid % $warpSize == 0)
        sdata[tid / $warpSize] = acc;

    __syncthreads();
    int nwarps = $NT / $warpSize;

    if (tid < $warpSize)
    {
        acc = (tid < nwarps) ? sdata[tid] : $neutral;

        for (int mask = $warpSize / 2; mask > 0; mask /= 2)
        {
            $T upval = __shfl_xor(acc, mask, $warpSize);
            acc = REDUCE(acc, upval);
        }
    }

    if (tid == 0)
        partials[blockIdx.x] = acc;
}
""")


def unittest():
    from PuzzleLib.Hip import Backend

    for deviceIdx in range(Backend.getDeviceCount()):
        bnd = Backend.getBackend(deviceIdx)

        eltwiseTest(bnd)
        reductionTest(bnd)


if __name__ == "__main__":
    unittest()
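# --- Illustrative usage sketch (not part of the original module) ---
# Only the constructor, getDefaultOptions() and build() visible in this file are
# exercised; the kernel source is a made-up example, and running this requires a
# working ROCm/hipcc toolchain and an AMD GPU.
def _demo_build_hip_module():
    src = """
    extern "C" __global__ void scale(float *data, int size)
    {
        int i = blockIdx.x * blockDim.x + threadIdx.x;
        if (i < size) data[i] *= 2.0f;
    }
    """
    module = HipSourceModule(src, options=HipSourceModule.getDefaultOptions(), verbose=True)
    module.build()  # compiles with hipcc and caches the resulting code object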
py
1a591f71a097fb277a222e3b29d0120fbc628fc4
# Written by Hannah Horng ([email protected]) import pandas as pd import neuroCombat as nC from sklearn.preprocessing import LabelEncoder import matplotlib.pyplot as plt from scipy.stats import ranksums, ttest_ind, ttest_rel, ks_2samp import os def NestedComBat(dat, covars, batch_list, categorical_cols=None, continuous_cols=None, drop=False, write_p=False, plotting=False, filepath=''): """ Completes sequential nested ComBat harmonization on an input DataFrame. Order is determined by number of features with statistically significant differences in distribution (KS test) due to a particular batch effect. Arguments --------- data : DataFrame of original data with shape (features, samples) covars : DataFrame with shape (samples, covariates) corresponding to original data. All variables should be label- encoded (i.e. strings converted to integer designations) batch_list : list of strings indicating batch effect column names within covars (i.e. ['Manufacturer', 'CE'...]) categorical_cols : string or list of strings of categorical variables to adjust for continuous_cols : string or list of strings of continuous variables to adjust for drop : Boolean, if True -- features with significant differences in distribution due to the batch effect being harmonized are dropped with each iteration (corresponds to NestedD) write_p : Boolean, if True -- KS test p-values will be written as a CSV into the directory created from filepath plotting : Boolean, if True -- kernel density plots will be written as image files into the directory created from filepath filepath : root directory path for saving KS test p-values and kernel density plots created during harmonization Returns ------- new_dat : DataFrame with shape (features, samples) that has been sequentially harmonized with Nested ComBat """ p_dict = {} count_dict = {} f_dict = {} print('ROUND 1:') for a in range(len(batch_list)): batch_col = batch_list[a] print('Harmonizing by ' + batch_col + '...') filepath2 = filepath + 'Round 1/' + batch_col + '/' if not os.path.exists(filepath2): os.makedirs(filepath2) # RUN COMBAT print('ComBat with Raw Data...') output = nC.neuroCombat(dat, covars, batch_col, continuous_cols=continuous_cols, categorical_cols=categorical_cols)['data'] output_df = pd.DataFrame.from_records(output.T) output_df.columns = dat.T.columns f_dict[batch_col] = output_df if plotting: combat_histograms(dat.T, output_df, covars, covars, batch_col, filepath2) if write_p: p_values = combat_kstest(dat.T, output_df, covars, covars, batch_col, write=True, filepath=filepath2) else: p_values = combat_kstest(dat.T, output_df, covars, covars, batch_col) p_values.index = output_df.columns p_dict[batch_col] = p_values['ComBat'] count_dict[batch_col] = len(p_values[p_values['ComBat'] < .05]) drop_feature = [key for key, value in count_dict.items() if value == min(count_dict.values())][0] # Iteration batch_list2 = batch_list.copy() batch_list2.remove(drop_feature) new_data_df = f_dict[drop_feature] new_pvalues = p_dict[drop_feature] new_dat = new_data_df.T if drop: new_dat = new_data_df.T[new_pvalues > .05] # Dropping every iteration c = 1 while len(batch_list2) > 0: print('ROUND ' + str(c+1) + ':') p_dict = {} count_dict = {} f_dict = {} c = c+1 for b in range(len(batch_list2)): batch_col = batch_list2[b] print('Harmonizing by ' + batch_col + '...') filepath2 = filepath+'Round '+str(c) + '/' + batch_col+'/' if not os.path.exists(filepath2): os.makedirs(filepath2) # RUN COMBAT # print('ComBat with Raw Data...') output = nC.neuroCombat(new_dat, covars, 
batch_col, continuous_cols=continuous_cols, categorical_cols=categorical_cols)['data'] output_df = pd.DataFrame.from_records(output.T) output_df.columns = new_dat.T.columns f_dict[batch_col] = output_df if plotting: combat_histograms(new_dat.T, output_df, covars, covars, batch_col, filepath2) if write_p: p_values = combat_kstest(new_dat.T, output_df, covars, covars, batch_col, write=True, filepath=filepath2) else: p_values = combat_kstest(new_dat.T, output_df, covars, covars, batch_col) p_values.index = output_df.columns p_dict[batch_col] = p_values['ComBat'] count_dict[batch_col] = len(p_values[p_values['ComBat'] < .05]) drop_feature = [key for key, value in count_dict.items() if value == min(count_dict.values())][0] new_data_df = f_dict[drop_feature] new_pvalues = p_dict[drop_feature] if drop: new_dat = new_data_df.T[new_pvalues > .05] # Iteration + Dropping else: new_dat = new_data_df.T batch_list2.remove(drop_feature) output_df = pd.DataFrame.from_records(new_dat.T) output_df.columns = new_dat.T.columns return output_df def combat_kstest(data, output, covars1, covars2, batch_col, filepath='', write=False): """ Calculating KS test for differences in distribution due to batch effect before and after harmonization *Note that this is differs from the version in GMMComBat only by file destination naming Arguments --------- data : DataFrame of original data with shape (samples, features) output: DataFrame of harmonized data with shape (samples, features) covars1 : DataFrame with shape (samples, covariates) corresponding to original data covars2 : DataFrame with shape (samples, covariates) corresponding to harmonized data batch_col : string indicating batch/imaging parameter name in covars filepath : write destination for ks p-value DataFrame if write is True write: Boolean, set to True to save ks p-value DataFrame Returns ------- p_df : DataFrame with two colums corresponding to KS test p-value testing for significant differences in distribution attributable to the batch effect specified by batch_col """ data_keys = data.keys() batch_var1 = covars1[batch_col] batch_var2 = covars2[batch_col] data_0 = data[batch_var1 == 0] data_1 = data[batch_var1 == 1] output_0 = output[batch_var2 == 0] output_1 = output[batch_var2 == 1] # KS Test (more generalized differences in distribution) p_before = [] p_after = [] for m in range(0, data.shape[1]): p_value1 = ks_2samp(data_0.iloc[:, m], data_1.iloc[:, m]) p_value2 = ks_2samp(output_0.iloc[:, m], output_1.iloc[:, m]) p_before.append(p_value1.pvalue) p_after.append(p_value2.pvalue) p_df = pd.DataFrame({'Raw': p_before, 'ComBat': p_after}) if write: p_df = pd.DataFrame({'Raw': p_before, 'ComBat': p_after}) p_df.index = data_keys p_df.to_csv(filepath + '_' + batch_col + '_feature_ks_values.csv') return p_df def combat_histograms(data, output, covars1, covars2, batch_col, filepath): """ Plots kernel density plots separated by batch effect groups and before vs. 
after ComBat harmonization Arguments --------- data : DataFrame of original data with shape (samples, features) output: DataFrame of harmonized data with shape (samples, features) covars1 : DataFrame with shape (samples, covariates) corresponding to original data covars2 : DataFrame with shape (samples, covariates) corresponding to harmonized data batch_col : string indicating batch/imaging parameter name in covars filepath : write destination for kernel density plots """ print('Plotting histograms...') data_keys = data.keys() batch_var1 = covars1[batch_col] batch_var2 = covars2[batch_col] data_0 = data[batch_var1 == 0] data_1 = data[batch_var1 == 1] output_0 = output[batch_var2 == 0] output_1 = output[batch_var2 == 1] for k in range(0, data.shape[1]): plt.figure() data_0.iloc[:, k].plot.kde() data_1.iloc[:, k].plot.kde() output_0.iloc[:, k].plot.kde() output_1.iloc[:, k].plot.kde() plt.xlabel(data_keys[k]) leg = ["0", "1", "0_ComBat", "1_ComBat"] plt.legend(leg, loc='upper right') plt.rcParams.update({'font.size': 12}) filename = filepath + batch_col + '_' + 'histogram_' + data_keys[k] + ".png" plt.savefig(filename, bbox_inches='tight') plt.close() def feature_kstest_histograms(dat, covars, batch_list, filepath): """ Plots kernel density plots and computes KS test p-values separated by batch effect groups for a dataset (intended to assess differences in distribution to all batch effects in batch_list following harmonization with NestedComBat *Note that this is differs from the version in GMMComBat only by file destination naming Arguments --------- data : DataFrame of original data with shape (samples, features) output: DataFrame of harmonized data with shape (samples, features) covars : DataFrame with shape (samples, covariates) corresponding to original data. All variables should be label- encoded (i.e. strings converted to integer designations) batch_list : list of strings indicating batch effect column names within covars (i.e. ['Manufacturer', 'CE'...]) filepath : write destination for kernel density plots """ print('Plotting final feature histograms...') p_df = pd.DataFrame() for batch_col in batch_list: p = [] split_col = covars[batch_col] filepath2 = filepath + 'feature_histograms/' + batch_col + '/' if not os.path.exists(filepath2): os.makedirs(filepath2) for feature in dat: plt.figure() dat[feature][split_col == 0].plot.kde() dat[feature][split_col == 1].plot.kde() plt.xlabel(feature) filename = filepath2 + feature + '.png' plt.savefig(filename, bbox_inches='tight') plt.close() p_value = ks_2samp(dat[feature][split_col == 0], dat[feature][split_col == 1]) p.append(p_value.pvalue) p_df[batch_col] = p p_df.index = dat.keys() p_df.to_csv(filepath + 'final_nested_ks_values.csv')
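# --- Illustrative usage sketch (not part of the original module) ---
# Hypothetical file names and covariate names; per the docstrings above, `dat`
# is oriented (features, samples), `covars` is (samples, covariates) and must
# already be label-encoded (strings converted to integer designations).
def _demo_nested_combat():
    dat = pd.read_csv('features.csv', index_col=0)   # (features, samples), hypothetical file
    covars = pd.read_csv('covars.csv')               # (samples, covariates), hypothetical file
    harmonized = NestedComBat(
        dat, covars,
        batch_list=['Manufacturer', 'CE'],
        categorical_cols=['Gender'],                 # assumed covariate names
        continuous_cols=['Age'],
        drop=False, write_p=True, plotting=False,
        filepath='nested_combat_output/',
    )
    harmonized.to_csv('nested_combat_output/harmonized.csv')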
py
1a591f9b0847f59ba85be9b6301969e0673b8023
import subprocess
import re


# ************************************************
# remove_custom_emoji
# Do not read custom emoji IDs aloud
# ************************************************
def remove_custom_emoji(text):
    # pattern = r'<:[a-zA-Z0-9_]+:[0-9]+>'  # custom emoji pattern
    pattern = r'<:'  # custom emoji pattern
    text = re.sub(pattern, '', text)  # substitution
    pattern = r':[0-9]+>'  # custom emoji pattern
    return re.sub(pattern, '', text)  # substitution


# ************************************************
# exclude_url
# Abbreviate URLs instead of reading them aloud
# ************************************************
def exclude_url(text):
    pattern = "https?://[\w/:%#\$&\?\(\)~\.=\+\-]+"
    return re.sub(pattern, 'URL', text)  # substitution


# ************************************************
# remove_picture
# Do not read image files aloud
# ************************************************
def remove_picture(text):
    pattern = r'.*(\.jpg|\.jpeg|\.gif|\.png|\.bmp)'
    return re.sub(pattern, '画像', text)  # replace with "画像" ("image")


# ************************************************
# remove_command
# Do not read commands aloud
# ************************************************
def remove_command(text):
    pattern = r'^\!.*'
    return re.sub(pattern, '', text)  # substitution


# ************************************************
# remove_log
# Do not read voice-channel join logs aloud
# ************************************************
def remove_log(text):
    pattern = r'(\【VC参加ログ\】.*)'
    return re.sub(pattern, '', text)  # substitution


# ************************************************
# user_custam
# Replace user-registered strings with their readings
# ************************************************
def user_custam(text):
    f = open('user_dictionary.txt', 'r')
    line = f.readline()
    while line:
        pattern = line.strip().split(',')
        if pattern[0] in text:
            text = text.replace(pattern[0], pattern[1])
            print('text after replacement: ' + text)
            break
        else:
            line = f.readline()
    f.close()
    return text


# ************************************************
# creat_WAV
# Write message.content to a text file and an audio file
# argument: inputText
# output files: input.txt, output.wav
# ************************************************
def creat_WAV(text):
    text = remove_custom_emoji(text)  # do not read custom emoji IDs aloud
    text = exclude_url(text)          # abbreviate URLs
    open_jtalk = ['jtalk/bin/open_jtalk']
    mech = ['-x', 'jtalk/dic']
    htsvoice = ['-m', 'jtalk/mei/mei_normal.htsvoice']
    speed = ['-r', '1.0']
    outwav = ['-ow', 'output.wav']
    cmd = open_jtalk + mech + htsvoice + speed + outwav
    c = subprocess.Popen(' '.join(cmd), stdin=subprocess.PIPE, shell=True)
    c.stdin.write(text.encode())
    c.stdin.close()
    c.wait()


if __name__ == '__main__':
    creat_WAV('hello world')
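# --- Illustrative usage sketch (not part of the original file) ---
# Shows the text-cleaning pipeline that runs before speech synthesis; the
# message below is made up, and creat_WAV() still needs a local Open JTalk
# installation under jtalk/ as configured above.
def _demo_pipeline():
    message = '<:smile:12345> check https://example.com/page'
    text = exclude_url(remove_custom_emoji(message))
    print(text)  # -> 'smile check URL'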
py
1a591feea790c02e5d0f7979a6a861527411dae1
'''
Pool.map and Pool.apply lock the main program until all processes are finished,
which is quite useful if we want to obtain results in a particular order for
certain applications.

In contrast, the async variants submit all processes at once and retrieve the
results as soon as they are finished.

One more difference is that we need to use the get method after the
apply_async() call in order to obtain the return values of the finished
processes.
'''
from multiprocessing import Pool, current_process, cpu_count


def f(x):
    return x * 2


def start_process():
    print('Starting', current_process().name)


if __name__ == '__main__':
    inputs = list(range(10))
    print('Input   :', inputs)

    builtin_outputs = list(map(f, inputs))
    print('Built-in:', builtin_outputs)

    # number of worker processes to use
    pool_size = cpu_count()
    print("Number of processes:", pool_size)

    pool = Pool(processes=pool_size, initializer=start_process)

    # using pool.map()
    pool_outputs = pool.map(f, inputs)

    # using pool.apply()
    # pool_outputs = [pool.apply(f, args=(x,)) for x in inputs]

    # using pool.apply_async()
    # pool_outputs = [pool.apply_async(f, args=(x,)) for x in inputs]
    # need to use p.get()
    # pool_outputs = [p.get() for p in pool_outputs]

    pool.close()  # no more tasks
    pool.join()   # wrap up current tasks

    print('Pool:', pool_outputs)
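# --- Illustrative usage sketch (not part of the original file) ---
# A minimal demonstration of the apply_async() variant described in the module
# docstring: all tasks are submitted at once and the return values are
# collected afterwards with get().
def _demo_apply_async():
    with Pool(processes=cpu_count()) as pool:
        async_results = [pool.apply_async(f, args=(x,)) for x in range(10)]
        outputs = [r.get() for r in async_results]
    print('Async pool:', outputs)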
py
1a592054ba2da445f7f83360c1a02aa890893669
#!/usr/bin/python
# -*- coding: utf-8 -*-


class Contest:
    def __init__(self):
        self.name = None
        self.url = None
        self.start_time = None
        self.end_time = None
        self.problems = None

    def getProblems(self):
        pass

    def refreshContest(self):
        pass
py
1a59207735b7d2f7f32821d82f09140d5ad55a2a
from operator import attrgetter import pyangbind.lib.xpathhelper as xpathhelper from pyangbind.lib.yangtypes import RestrictedPrecisionDecimalType, RestrictedClassType, TypedListType from pyangbind.lib.yangtypes import YANGBool, YANGListType, YANGDynClass, ReferenceType from pyangbind.lib.base import PybindBase from decimal import Decimal from bitarray import bitarray import __builtin__ class dad(PybindBase): """ This class was auto-generated by the PythonClass plugin for PYANG from YANG module brocade-common-def - based on the path /ipv6/ipv6-global-cmds/nd-global/dad. Each member element of the container is represented as a class variable - with a specific YANG type. """ __slots__ = ('_pybind_generated_by', '_path_helper', '_yang_name', '_rest_name', '_extmethods', '__global_dad_time',) _yang_name = 'dad' _rest_name = 'dad' _pybind_generated_by = 'container' def __init__(self, *args, **kwargs): path_helper_ = kwargs.pop("path_helper", None) if path_helper_ is False: self._path_helper = False elif path_helper_ is not None and isinstance(path_helper_, xpathhelper.YANGPathHelper): self._path_helper = path_helper_ elif hasattr(self, "_parent"): path_helper_ = getattr(self._parent, "_path_helper", False) self._path_helper = path_helper_ else: self._path_helper = False extmethods = kwargs.pop("extmethods", None) if extmethods is False: self._extmethods = False elif extmethods is not None and isinstance(extmethods, dict): self._extmethods = extmethods elif hasattr(self, "_parent"): extmethods = getattr(self._parent, "_extmethods", None) self._extmethods = extmethods else: self._extmethods = False self.__global_dad_time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="global-dad-time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection', u'alt-name': u'time'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True) load = kwargs.pop("load", None) if args: if len(args) > 1: raise TypeError("cannot create a YANG container with >1 argument") all_attr = True for e in self._pyangbind_elements: if not hasattr(args[0], e): all_attr = False break if not all_attr: raise ValueError("Supplied object did not have the correct attributes") for e in self._pyangbind_elements: nobj = getattr(args[0], e) if nobj._changed() is False: continue setmethod = getattr(self, "_set_%s" % e) if load is None: setmethod(getattr(args[0], e)) else: setmethod(getattr(args[0], e), load=load) def _path(self): if hasattr(self, "_parent"): return self._parent._path()+[self._yang_name] else: return [u'ipv6', u'ipv6-global-cmds', u'nd-global', u'dad'] def _rest_path(self): if hasattr(self, "_parent"): if self._rest_name: return self._parent._rest_path()+[self._rest_name] else: return self._parent._rest_path() else: return [u'ipv6', u'nd', u'dad'] def _get_global_dad_time(self): """ Getter method for global_dad_time, mapped from YANG variable /ipv6/ipv6_global_cmds/nd_global/dad/global_dad_time (common-def:time-interval-sec) """ return 
self.__global_dad_time def _set_global_dad_time(self, v, load=False): """ Setter method for global_dad_time, mapped from YANG variable /ipv6/ipv6_global_cmds/nd_global/dad/global_dad_time (common-def:time-interval-sec) If this variable is read-only (config: false) in the source YANG file, then _set_global_dad_time is considered as a private method. Backends looking to populate this variable should do so via calling thisObj._set_global_dad_time() directly. """ if hasattr(v, "_utype"): v = v._utype(v) try: t = YANGDynClass(v,base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="global-dad-time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection', u'alt-name': u'time'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True) except (TypeError, ValueError): raise ValueError({ 'error-string': """global_dad_time must be of a type compatible with common-def:time-interval-sec""", 'defined-type': "common-def:time-interval-sec", 'generated-type': """YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="global-dad-time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection', u'alt-name': u'time'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True)""", }) self.__global_dad_time = t if hasattr(self, '_set'): self._set() def _unset_global_dad_time(self): self.__global_dad_time = YANGDynClass(base=RestrictedClassType(base_type=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32), restriction_dict={'range': [u'1..5']}), default=RestrictedClassType(base_type=long, restriction_dict={'range': ['0..4294967295']}, int_size=32)(1), is_leaf=True, yang_name="global-dad-time", rest_name="time", parent=self, path_helper=self._path_helper, extmethods=self._extmethods, register_paths=True, extensions={u'tailf-common': {u'cli-full-command': None, u'info': u'retransmit time interval for Neighbor solicitations, sent as part of duplicate address detection', u'alt-name': u'time'}}, namespace='urn:brocade.com:mgmt:brocade-ipv6-nd-ra', defining_module='brocade-ipv6-nd-ra', yang_type='common-def:time-interval-sec', is_config=True) global_dad_time = __builtin__.property(_get_global_dad_time, _set_global_dad_time) _pyangbind_elements = {'global_dad_time': global_dad_time, }
py
1a5921868ea32ce6551069e612ee1686eb9fee7f
from django.shortcuts import render


def main_view(request):
    return render(request, 'main.html')
py
1a5923b7a6317a536cca57045ff8e73a40894fce
import math
import scipy
import numpy as np
from scipy.fftpack import fft, dct

arr = np.array([10., 50., 100., 150., 200., 250., 100., 0])
## arr = np.array([255., 0., 0., 255., 255., 0., 0., 255])
## arr = np.array([155., 0., 0., 155., 155., 0., 0., 155])
## arr = np.array([155., 154., 153., 152., 151., 150., 149., 148])

## print(dct(arr, 2))

factor = []
factor.append(16)
factor.append(15)
factor.append(14)
factor.append(13)
factor.append(11)
factor.append(9)
factor.append(6)
factor.append(3)

## Alternative derivation of the factors as rounded fixed-point cosines:
## bitWidth = 4
## factor_space = np.linspace(0, 7, 8)
## for a in factor_space:
##     temp = int(round(float(math.cos(a * math.pi / 16) * (2 ** bitWidth))))
##     factor.append(temp)
##     print(temp)

factor_arr = np.array([
    [factor[4],  factor[4],  factor[4],  factor[4],  factor[4],  factor[4],  factor[4],  factor[4]],
    [factor[1],  factor[3],  factor[5],  factor[7], -factor[7], -factor[5], -factor[3], -factor[1]],
    [factor[2],  factor[6], -factor[6], -factor[2], -factor[2], -factor[6],  factor[6],  factor[2]],
    [factor[3], -factor[7], -factor[1], -factor[5],  factor[5],  factor[1],  factor[7], -factor[3]],
    [factor[4], -factor[4], -factor[4],  factor[4],  factor[4], -factor[4], -factor[4],  factor[4]],
    [factor[5], -factor[1],  factor[7],  factor[3], -factor[3], -factor[7],  factor[1], -factor[5]],
    [factor[6], -factor[2],  factor[2], -factor[6], -factor[6],  factor[2], -factor[2],  factor[6]],
    [factor[7], -factor[5],  factor[3], -factor[1],  factor[1], -factor[3],  factor[5], -factor[7]],
])

result_arr = np.dot(factor_arr, arr)
print(result_arr)
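# --- Illustrative check (not part of the original script) ---
# The 8x8 matrix above approximates the DCT-II basis with integer factors close
# to 16*cos(k*pi/16), so each output coefficient should track scipy's
# floating-point dct(arr, 2) up to a fixed scale: roughly 8x for rows 1-7 and
# roughly 8/sqrt(2) (about 5.7x) for the DC row, plus a few percent deviation
# from the rounded factors.
reference = dct(arr, 2)
print('scipy dct      :', reference)
print('approx / scipy :', result_arr / reference)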
py
1a5923b9055e70b5ca7b52b07e23eed5aa06df41
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is regenerated. # -------------------------------------------------------------------------- import functools from typing import TYPE_CHECKING import warnings from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error from azure.core.paging import ItemPaged from azure.core.pipeline import PipelineResponse from azure.core.pipeline.transport import HttpResponse from azure.core.polling import LROPoller, NoPolling, PollingMethod from azure.core.polling.base_polling import LROBasePolling from azure.core.rest import HttpRequest from azure.core.tracing.decorator import distributed_trace from msrest import Serializer from .. import models as _models from .._vendor import _convert_request, _format_url_section if TYPE_CHECKING: # pylint: disable=unused-import,ungrouped-imports from typing import Any, Callable, Dict, Generic, Iterable, Optional, TypeVar, Union T = TypeVar('T') ClsType = Optional[Callable[[PipelineResponse[HttpRequest, HttpResponse], T, Dict[str, Any]], Any]] _SERIALIZER = Serializer() # fmt: off def build_get_sql_scripts_by_workspace_request( **kwargs # type: Any ): # type: (...) -> HttpRequest api_version = kwargs.pop('api_version', "2020-12-01") # type: str accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/sqlScripts') # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_create_or_update_sql_script_request_initial( sql_script_name, # type: str **kwargs # type: Any ): # type: (...) -> HttpRequest api_version = kwargs.pop('api_version', "2020-12-01") # type: str content_type = kwargs.pop('content_type', None) # type: Optional[str] if_match = kwargs.pop('if_match', None) # type: Optional[str] accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/sqlScripts/{sqlScriptName}') path_format_arguments = { "sqlScriptName": _SERIALIZER.url("sql_script_name", sql_script_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if if_match is not None: header_parameters['If-Match'] = _SERIALIZER.header("if_match", if_match, 'str') if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="PUT", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_get_sql_script_request( sql_script_name, # type: str **kwargs # type: Any ): # type: (...) 
-> HttpRequest api_version = kwargs.pop('api_version', "2020-12-01") # type: str if_none_match = kwargs.pop('if_none_match', None) # type: Optional[str] accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/sqlScripts/{sqlScriptName}') path_format_arguments = { "sqlScriptName": _SERIALIZER.url("sql_script_name", sql_script_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if if_none_match is not None: header_parameters['If-None-Match'] = _SERIALIZER.header("if_none_match", if_none_match, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="GET", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_delete_sql_script_request_initial( sql_script_name, # type: str **kwargs # type: Any ): # type: (...) -> HttpRequest api_version = kwargs.pop('api_version', "2020-12-01") # type: str accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/sqlScripts/{sqlScriptName}') path_format_arguments = { "sqlScriptName": _SERIALIZER.url("sql_script_name", sql_script_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="DELETE", url=url, params=query_parameters, headers=header_parameters, **kwargs ) def build_rename_sql_script_request_initial( sql_script_name, # type: str **kwargs # type: Any ): # type: (...) -> HttpRequest api_version = kwargs.pop('api_version', "2020-12-01") # type: str content_type = kwargs.pop('content_type', None) # type: Optional[str] accept = "application/json" # Construct URL url = kwargs.pop("template_url", '/sqlScripts/{sqlScriptName}/rename') path_format_arguments = { "sqlScriptName": _SERIALIZER.url("sql_script_name", sql_script_name, 'str'), } url = _format_url_section(url, **path_format_arguments) # Construct parameters query_parameters = kwargs.pop("params", {}) # type: Dict[str, Any] query_parameters['api-version'] = _SERIALIZER.query("api_version", api_version, 'str') # Construct headers header_parameters = kwargs.pop("headers", {}) # type: Dict[str, Any] if content_type is not None: header_parameters['Content-Type'] = _SERIALIZER.header("content_type", content_type, 'str') header_parameters['Accept'] = _SERIALIZER.header("accept", accept, 'str') return HttpRequest( method="POST", url=url, params=query_parameters, headers=header_parameters, **kwargs ) # fmt: on class SqlScriptOperations(object): """SqlScriptOperations operations. You should not instantiate this class directly. Instead, you should create a Client instance that instantiates it for you and attaches it as an attribute. :ivar models: Alias to model classes used in this operation group. :type models: ~azure.synapse.artifacts.models :param client: Client for service requests. :param config: Configuration of service client. :param serializer: An object model serializer. :param deserializer: An object model deserializer. 
""" models = _models def __init__(self, client, config, serializer, deserializer): self._client = client self._serialize = serializer self._deserialize = deserializer self._config = config @distributed_trace def get_sql_scripts_by_workspace( self, **kwargs # type: Any ): # type: (...) -> Iterable["_models.SqlScriptsListResponse"] """Lists sql scripts. :keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :return: An iterator like instance of either SqlScriptsListResponse or the result of cls(response) :rtype: ~azure.core.paging.ItemPaged[~azure.synapse.artifacts.models.SqlScriptsListResponse] :raises: ~azure.core.exceptions.HttpResponseError """ api_version = kwargs.pop('api_version', "2020-12-01") # type: str cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlScriptsListResponse"] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) def prepare_request(next_link=None): if not next_link: request = build_get_sql_scripts_by_workspace_request( api_version=api_version, template_url=self.get_sql_scripts_by_workspace.metadata['url'], ) request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } request.url = self._client.format_url(request.url, **path_format_arguments) else: request = build_get_sql_scripts_by_workspace_request( api_version=api_version, template_url=next_link, ) request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } request.url = self._client.format_url(request.url, **path_format_arguments) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } request.method = "GET" return request def extract_data(pipeline_response): deserialized = self._deserialize("SqlScriptsListResponse", pipeline_response) list_of_elem = deserialized.value if cls: list_of_elem = cls(list_of_elem) return deserialized.next_link or None, iter(list_of_elem) def get_next(next_link=None): request = prepare_request(next_link) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response) raise HttpResponseError(response=response, model=error) return pipeline_response return ItemPaged( get_next, extract_data ) get_sql_scripts_by_workspace.metadata = {'url': '/sqlScripts'} # type: ignore def _create_or_update_sql_script_initial( self, sql_script_name, # type: str sql_script, # type: "_models.SqlScriptResource" if_match=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) 
-> Optional["_models.SqlScriptResource"] cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlScriptResource"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', "2020-12-01") # type: str content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] json = self._serialize.body(sql_script, 'SqlScriptResource') request = build_create_or_update_sql_script_request_initial( sql_script_name=sql_script_name, api_version=api_version, content_type=content_type, json=json, if_match=if_match, template_url=self._create_or_update_sql_script_initial.metadata['url'], ) request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } request.url = self._client.format_url(request.url, **path_format_arguments) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) deserialized = None if response.status_code == 200: deserialized = self._deserialize('SqlScriptResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized _create_or_update_sql_script_initial.metadata = {'url': '/sqlScripts/{sqlScriptName}'} # type: ignore @distributed_trace def begin_create_or_update_sql_script( self, sql_script_name, # type: str sql_script, # type: "_models.SqlScriptResource" if_match=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> LROPoller["_models.SqlScriptResource"] """Creates or updates a Sql Script. :param sql_script_name: The sql script name. :type sql_script_name: str :param sql_script: Sql Script resource definition. :type sql_script: ~azure.synapse.artifacts.models.SqlScriptResource :param if_match: ETag of the SQL script entity. Should only be specified for update, for which it should match existing entity or can be * for unconditional update. :type if_match: str :keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be LROBasePolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either SqlScriptResource or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[~azure.synapse.artifacts.models.SqlScriptResource] :raises: ~azure.core.exceptions.HttpResponseError """ api_version = kwargs.pop('api_version', "2020-12-01") # type: str content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType["_models.SqlScriptResource"] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._create_or_update_sql_script_initial( sql_script_name=sql_script_name, sql_script=sql_script, if_match=if_match, api_version=api_version, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): response = pipeline_response.http_response deserialized = self._deserialize('SqlScriptResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_create_or_update_sql_script.metadata = {'url': '/sqlScripts/{sqlScriptName}'} # type: ignore @distributed_trace def get_sql_script( self, sql_script_name, # type: str if_none_match=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> Optional["_models.SqlScriptResource"] """Gets a sql script. :param sql_script_name: The sql script name. :type sql_script_name: str :param if_none_match: ETag of the sql compute entity. Should only be specified for get. If the ETag matches the existing entity tag, or if * was provided, then no content will be returned. :type if_none_match: str :keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this default value may result in unsupported behavior. 
:paramtype api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :return: SqlScriptResource, or the result of cls(response) :rtype: ~azure.synapse.artifacts.models.SqlScriptResource or None :raises: ~azure.core.exceptions.HttpResponseError """ cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.SqlScriptResource"]] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', "2020-12-01") # type: str request = build_get_sql_script_request( sql_script_name=sql_script_name, api_version=api_version, if_none_match=if_none_match, template_url=self.get_sql_script.metadata['url'], ) request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } request.url = self._client.format_url(request.url, **path_format_arguments) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 304]: map_error(status_code=response.status_code, response=response, error_map=error_map) error = self._deserialize.failsafe_deserialize(_models.CloudErrorAutoGenerated, pipeline_response) raise HttpResponseError(response=response, model=error) deserialized = None if response.status_code == 200: deserialized = self._deserialize('SqlScriptResource', pipeline_response) if cls: return cls(pipeline_response, deserialized, {}) return deserialized get_sql_script.metadata = {'url': '/sqlScripts/{sqlScriptName}'} # type: ignore def _delete_sql_script_initial( self, sql_script_name, # type: str **kwargs # type: Any ): # type: (...) -> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', "2020-12-01") # type: str request = build_delete_sql_script_request_initial( sql_script_name=sql_script_name, api_version=api_version, template_url=self._delete_sql_script_initial.metadata['url'], ) request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } request.url = self._client.format_url(request.url, **path_format_arguments) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202, 204]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if cls: return cls(pipeline_response, None, {}) _delete_sql_script_initial.metadata = {'url': '/sqlScripts/{sqlScriptName}'} # type: ignore @distributed_trace def begin_delete_sql_script( self, sql_script_name, # type: str **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Deletes a Sql Script. :param sql_script_name: The sql script name. :type sql_script_name: str :keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. 
:keyword polling: By default, your polling method will be LROBasePolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. :return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ api_version = kwargs.pop('api_version', "2020-12-01") # type: str polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._delete_sql_script_initial( sql_script_name=sql_script_name, api_version=api_version, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_delete_sql_script.metadata = {'url': '/sqlScripts/{sqlScriptName}'} # type: ignore def _rename_sql_script_initial( self, sql_script_name, # type: str new_name=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) 
-> None cls = kwargs.pop('cls', None) # type: ClsType[None] error_map = { 401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError } error_map.update(kwargs.pop('error_map', {})) api_version = kwargs.pop('api_version', "2020-12-01") # type: str content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] _request = _models.ArtifactRenameRequest(new_name=new_name) json = self._serialize.body(_request, 'ArtifactRenameRequest') request = build_rename_sql_script_request_initial( sql_script_name=sql_script_name, api_version=api_version, content_type=content_type, json=json, template_url=self._rename_sql_script_initial.metadata['url'], ) request = _convert_request(request) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } request.url = self._client.format_url(request.url, **path_format_arguments) pipeline_response = self._client._pipeline.run(request, stream=False, **kwargs) response = pipeline_response.http_response if response.status_code not in [200, 202]: map_error(status_code=response.status_code, response=response, error_map=error_map) raise HttpResponseError(response=response) if cls: return cls(pipeline_response, None, {}) _rename_sql_script_initial.metadata = {'url': '/sqlScripts/{sqlScriptName}/rename'} # type: ignore @distributed_trace def begin_rename_sql_script( self, sql_script_name, # type: str new_name=None, # type: Optional[str] **kwargs # type: Any ): # type: (...) -> LROPoller[None] """Renames a sqlScript. :param sql_script_name: The sql script name. :type sql_script_name: str :param new_name: New name of the artifact. :type new_name: str :keyword api_version: Api Version. The default value is "2020-12-01". Note that overriding this default value may result in unsupported behavior. :paramtype api_version: str :keyword callable cls: A custom type or function that will be passed the direct response :keyword str continuation_token: A continuation token to restart a poller from a saved state. :keyword polling: By default, your polling method will be LROBasePolling. Pass in False for this operation to not poll, or pass in your own initialized polling object for a personal polling strategy. :paramtype polling: bool or ~azure.core.polling.PollingMethod :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present. 
:return: An instance of LROPoller that returns either None or the result of cls(response) :rtype: ~azure.core.polling.LROPoller[None] :raises: ~azure.core.exceptions.HttpResponseError """ api_version = kwargs.pop('api_version', "2020-12-01") # type: str content_type = kwargs.pop('content_type', "application/json") # type: Optional[str] polling = kwargs.pop('polling', True) # type: Union[bool, azure.core.polling.PollingMethod] cls = kwargs.pop('cls', None) # type: ClsType[None] lro_delay = kwargs.pop( 'polling_interval', self._config.polling_interval ) cont_token = kwargs.pop('continuation_token', None) # type: Optional[str] if cont_token is None: raw_result = self._rename_sql_script_initial( sql_script_name=sql_script_name, new_name=new_name, api_version=api_version, content_type=content_type, cls=lambda x,y,z: x, **kwargs ) kwargs.pop('error_map', None) def get_long_running_output(pipeline_response): if cls: return cls(pipeline_response, None, {}) path_format_arguments = { "endpoint": self._serialize.url("self._config.endpoint", self._config.endpoint, 'str', skip_quote=True), } if polling is True: polling_method = LROBasePolling(lro_delay, path_format_arguments=path_format_arguments, **kwargs) elif polling is False: polling_method = NoPolling() else: polling_method = polling if cont_token: return LROPoller.from_continuation_token( polling_method=polling_method, continuation_token=cont_token, client=self._client, deserialization_callback=get_long_running_output ) else: return LROPoller(self._client, raw_result, get_long_running_output, polling_method) begin_rename_sql_script.metadata = {'url': '/sqlScripts/{sqlScriptName}/rename'} # type: ignore
py
1a59241e29fd6b2976693ed13ea2848bde3592e4
import numpy as np
import cv2 as cv
from types import SimpleNamespace

'''
Based on GeeksForGeeks article with an opticalFlow demonstration
'''


def opticalFlow(img1_path, img2_path, blur=False, maxValue=True):
    '''
    Optical flow between two images.

    inputs:
        img1_path, img2_path: two consecutive images
        blur: Boolean - adds blurring to the image for filtering out small particles
        maxValue: Boolean - creates the vector from the maximum value within the
                  pellet bounding box. Average vector otherwise.
    '''
    framet1 = cv.imread(img1_path)
    framet2 = cv.imread(img2_path)

    if blur:
        framet1 = cv.GaussianBlur(framet1, (3, 31), 0)
        framet2 = cv.GaussianBlur(framet2, (3, 31), 0)

    grayt1 = cv.cvtColor(framet1, cv.COLOR_BGR2GRAY)
    grayt2 = cv.cvtColor(framet2, cv.COLOR_BGR2GRAY)

    flow = cv.calcOpticalFlowFarneback(grayt1, grayt2, None, 0.5, 3, 15, 3, 5, 1.2, 0)

    magnitude, angle = cv.cartToPolar(flow[..., 0], flow[..., 1])

    mask = np.zeros_like(framet1)
    mask[..., 1] = 255

    # Sets image hue according to the optical flow direction
    mask[..., 0] = angle * 180 / np.pi / 2

    # Sets image value according to the optical flow magnitude (normalized)
    mask[..., 2] = cv.normalize(magnitude, None, 0, 255, cv.NORM_MINMAX)

    # Converts HSV to RGB (BGR) color representation
    rgb = cv.cvtColor(mask, cv.COLOR_HSV2BGR)

    # Hard-coded pellet bounding box
    x0 = 973
    x1 = 1021
    y0 = 300
    y1 = 350
    bbox = SimpleNamespace(x0=x0, x1=x1, y0=y0, y1=y1)  # drawBbox expects an object with x0/x1/y0/y1
    drawBbox(framet1, bbox)

    if maxValue:
        d = (np.max(flow[973:1021, 300:350][0][:, 0], axis=0),
             np.max(flow[973:1021, 300:350][0][:, 1], axis=0))
    else:
        d = np.average(flow[973:1021, 300:350][0], axis=0)

    centerPoint = (1000, 325)
    endPoint = (int(centerPoint[0] + d[0]), int(centerPoint[1] + d[1]))

    framet1 = framet1 + rgb
    cv.arrowedLine(framet1, centerPoint, endPoint, (0, 0, 255), 2)
    displayImage(framet1)
    return flow


def displayImage(frame, description="input, press 0 to exit"):
    cv.imshow(description, frame)
    cv.waitKey(0)


def drawBbox(frame, bbox, color=100, linewidth=3):
    cv.line(frame, (bbox.x0, bbox.y0), (bbox.x0, bbox.y1), color, linewidth)
    cv.line(frame, (bbox.x0, bbox.y1), (bbox.x1, bbox.y1), color, linewidth)
    cv.line(frame, (bbox.x1, bbox.y0), (bbox.x1, bbox.y1), color, linewidth)
    cv.line(frame, (bbox.x0, bbox.y0), (bbox.x1, bbox.y0), color, linewidth)
    return frame


def drawVectfromBbox(frame, startPoint, endPoint):
    cv.arrowedLine(frame, startPoint, endPoint, (0, 0, 255), 2)
    return frame


def euclidianDistance(p1, p2):
    return np.sqrt((int(p1[0]) - int(p2[0])) ** 2 + (int(p1[1]) - int(p2[1])) ** 2)


# opticalFlow('img0371.jpg', 'img0372.jpg', blur=False)
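# --- Illustrative usage (not part of the original file) ---
# The frame filenames below are hypothetical; any two consecutive frames large
# enough to contain the hard-coded pellet bounding box will do. The returned
# flow is a (height, width, 2) array of per-pixel (dx, dy) displacements.
# flow = opticalFlow('frame_0001.jpg', 'frame_0002.jpg', blur=True, maxValue=False)
# print(flow.shape)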
py
1a5924449b3265e71e8aa8b0333f9e2c61510f56
import numpy as np
from astropy import units as u
from astropy.wcs import WCSSUB_CELESTIAL, WCSSUB_SPECTRAL


def get_spatial_scale(wcs, assert_square=True):
    # Code adapted from APLpy
    wcs = wcs.sub([WCSSUB_CELESTIAL])
    cdelt = np.matrix(wcs.wcs.get_cdelt())
    pc = np.matrix(wcs.wcs.get_pc())
    scale = np.array(cdelt * pc)

    if assert_square:
        try:
            np.testing.assert_almost_equal(abs(cdelt[0, 0]), abs(cdelt[0, 1]))
            np.testing.assert_almost_equal(abs(pc[0, 0]), abs(pc[1, 1]))
            np.testing.assert_almost_equal(abs(scale[0, 0]), abs(scale[0, 1]))
        except AssertionError:
            raise ValueError("Non-square pixels. Please resample data.")

    return abs(scale[0, 0]) * u.Unit(wcs.wcs.cunit[0])


def get_spectral_scale(wcs):
    # Code adapted from APLpy
    wcs = wcs.sub([WCSSUB_SPECTRAL])
    cdelt = np.matrix(wcs.wcs.get_cdelt())
    pc = np.matrix(wcs.wcs.get_pc())
    scale = np.array(cdelt * pc)
    return abs(scale[0, 0]) * u.Unit(wcs.wcs.cunit[0])


def sanitize_wcs(mywcs):
    pc = np.matrix(mywcs.wcs.get_pc())
    if (pc[:, 2].sum() != pc[2, 2] or pc[2, :].sum() != pc[2, 2]):
        raise ValueError("Non-independent 3rd axis.")

    axtypes = mywcs.get_axis_types()
    if ((axtypes[0]['coordinate_type'] != 'celestial' or
         axtypes[1]['coordinate_type'] != 'celestial' or
         axtypes[2]['coordinate_type'] != 'spectral')):

        cunit3 = mywcs.wcs.cunit[2]
        ctype3 = mywcs.wcs.ctype[2]

        if cunit3 != '':
            cunit3 = u.Unit(cunit3)
            if cunit3.is_equivalent(u.m / u.s):
                mywcs.wcs.ctype[2] = 'VELO'
            elif cunit3.is_equivalent(u.Hz):
                mywcs.wcs.ctype[2] = 'FREQ'
            elif cunit3.is_equivalent(u.m):
                mywcs.wcs.ctype[2] = 'WAVE'
            else:
                raise ValueError("Could not determine type of 3rd axis.")
        elif ctype3 != '':
            if 'VELO' in ctype3:
                mywcs.wcs.ctype[2] = 'VELO'
            elif 'FELO' in ctype3:
                mywcs.wcs.ctype[2] = 'VELO-F2V'
            elif 'FREQ' in ctype3:
                mywcs.wcs.ctype[2] = 'FREQ'
            elif 'WAVE' in ctype3:
                mywcs.wcs.ctype[2] = 'WAVE'
            else:
                raise ValueError("Could not determine type of 3rd axis.")
        else:
            raise ValueError("Cube axes not in expected orientation: PPV")

    return mywcs


def get_wcs_system_frame(wcs):
    """TODO: move to astropy.wcs.utils"""
    ct = wcs.sub([WCSSUB_CELESTIAL]).wcs.ctype
    if 'GLON' in ct[0]:
        from astropy.coordinates import Galactic
        return Galactic
    elif 'RA' in ct[0]:
        from astropy.coordinates import ICRS
        return ICRS
    else:
        raise ValueError("Unrecognized coordinate system")
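# --- Illustrative usage sketch (not part of the original module) ---
# Builds a minimal 3-axis (RA, Dec, frequency) WCS by hand and reads back the
# pixel scales; the numbers are arbitrary and only serve as an example.
def _demo_scales():
    from astropy.wcs import WCS

    mywcs = WCS(naxis=3)
    mywcs.wcs.ctype = ['RA---TAN', 'DEC--TAN', 'FREQ']
    mywcs.wcs.cdelt = [-1e-3, 1e-3, 1e6]
    mywcs.wcs.cunit = ['deg', 'deg', 'Hz']

    print(get_spatial_scale(mywcs))     # 0.001 deg
    print(get_spectral_scale(mywcs))    # 1000000.0 Hz
    print(get_wcs_system_frame(mywcs))  # ICRS frame class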
py
1a5924c2024ca54ed09da2a3185ed70a38c39637
# Standard Library
import uuid

# Third Party
from tests.analysis.utils import generate_data

# First Party
from smdebug.analysis.utils import no_refresh
from smdebug.exceptions import StepNotYetAvailable
from smdebug.rules import Rule
from smdebug.trials import create_trial


def test_no_refresh_invocation():
    class TestRule(Rule):
        def __init__(self, base_trial):
            super().__init__(base_trial=base_trial, action_str="")

        def set_required_tensors(self, step):
            for t in self.base_trial.tensor_names():
                self.req_tensors.add(t, steps=[step])

        def invoke_at_step(self, step):
            for t in self.req_tensors.get():
                if step == 0:
                    assert t.value(step + 1) is not None
                elif step == 1:
                    try:
                        t.value(step + 1)
                        assert False
                    except StepNotYetAvailable:
                        pass

    run_id = str(uuid.uuid4())
    base_path = "ts_output/rule_no_refresh/"
    path = base_path + run_id

    num_tensors = 3

    generate_data(
        path=base_path,
        trial=run_id,
        num_tensors=num_tensors,
        step=0,
        tname_prefix="foo",
        worker="algo-1",
        shape=(3, 3, 3),
    )
    generate_data(
        path=base_path,
        trial=run_id,
        num_tensors=num_tensors,
        step=1,
        tname_prefix="foo",
        worker="algo-1",
        shape=(3, 3, 3),
    )

    tr = create_trial(path)
    r = TestRule(tr)
    r.invoke(0)
    r.invoke(1)

    generate_data(
        path=base_path,
        trial=run_id,
        num_tensors=num_tensors,
        step=2,
        tname_prefix="foo",
        worker="algo-1",
        shape=(3, 3, 3),
    )

    # will not see step2 data
    with no_refresh(tr):
        r.invoke_at_step(1)

    # below will refresh
    try:
        r.invoke(1)
        assert False
    except AssertionError:
        pass
py
1a5924e19dda4a1315f067364fc0d2d1049129c6
"""Utilities for processing test outputs.""" import pathlib import re import subprocess from pathlib import Path from typing import Iterator, Tuple def split_string( string: str, sub_length: int = 40, copy: bool = False ) -> Tuple[str, ...]: """Split a string into subsections less than or equal to new length. Args: string (str): The long string to split up. sub_length (int): The maximum length of the subsections. Defaults to 56. copy (bool): Copy output to clipboard. Returns: Tuple[str]: The string split into sections. """ string_length = len(string) split = tuple( string[begin : begin + sub_length] for begin in range(0, string_length, sub_length) ) if copy is True: string = str(split) copy_string(string) return split def copy_string(string: str) -> None: """Copies the string to clipboard. Uses pbcopy, so for now only works with macOS. """ subprocess.run("/usr/bin/pbcopy", text=True, input=string) # noqa: S603 def write_output(string: str, test_name: str, replace_links: bool = True) -> None: """Write the output to the expected output's file.""" if replace_links: tempfile_link_pattern = re.compile( r"(?P<prefix>file://)" r"(?P<core_tempfile_link>[\w\s/\\]*)" r"(?P<suffix>\d+\.\w+)" ) string = tempfile_link_pattern.sub( lambda match: f"{match.group('prefix')}" "{{ tempfile_path }}" f"{match.group('suffix')}", string=string, ) output_directory = pathlib.Path(__file__).parent.parent / pathlib.Path( "unit", "expected_outputs" ) expected_output_file = output_directory / pathlib.Path(test_name).with_suffix( ".txt" ) expected_output_file.write_text(string) def _get_all_expected_output_paths() -> Iterator[Path]: """Get the paths of all the expected output files.""" file_dir = pathlib.Path(__file__).parent expected_outputs = ( file_dir.parent / pathlib.Path("unit", "expected_outputs") ).glob("**/*.txt") yield from expected_outputs def replace_expected_section(old: str, new: str) -> None: """Replace all occurrences of a section in the expected output.""" expected_output_paths = _get_all_expected_output_paths() for expected_output_path in expected_output_paths: old_text = expected_output_path.read_text() new_text = old_text.replace(old, new) expected_output_path.write_text(new_text)
py
1a5924e6067593ef52dc6f3579f5b229420b417a
"Camera based door sensor"
py
1a5924f1468498731ad231c4dbf170c6cb5fc639
# coding: utf-8 from __future__ import unicode_literals import re from .common import InfoExtractor from ..utils import int_or_none class TumblrIE(InfoExtractor): _VALID_URL = r'https?://(?P<blog_name>[^/?#&]+)\.tumblr\.com/(?:post|video)/(?P<id>[0-9]+)(?:$|[/?#])' _TESTS = [{ 'url': 'http://tatianamaslanydaily.tumblr.com/post/54196191430/orphan-black-dvd-extra-behind-the-scenes', 'md5': '479bb068e5b16462f5176a6828829767', 'info_dict': { 'id': '54196191430', 'ext': 'mp4', 'title': 'tatiana maslany news, Orphan Black || DVD extra - behind the scenes ↳...', 'description': 'md5:37db8211e40b50c7c44e95da14f630b7', 'thumbnail': r're:http://.*\.jpg', } }, { 'url': 'http://5sostrum.tumblr.com/post/90208453769/yall-forgetting-the-greatest-keek-of-them-all', 'md5': 'bf348ef8c0ef84fbf1cbd6fa6e000359', 'info_dict': { 'id': '90208453769', 'ext': 'mp4', 'title': '5SOS STRUM ;]', 'description': 'md5:dba62ac8639482759c8eb10ce474586a', 'thumbnail': r're:http://.*\.jpg', } }, { 'url': 'http://hdvideotest.tumblr.com/post/130323439814/test-description-for-my-hd-video', 'md5': '7ae503065ad150122dc3089f8cf1546c', 'info_dict': { 'id': '130323439814', 'ext': 'mp4', 'title': 'HD Video Testing \u2014 Test description for my HD video', 'description': 'md5:97cc3ab5fcd27ee4af6356701541319c', 'thumbnail': r're:http://.*\.jpg', }, 'params': { 'format': 'hd', }, }, { 'url': 'http://naked-yogi.tumblr.com/post/118312946248/naked-smoking-stretching', 'md5': 'de07e5211d60d4f3a2c3df757ea9f6ab', 'info_dict': { 'id': 'Wmur', 'ext': 'mp4', 'title': 'naked smoking & stretching', 'upload_date': '20150506', 'timestamp': 1430931613, 'age_limit': 18, 'uploader_id': '1638622', 'uploader': 'naked-yogi', }, 'add_ie': ['Vidme'], }, { 'url': 'http://camdamage.tumblr.com/post/98846056295/', 'md5': 'a9e0c8371ea1ca306d6554e3fecf50b6', 'info_dict': { 'id': '105463834', 'ext': 'mp4', 'title': 'Cam Damage-HD 720p', 'uploader': 'John Moyer', 'uploader_id': 'user32021558', }, 'add_ie': ['Vimeo'], }, { 'url': 'http://sutiblr.tumblr.com/post/139638707273', 'md5': '2dd184b3669e049ba40563a7d423f95c', 'info_dict': { 'id': 'ir7qBEIKqvq', 'ext': 'mp4', 'title': 'Vine by sutiblr', 'alt_title': 'Vine by sutiblr', 'uploader': 'sutiblr', 'uploader_id': '1198993975374495744', 'upload_date': '20160220', 'like_count': int, 'comment_count': int, 'repost_count': int, }, 'add_ie': ['Vine'], }, { 'url': 'http://vitasidorkina.tumblr.com/post/134652425014/joskriver-victoriassecret-invisibility-or', 'md5': '01c12ceb82cbf6b2fe0703aa56b3ad72', 'info_dict': { 'id': '-7LnUPGlSo', 'ext': 'mp4', 'title': 'Video by victoriassecret', 'description': 'Invisibility or flight…which superpower would YOU choose? 
#VSFashionShow #ThisOrThat', 'uploader_id': 'victoriassecret', 'thumbnail': r're:^https?://.*\.jpg' }, 'add_ie': ['Instagram'], }] def _real_extract(self, url): m_url = re.match(self._VALID_URL, url) video_id = m_url.group('id') blog = m_url.group('blog_name') url = 'http://%s.tumblr.com/post/%s/' % (blog, video_id) webpage, urlh = self._download_webpage_handle(url, video_id) iframe_url = self._search_regex( r'src=\'(https?://www\.tumblr\.com/video/[^\']+)\'', webpage, 'iframe url', default=None) if iframe_url is None: return self.url_result(urlh.geturl(), 'Generic') iframe = self._download_webpage(iframe_url, video_id, 'Downloading iframe page') duration = None sources = [] sd_url = self._search_regex( r'<source[^>]+src=(["\'])(?P<url>.+?)\1', iframe, 'sd video url', default=None, group='url') if sd_url: sources.append((sd_url, 'sd')) options = self._parse_json( self._search_regex( r'data-crt-options=(["\'])(?P<options>.+?)\1', iframe, 'hd video url', default='', group='options'), video_id, fatal=False) if options: duration = int_or_none(options.get('duration')) hd_url = options.get('hdUrl') if hd_url: sources.append((hd_url, 'hd')) formats = [{ 'url': video_url, 'ext': 'mp4', 'format_id': format_id, 'height': int_or_none(self._search_regex( r'/(\d{3,4})$', video_url, 'height', default=None)), 'quality': quality, } for quality, (video_url, format_id) in enumerate(sources)] self._sort_formats(formats) # The only place where you can get a title, it's not complete, # but searching in other places doesn't work for all videos video_title = self._html_search_regex( r'(?s)<title>(?P<title>.*?)(?: \| Tumblr)?</title>', webpage, 'title') return { 'id': video_id, 'title': video_title, 'description': self._og_search_description(webpage, default=None), 'thumbnail': self._og_search_thumbnail(webpage, default=None), 'duration': duration, 'formats': formats, }
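

# --- Illustrative sketch (not part of the original module) ---
# Extractors such as TumblrIE are normally driven through YoutubeDL rather than
# instantiated directly. A hypothetical minimal invocation, reusing one of the
# URLs from _TESTS above:
#
#     from youtube_dl import YoutubeDL
#     with YoutubeDL() as ydl:
#         info = ydl.extract_info(
#             'http://sutiblr.tumblr.com/post/139638707273', download=False)
#         print(info.get('title'))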
py
1a5925aa29fbedb62600b0e541b20b8e77b0b87d
import re import threading from typing import Any from antlr4 import CommonTokenStream, InputStream, ParserRuleContext from antlr4.error.ErrorListener import ErrorListener from .errors import GrammarParseError # Import from visitor in order to check the presence of generated grammar files # files in a single place. from .grammar_visitor import ( # type: ignore OmegaConfGrammarLexer, OmegaConfGrammarParser, ) # Used to cache grammar objects to avoid re-creating them on each call to `parse()`. # We use a per-thread cache to make it thread-safe. _grammar_cache = threading.local() # Build regex pattern to efficiently identify typical interpolations. # See test `test_match_simple_interpolation_pattern` for examples. _config_key = r"[$\w]+" # foo, $0, $bar, $foo_$bar123$ _key_maybe_brackets = f"{_config_key}|\\[{_config_key}\\]" # foo, [foo], [$bar] _node_access = f"\\.{_key_maybe_brackets}" # .foo, [foo], [$bar] _node_path = f"(\\.)*({_key_maybe_brackets})({_node_access})*" # [foo].bar, .foo[bar] _node_inter = f"\\${{\\s*{_node_path}\\s*}}" # node interpolation ${foo.bar} _id = "[a-zA-Z_]\\w*" # foo, foo_bar, abc123 _resolver_name = f"({_id}(\\.{_id})*)?" # foo, ns.bar3, ns_1.ns_2.b0z _arg = "[a-zA-Z_0-9/\\-\\+.$%*@?|]+" # string representing a resolver argument _args = f"{_arg}(\\s*,\\s*{_arg})*" # list of resolver arguments _resolver_inter = f"\\${{\\s*{_resolver_name}\\s*:\\s*{_args}?\\s*}}" # ${foo:bar} _inter = f"({_node_inter}|{_resolver_inter})" # any kind of interpolation _outer = "([^$]|\\$(?!{))+" # any character except $ (unless not followed by {) SIMPLE_INTERPOLATION_PATTERN = re.compile( f"({_outer})?({_inter}({_outer})?)+$", flags=re.ASCII ) # NOTE: SIMPLE_INTERPOLATION_PATTERN must not generate false positive matches: # it must not accept anything that isn't a valid interpolation (per the # interpolation grammar defined in `omegaconf/grammar/*.g4`). class OmegaConfErrorListener(ErrorListener): # type: ignore def syntaxError( self, recognizer: Any, offending_symbol: Any, line: Any, column: Any, msg: Any, e: Any, ) -> None: raise GrammarParseError(str(e) if msg is None else msg) from e def reportAmbiguity( self, recognizer: Any, dfa: Any, startIndex: Any, stopIndex: Any, exact: Any, ambigAlts: Any, configs: Any, ) -> None: raise GrammarParseError("ANTLR error: Ambiguity") # pragma: no cover def reportAttemptingFullContext( self, recognizer: Any, dfa: Any, startIndex: Any, stopIndex: Any, conflictingAlts: Any, configs: Any, ) -> None: # Note: for now we raise an error to be safe. However this is mostly a # performance warning, so in the future this may be relaxed if we need # to change the grammar in such a way that this warning cannot be # avoided (another option would be to switch to SLL parsing mode). raise GrammarParseError( "ANTLR error: Attempting Full Context" ) # pragma: no cover def reportContextSensitivity( self, recognizer: Any, dfa: Any, startIndex: Any, stopIndex: Any, prediction: Any, configs: Any, ) -> None: raise GrammarParseError("ANTLR error: ContextSensitivity") # pragma: no cover def parse( value: str, parser_rule: str = "configValue", lexer_mode: str = "DEFAULT_MODE" ) -> ParserRuleContext: """ Parse interpolated string `value` (and return the parse tree). 
""" l_mode = getattr(OmegaConfGrammarLexer, lexer_mode) istream = InputStream(value) cached = getattr(_grammar_cache, "data", None) if cached is None: error_listener = OmegaConfErrorListener() lexer = OmegaConfGrammarLexer(istream) lexer.removeErrorListeners() lexer.addErrorListener(error_listener) lexer.mode(l_mode) token_stream = CommonTokenStream(lexer) parser = OmegaConfGrammarParser(token_stream) parser.removeErrorListeners() parser.addErrorListener(error_listener) # The two lines below could be enabled in the future if we decide to switch # to SLL prediction mode. Warning though, it has not been fully tested yet! # from antlr4 import PredictionMode # parser._interp.predictionMode = PredictionMode.SLL # Note that although the input stream `istream` is implicitly cached within # the lexer, it will be replaced by a new input next time the lexer is re-used. _grammar_cache.data = lexer, token_stream, parser else: lexer, token_stream, parser = cached # Replace the old input stream with the new one. lexer.inputStream = istream # Initialize the lexer / token stream / parser to process the new input. lexer.mode(l_mode) token_stream.setTokenSource(lexer) parser.reset() try: return getattr(parser, parser_rule)() except Exception as exc: if type(exc) is Exception and str(exc) == "Empty Stack": # This exception is raised by antlr when trying to pop a mode while # no mode has been pushed. We convert it into an `GrammarParseError` # to facilitate exception handling from the caller. raise GrammarParseError("Empty Stack") else: raise
py
1a5925e56bf1c0a0de750fe7b80066e4e177a0a7
# Copyright (C) 2018-2021 Intel Corporation # SPDX-License-Identifier: Apache-2.0 import pytest # test.BACKEND_NAME is a configuration variable determining which # nGraph backend tests will use. It's set during pytest configuration time. # See `pytest_configure` hook in `conftest.py` for more details. BACKEND_NAME = None # test.MODEL_ZOO_DIR is a configuration variable providing the path # to the ZOO of ONNX models to test. It's set during pytest configuration time. # See `pytest_configure` hook in `conftest.py` for more # details. MODEL_ZOO_DIR = None # test.MODEL_ZOO_XFAIL is a configuration variable which enable xfails for model zoo. MODEL_ZOO_XFAIL = False def xfail_test(reason="Mark the test as expected to fail", strict=True): return pytest.mark.xfail(reason=reason, strict=strict) skip_segfault = pytest.mark.skip(reason="Segmentation fault error") xfail_issue_33488 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "MaxUnpool") xfail_issue_33512 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "Einsum") xfail_issue_33535 = xfail_test(reason="nGraph does not support the following ONNX operations:" "DynamicQuantizeLinear") xfail_issue_33538 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "Scan") skip_issue_38084 = pytest.mark.skip(reason="Aborted (core dumped) Assertion " "`(layer->get_output_partial_shape(i).is_static())' failed.") xfail_issue_33589 = xfail_test(reason="nGraph does not support the following ONNX operations:" "IsNaN and isInf") xfail_issue_33595 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "Unique") xfail_issue_33596 = xfail_test(reason="RuntimeError: nGraph does not support different sequence operations:" "ConcatFromSequence, SequenceConstruct, SequenceAt, SplitToSequence," "SequenceEmpty, SequenceInsert, SequenceErase, SequenceLength ") xfail_issue_33606 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "Det") xfail_issue_33651 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "TfIdfVectorizer") xfail_issue_33581 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "GatherElements") xfail_issue_33633 = xfail_test(reason="MaxPool: dilations unsupported") xfail_issue_35911 = xfail_test(reason="Assertion error: Pad model mismatch error") xfail_issue_35912 = xfail_test(reason="RuntimeError: Error of validate layer: B with type: " "Pad. Cannot parse parameter pads_end from IR for layer B. 
" "Value -1,0 cannot be casted to int.") xfail_issue_35923 = xfail_test(reason="RuntimeError: PReLU without weights is not supported") xfail_issue_35925 = xfail_test(reason="Assertion error - reduction ops results mismatch") xfail_issue_35927 = xfail_test(reason="RuntimeError: B has zero dimension that is not allowable") xfail_issue_36480 = xfail_test(reason="RuntimeError: [NOT_FOUND] Unsupported property dummy_option " "by CPU plugin") xfail_issue_36485 = xfail_test(reason="RuntimeError: Check 'm_group >= 1' failed at " "/openvino/ngraph/core/src/op/shuffle_channels.cpp:77:") xfail_issue_36486 = xfail_test(reason="RuntimeError: HardSigmoid operation should be converted " "to HardSigmoid_IE") xfail_issue_36487 = xfail_test(reason="Assertion error - mvn operator computation mismatch") xfail_issue_38084 = xfail_test(reason="RuntimeError: AssertionFailed: layer->get_output_partial_shape(i)" "is_static() nGraph <value> operation with name: <value> cannot be" "converted to <value> layer with name: <value> because output" "with index 0 contains dynamic shapes: {<value>}. Try to use " "CNNNetwork::reshape() method in order to specialize shapes " "before the conversion.") xfail_issue_38091 = xfail_test(reason="AssertionError: Mismatched elements") xfail_issue_38699 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "ai.onnx.preview.training.Gradient") xfail_issue_38701 = xfail_test(reason="RuntimeError: unsupported element type: STRING") xfail_issue_38706 = xfail_test(reason="RuntimeError: output_3.0 has zero dimension which is not allowed") xfail_issue_38708 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Slice): y>': " "Axes input must be constant") xfail_issue_38710 = xfail_test(reason="RuntimeError: roi has zero dimension which is not allowed") xfail_issue_38713 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "ai.onnx.preview.training.Momentum") xfail_issue_43742 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "If") xfail_issue_45457 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v5::Loop" "Not constant termination condition body output is not supported") xfail_issue_38722 = xfail_test(reason="RuntimeError: While validating ONNX nodes MatMulInteger" "and QLinearMatMul" "Input0 scale and input0 zero point shape must be same and 1") xfail_issue_38723 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "QLinearConv") xfail_issue_38724 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Resize): Y>':" "tf_crop_and_resize - this type of coordinate transformation mode" "is not supported. 
Choose one of the following modes:" "tf_half_pixel_for_nn, asymmetric, align_corners, pytorch_half_pixel," "half_pixel") xfail_issue_38725 = xfail_test(reason="RuntimeError: While validating ONNX node '<Node(Loop):" "value info has no element type specified") xfail_issue_38726 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "LessOrEqual") xfail_issue_38732 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "ConvInteger") xfail_issue_38734 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "ai.onnx.preview.training.Adam") xfail_issue_38735 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "ai.onnx.preview.training.Adagrad") xfail_issue_48052 = xfail_test(reason="Dropout op is not supported in traning mode") xfail_issue_45180 = xfail_test(reason="RuntimeError: Unsupported dynamic op: ReduceSum") xfail_issue_44839 = xfail_test(reason="Huge computation missmatch") xfail_issue_44848 = xfail_test(reason="E Unsupported dynamic op: Range") xfail_issue_44851 = xfail_test(reason="E Unsupported dynamic op: Broadcast") xfail_issue_44854 = xfail_test(reason="E Unsupported dynamic op: VariadicSplit") xfail_issue_44858 = xfail_test(reason="E Unsupported dynamic op: Unsqueeze") xfail_issue_44956 = xfail_test(reason="E Unsupported dynamic op: Loop") xfail_issue_44957 = xfail_test(reason="E Unsupported dynamic op: NonZero") xfail_issue_44958 = xfail_test(reason="E Unsupported dynamic op: Interpolate") xfail_issue_44965 = xfail_test(reason="E RuntimeError: value info has no element") xfail_issue_44968 = xfail_test(reason="E Unsupported dynamic op: Squeeze") xfail_issue_44970 = xfail_test(reason="Assertion error") xfail_issue_44976 = xfail_test(reason="E RuntimeError: Quantize layer with name:" "FakeQuantize_xxx has non const input on 1 port") xfail_issue_46762 = xfail_test(reason="Incorrect result of Minimum op if uint data type is used") xfail_issue_46765 = xfail_test(reason="select_last_index attribute is not supported by ArgMin and ArgMax") xfail_issue_47323 = xfail_test(reason="RuntimeError: The plugin does not support FP64") xfail_issue_47337 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::OneHot") xfail_issue_33593 = xfail_test(reason="Current implementation of MaxPool doesn't support indices output") # Model MSFT issues: xfail_issue_37957 = xfail_test(reason="RuntimeError: nGraph does not support the following ONNX operations:" "com.microsoft.CropAndResize, com.microsoft.GatherND," "com.microsoft.Pad, com.microsoft.Range") xfail_issue_39669 = xfail_test(reason="AssertionError: This model has no test data") xfail_issue_40686 = xfail_test(reason="NANs as results") xfail_issue_36534 = xfail_test(reason="RuntimeError: node input index is out of range") xfail_issue_36536 = xfail_test(reason="RuntimeError: can't protect") xfail_issue_36538 = xfail_test(reason="RuntimeError: Check 'PartialShape::broadcast_merge_into( pshape, " "node->get_input_partial_shape(i), autob)' failed at " "/openvino/ngraph/src/ngraph/op/util/elementwise_args.cpp:48:") xfail_issue_39656 = xfail_test(reason="RuntimeError: Reshape reshaped has dynamic second input!") xfail_issue_39658 = xfail_test(reason="RuntimeError: Tile operation has a form that is not supported." " z should be converted to TileIE operation.") xfail_issue_39659 = xfail_test(reason="RuntimeError: Broadcast operation has a form that is not supported." 
" y should be converted to Tile operation.") xfail_issue_45344 = xfail_test(reason="Unsupported dynamic ops: v3::NonMaxSuppressionIE3") xfail_issue_39662 = xfail_test(reason="RuntimeError: 'ScatterElementsUpdate' layer with name 'y' have " "indices value that points to non-existing output tensor element") xfail_issue_37973 = xfail_test(reason="TF Inception V2 - AssertionError: zoo models results mismatch") xfail_issue_47430 = xfail_test(reason="FCN ResNet models - AssertionError: zoo models results mismatch") xfail_issue_47495 = xfail_test(reason="BertSquad-10 from MSFT - AssertionError: zoo models results mismatch") xfail_issue_49207 = xfail_test(reason="Function references undeclared parameters") xfail_issue_48145 = xfail_test(reason="BertSquad-8 - AssertionError: Items are not equal: ACTUAL: 4 " "DESIRED: 3") xfail_issue_48190 = xfail_test(reason="RobertaBase-11 - AssertionError: Items are not equal: " "ACTUAL: dtype('float64') DESIRED: dtype('float32')") xfail_issue_49750 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v4::Interpolate") xfail_issue_49752 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::Pad") xfail_issue_49753 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::StridedSlice") xfail_issue_49754 = xfail_test(reason="RuntimeError: Unsupported dynamic ops: v1::TopKIE") xfail_issue_52463 = xfail_test(reason="test_operator_add_size1_singleton_broadcast_cpu - " "Not equal to tolerance") xfail_issue_49391 = xfail_test(reason="Roll is not implemented in CPU plugin.")
py
1a592682145e61a441b2f374248383d4825d7c0b
######################################################################### ## This file is part of the alpha-beta-CROWN verifier ## ## ## ## Copyright (C) 2021, Huan Zhang <[email protected]> ## ## Kaidi Xu <[email protected]> ## ## Shiqi Wang <[email protected]> ## ## Zhouxing Shi <[email protected]> ## ## Yihan Wang <[email protected]> ## ## ## ## This program is licenced under the BSD 3-Clause License, ## ## contained in the LICENCE file in this directory. ## ## ## ######################################################################### """alpha-beta-CROWN verifier interface to handle robustness verification.""" import os import re import socket import random import time import gc from utils import get_test_acc, load_model, load_verification_dataset import numpy as np import pandas as pd import torch import arguments from auto_LiRPA import BoundedModule, BoundedTensor from auto_LiRPA.perturbations import PerturbationLpNorm from bab_verification_general import mip, incomplete_verifier, bab from attack_pgd import pgd_attack from utils import Normalization, get_pgd_acc def config_args(): # Add arguments specific for this front-end. h = ["general"] arguments.Config.add_argument("--mode", type=str, default="verified-acc", choices=["verified-acc", "runnerup", "clean-acc", "crown-only-verified-acc", "alpha-crown-only-verified-acc", "ibp-only-verified-acc", "attack-only", "specify-target"], help='Verify against all labels ("verified-acc" mode), or just the runnerup labels ("runnerup" mode), or using a specified label in dataset ("speicify-target" mode, only used for oval20). Mode can also be set as "crown-only-verified-acc" or "alpha-crown-only-verified-acc", which quickly computes the verified accuracy over the entire dataset via CROWN or alpha-CROWN.', hierarchy=h + ["mode"]) arguments.Config.add_argument('--complete_verifier', choices=["bab", "mip", "bab-refine", "skip"], default="bab", help='Complete verification verifier. "bab": branch and bound with beta-CROWN; "mip": mixed integer programming (MIP) formulation; "bab-refine": branch and bound with intermediate layer bounds computed by MIP.', hierarchy=h + ["complete_verifier"]) arguments.Config.add_argument('--no_incomplete', action='store_false', dest='incomplete', help='Enable/Disable initial alpha-CROWN incomplete verification (this can save GPU memory when disabled).', hierarchy=h + ["enable_incomplete_verification"]) arguments.Config.add_argument("--crown", action='store_true', help='Compute CROWN verified accuracy before verification (not used).', hierarchy=h + ["get_crown_verified_acc"]) h = ["model"] arguments.Config.add_argument("--model", type=str, default="please_specify_model_name", help='Name of model. Model must be defined in the load_verification_dataset() function in utils.py.', hierarchy=h + ["name"]) h = ["data"] arguments.Config.add_argument("--dataset", type=str, default="CIFAR", choices=["MNIST", "CIFAR", "CIFAR_SDP_FULL", "CIFAR_RESNET", "CIFAR_SAMPLE", "MNIST_SAMPLE", "CIFAR_ERAN", "MNIST_ERAN", "MNIST_ERAN_UN", "MNIST_SDP", "MNIST_MADRY_UN", "CIFAR_SDP", "CIFAR_UN"], help='Dataset name. 
Dataset must be defined in utils.py.', hierarchy=h + ["dataset"]) arguments.Config.add_argument("--filter_path", type=str, default=None, help='A filter in pkl format contains examples that will be skipped (not used).', hierarchy=h + ["data_filter_path"]) arguments.Config.add_argument("--data_idx_file", type=str, default=None, help='A text file with a list of example IDs to run.', hierarchy=h + ["data_idx_file"]) h = ["attack"] arguments.Config.add_argument("--mip_attack", action='store_true', help='Use MIP (Gurobi) based attack if PGD cannot find a successful adversarial example.', hierarchy=h + ["enable_mip_attack"]) arguments.Config.add_argument('--pgd_steps', type=int, default=100, help="Steps of PGD attack.", hierarchy=h + ["pgd_steps"]) arguments.Config.add_argument('--pgd_restarts', type=int, default=30, help="Number of random PGD restarts.", hierarchy= h + ["pgd_restarts"]) arguments.Config.add_argument('--no_pgd_early_stop', action='store_false', dest='pgd_early_stop', help="Early stop PGD when an adversarial example is found.", hierarchy=h + ["pgd_early_stop"]) arguments.Config.add_argument('--pgd_lr_decay', type=float, default=0.99, help='Learning rate decay factor used in PGD attack.', hierarchy= h + ["pgd_lr_decay"]) arguments.Config.add_argument('--pgd_alpha', type=str, default="auto", help='Step size of PGD attack. Default (auto) is epsilon/4.', hierarchy=h + ["pgd_alpha"]) h = ["debug"] arguments.Config.add_argument("--lp_test", type=str, default=None, choices=["MIP", "LP", "LP_intermediate_refine", "MIP_intermediate_refine", None], help='Debugging option, do not use.', hierarchy=h + ['lp_test']) arguments.Config.parse_config() def get_statistics(model, image, true_label, eps, data_min, data_max, batch_size, method="CROWN"): """For quickly checking clean accuracy and CROWN verified accuracy.""" assert method == "CROWN" or method == "alpha-CROWN" or method == "IBP" # Clearn accuracy predicted = model(image) n_correct = (predicted.argmax(dim=1) == true_label).sum().item() print(f'{n_correct} examples are correct, image range ({image.min()}, {image.max()})') # CROWN verified accuracy verified = 0 N = image.size(0) num_outputs = arguments.Config["data"]["num_outputs"] norm = np.inf assert norm == arguments.Config["specification"]["norm"] # TODO: make this function support more norms. model = BoundedModule(model, torch.empty_like(image[:batch_size]), device=arguments.Config["general"]["device"]) if method == 'alpha-CROWN': # Set alpha-CROWN optimization parameters. lr_alpha = arguments.Config["solver"]["alpha-crown"]["lr_alpha"] iteration = arguments.Config["solver"]["alpha-crown"]["iteration"] share_slopes = arguments.Config["solver"]["alpha-crown"]["share_slopes"] optimizer = arguments.Config["solver"]["beta-crown"]["optimizer"] lr_decay = arguments.Config["solver"]["beta-crown"]["lr_decay"] model.set_bound_opts({'optimize_bound_args': {'ob_iteration': iteration, 'ob_beta': False, 'ob_alpha': True, 'ob_alpha_share_slopes': share_slopes, 'ob_optimizer': optimizer, 'ob_lr': lr_alpha, 'ob_lr_decay': lr_decay}}) batch_idx = 0 all_start_time = time.time() while batch_idx * batch_size < N: start_time = time.time() start_idx, end_idx = batch_idx*batch_size, min(batch_idx*batch_size+batch_size, N) data, labels = image[start_idx:end_idx], torch.tensor(true_label[start_idx:end_idx]) if arguments.Config["specification"]["type"] == "lp": # Linf norm only so far. 
data_ub = torch.min(data + eps, data_max) data_lb = torch.max(data - eps, data_min) else: # Per-example, per-element lower and upper bounds. data_ub = data_max[start_idx:end_idx] data_lb = data_min[start_idx:end_idx] data, data_lb, data_ub, labels = data.cuda(), data_lb.cuda(), data_ub.cuda(), labels.cuda() ptb = PerturbationLpNorm(norm=norm, eps=None, x_L=data_lb, x_U=data_ub) data = BoundedTensor(data, ptb) # labels = torch.argmax(pred, dim=1).cpu().detach().numpy() c = torch.eye(num_outputs).type_as(data)[labels].unsqueeze(1) - torch.eye(num_outputs).type_as(data).unsqueeze(0) I = (~(labels.data.unsqueeze(1) == torch.arange(num_outputs).type_as(labels.data).unsqueeze(0))) c = (c[I].view(data.size(0), num_outputs - 1, num_outputs)).cuda() if method == "CROWN" or method == "IBP": with torch.no_grad(): lb, ub = model.compute_bounds(x=(data,), method=method, C=c, bound_upper=False) else: # alpha-CROWN requires gradient. lb, ub = model.compute_bounds(x=(data,), method="CROWN-optimized", C=c, bound_upper=False) verified += (lb.min(1)[0]>=0).sum().item() # Print some bounds for the first batch for debugging. duration = time.time() - start_time if batch_idx == 0: print("Bounds for first a few examples:") print(lb[:10].detach().cpu().numpy()) print(f"batch: {batch_idx}, verified acc: {(lb.min(1)[0]>=0).sum().item()} / {data.size(0)}, time {duration}") del lb, ub batch_idx += 1 full_duration = time.time() - all_start_time print(f"{method} verified acc: {verified/N * 100}%, {verified} verified, time {full_duration}") def main(): print(f'Experiments at {time.ctime()} on {socket.gethostname()}') torch.manual_seed(arguments.Config["general"]["seed"]) random.seed(arguments.Config["general"]["seed"]) np.random.seed(arguments.Config["general"]["seed"]) if arguments.Config["general"]["device"] != 'cpu': torch.cuda.manual_seed_all(arguments.Config["general"]["seed"]) # Always disable TF32 (precision is too low for verification). torch.backends.cuda.matmul.allow_tf32 = False torch.backends.cudnn.allow_tf32 = False if arguments.Config["general"]["deterministic"]: os.environ["CUBLAS_WORKSPACE_CONFIG"] = ":4096:8" torch.use_deterministic_algorithms(True) if arguments.Config["general"]["double_fp"]: torch.set_default_dtype(torch.float64) if arguments.Config["attack"]["pgd_order"] != "skip": if arguments.Config["specification"]["type"] == "lp" and arguments.Config["specification"]["norm"] != np.inf: print('Only Linf-norm attack is supported, the pgd_order will be changed to skip') arguments.Config["attack"]["pgd_order"] = "skip" model_ori = load_model(weights_loaded=True) if arguments.Config["specification"]["epsilon"] is not None: perturb_epsilon = torch.tensor(arguments.Config["specification"]["epsilon"], dtype=torch.get_default_dtype()) else: print('No epsilon defined!') perturb_epsilon = None X, labels, runnerup, data_max, data_min, perturb_epsilon, target_label = load_verification_dataset(perturb_epsilon) if arguments.Config["general"]["mode"] == "clean-acc": print("Testing clean accuracy.") get_test_acc(model_ori, X=X, labels=labels, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"]) return if "MNIST" in arguments.Config["data"]["dataset"]: attack_dataset = "MNIST" elif "CIFAR" in arguments.Config["data"]["dataset"]: attack_dataset = "CIFAR" else: attack_dataset = "UNKNOWN" if arguments.Config["specification"]["type"] == 'lp': if perturb_epsilon is None: raise ValueError("Perturbation epsilon is not set by data loader. Do you mean to use the 'bound' type specification? 
Try adding --spec_type bound") if arguments.Config["specification"]["epsilon"] is not None: print(f"epsilon after preprocessing: {perturb_epsilon}, data_max = {data_max}, data_min = {data_min}") if data_max.size(0) != 1 or data_min.size(0) != 1: raise ValueError("For 'lp' type specification, you need rabsolute (global) lower and upper bounds, not per example bounds.") elif arguments.Config["specification"]["type"] == 'bound': print(f'Loaded datasets with per-element lower and upper bounds: max = {data_max.max().item()}, min = {data_min.min().item()}') if data_max.size(0) != X.size(0) or data_min.size(0) != X.size(0): raise ValueError("For 'bound' type specification, you need per example lower and upper bounds.") if perturb_epsilon is None: perturb_epsilon = (data_max - data_min).mean() / 2. print(f'eps set to {perturb_epsilon}. This will not be used for certification, but will be used to determine PGD step size.') if arguments.Config["data"]["data_idx_file"] is not None: # Go over a list of data indices. with open(arguments.Config["data"]["data_idx_file"]) as f: bnb_ids = re.split(r'[;|,|\n|\s]+', f.read().strip()) bnb_ids = [int(b_id) for b_id in bnb_ids] print(f'Example indices (total {len(bnb_ids)}): {bnb_ids}') else: # By default, we go over all data. bnb_ids = list(range(X.shape[0])) bnb_ids = bnb_ids[arguments.Config["data"]["start"]: arguments.Config["data"]["end"]] print('Task length:', len(bnb_ids)) save_path = 'Verified_ret_[{}]_start={}_end={}_iter={}_b={}_timeout={}_branching={}-{}-{}_lra-init={}_lra={}_lrb={}_PGD={}.npy'. \ format(arguments.Config['model']['name'], arguments.Config["data"]["start"], arguments.Config["data"]["end"], arguments.Config["solver"]["beta-crown"]["iteration"], arguments.Config["solver"]["beta-crown"]["batch_size"], arguments.Config["bab"]["timeout"], arguments.Config["bab"]["branching"]["method"], arguments.Config["bab"]["branching"]["reduceop"], arguments.Config["bab"]["branching"]["candidates"], arguments.Config["solver"]["alpha-crown"]["lr_alpha"], arguments.Config["solver"]["beta-crown"]["lr_alpha"], arguments.Config["solver"]["beta-crown"]["lr_beta"], arguments.Config["attack"]["pgd_order"]) print(f'saving results to {save_path}') if arguments.Config["general"]["mode"] == "crown-only-verified-acc": get_statistics(model_ori, X, labels, perturb_epsilon, data_min, data_max, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"]) return if arguments.Config["general"]["mode"] == "alpha-crown-only-verified-acc": get_statistics(model_ori, X, labels, perturb_epsilon, data_min, data_max, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"], method="alpha-CROWN") return if arguments.Config["general"]["mode"] == "ibp-only-verified-acc": get_statistics(model_ori, X, labels, perturb_epsilon, data_min, data_max, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"], method="IBP") return if arguments.Config["general"]["mode"] == "attack-only": get_pgd_acc(model_ori, X, labels, perturb_epsilon, data_min, data_max, batch_size=arguments.Config["solver"]["beta-crown"]["batch_size"]) ret, lb_record, attack_success = [], [], [] mip_unsafe, mip_safe, mip_unknown = [], [], [] verified_acc = len(bnb_ids) verified_failed = [] verified_success_list = [] example_time = [] skipped_examples = [] nat_acc = len(bnb_ids) orig_timeout = arguments.Config["bab"]["timeout"] model_ori, all_data_max, all_data_min = model_ori.to(arguments.Config["general"]["device"]), data_max.to(arguments.Config["general"]["device"]), 
data_min.to(arguments.Config["general"]["device"]) if isinstance(perturb_epsilon, torch.Tensor): perturb_eps = perturb_epsilon.to(arguments.Config["general"]["device"]) for new_idx, imag_idx in enumerate(bnb_ids): arguments.Config["bab"]["timeout"] = orig_timeout print('\n %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% idx:', new_idx, 'img ID:', imag_idx, '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%') torch.cuda.empty_cache() gc.collect() x, y = X[imag_idx], int(labels[imag_idx].item()) x = x.unsqueeze(0).to(dtype=torch.get_default_dtype(), device=arguments.Config["general"]["device"]) if arguments.Config["specification"]["type"] == 'bound': data_min = all_data_min[imag_idx].unsqueeze(0) data_max = all_data_max[imag_idx].unsqueeze(0) else: data_min = all_data_min data_max = all_data_max # first check the model is correct at the input logit_pred = model_ori(x)[0] if logit_pred.size(0) > 1: # Multi-class. y_pred = torch.max(logit_pred, 0)[1].item() else: # Binary classifier: logit_pred > 0 => label 1, otherwise label 0. y_pred = int(logit_pred.item() > 0) if type(perturb_epsilon) is list: # Each image has different epsilon (e.g., OVAL 20). perturb_eps = perturb_epsilon[imag_idx].to(arguments.Config["general"]["device"]) print(f'predicted label {y_pred}, correct label {y}, image norm {x.abs().sum().item()}, logits {logit_pred}') if y_pred != y: print(f'Result: image {imag_idx} prediction is incorrect, skipped.') skipped_examples.append(imag_idx) verified_acc -= 1 nat_acc -= 1 # attack_success.append(imag_idx) continue # else: # # enable here to check clean acc # continue verified_success = False verified_status = "unknown" attack_margin = None attack_images = None example_start_time = time.time() if arguments.Config["attack"]["pgd_order"] == "before": start_attack = time.time() attack_args = {'dataset': attack_dataset, 'model': model_ori, 'x': x, 'max_eps': perturb_eps, 'data_min': data_min, 'data_max': data_max, 'y': y} attack_ret, attack_images, attack_margin = pgd_attack(**attack_args) ret.append([imag_idx, 0, 0, time.time()-start_attack, new_idx, -3, np.inf, np.inf]) if attack_ret: # Attack success. verified_status = "unsafe-pgd" verified_acc -= 1 attack_success.append(imag_idx) print(f"Result: image {imag_idx} attack success!") example_time.append(time.time() - example_start_time) print(f'Wall time: {example_time[-1]}') continue # continue # uncomment for checking pgd attacking results init_global_lb = saved_bounds = saved_slopes = None # Incomplete verification is enabled by default. The intermediate lower and upper bounds will be reused in bab and mip. if not verified_success and (arguments.Config["general"]["enable_incomplete_verification"] or arguments.Config["general"]["complete_verifier"] == "bab-refine"): start_incomplete = time.time() data = x if arguments.Config["specification"]["type"] == 'lp': # Lp norm. if arguments.Config["specification"]["norm"] == np.inf: if data_max is None: data_ub = data + perturb_eps # perturb_eps is already normalized. data_lb = data - perturb_eps else: data_ub = torch.min(data + perturb_eps, data_max) data_lb = torch.max(data - perturb_eps, data_min) else: data_ub = data_lb = data elif arguments.Config["specification"]["type"] == 'bound': # Given lower and upper bounds *per example per pixel*. 
data_lb = data_min data_ub = data_max else: raise ValueError(f'Unexpected specification type {arguments.Config["specification"]["type"]}') verified_status, init_global_lb, saved_bounds, saved_slopes = incomplete_verifier(model_ori, x, y, data_ub=data_ub, data_lb=data_lb, eps=perturb_eps) verified_success = verified_status != "unknown" if not verified_success: lower_bounds, upper_bounds = saved_bounds[1], saved_bounds[2] arguments.Config["bab"]["timeout"] -= (time.time()-start_incomplete) ret.append([imag_idx, 0, 0, time.time()-start_incomplete, new_idx, -1, np.inf, np.inf]) if verified_success: print(f"Result: image {imag_idx} verification success (with incomplete verifier)!") verified_success_list.append(imag_idx) example_time.append(time.time() - example_start_time) print(f'Wall time: {example_time[-1]}') continue if arguments.Config["attack"]["pgd_order"] == "after": start_attack = time.time() attack_args = {'dataset': attack_dataset, 'model': model_ori, 'x': x, 'max_eps': perturb_eps, 'data_min': data_min, 'data_max': data_max, 'y': y} attack_ret, attack_images, attack_margin = pgd_attack(**attack_args) ret.append([imag_idx, 0, 0, time.time()-start_attack, new_idx, -3, np.inf, np.inf]) if attack_ret: # Attack success. verified_status = "unsafe-pgd" verified_acc -= 1 attack_success.append(imag_idx) print(f"Result: image {imag_idx} attack success!") example_time.append(time.time() - example_start_time) print(f'Wall time: {example_time[-1]}') continue elif arguments.Config["attack"]["enable_mip_attack"]: c = torch.eye(arguments.Config["data"]["num_outputs"]).type_as(data)[[y]].unsqueeze(1) - torch.eye(arguments.Config["data"]["num_outputs"]).type_as(data).unsqueeze(0) lirpa_model, lower_bounds, upper_bounds, masks = saved_bounds[:4] lirpa_model.build_mip_model(lower_bounds, upper_bounds, arguments.Config["bab"]["timeout"], arguments.Config["solver"]["mip"]["parallel_solvers"], arguments.Config["solver"]["mip"]["solver_threads"]) total_unstable = 0 for layer_i, m in enumerate(masks): unstable = int(m.sum().item()) total_unstable += unstable print(f'layer {layer_i} has {unstable} unstable neurons') print(f'Total {total_unstable} unstable neurons.') attack_ret = False labels_to_verify = attack_margin.argsort().squeeze().tolist() print('Sorted order for labels to attack:', labels_to_verify) for target in labels_to_verify: if target != y: if init_global_lb[0][target].item() > 0: print(f'Label {target} is already verified.') continue attack_image_target = target if target < y else target - 1 adv_pool = AdvExamplePool(lirpa_model.net, masks, C=c[:, target:target+1]) # Add adversarial image for the specific target only. adv_pool.add_adv_images(attack_images[:, :, attack_image_target].view((-1, *attack_images.shape[-3:]))) neuron_idx, coeff = adv_pool.get_activation_pattern_from_pool() # The initial starting point and activation pattern has a batch dimension because there can be multiple initializations. 
selected_advs = adv_pool.adv_pool best_adv = torch.stack([adv.x for adv in selected_advs], dim=0) best_adv_pattern = [torch.stack([adv.activation_pattern[layer_i] for adv in selected_advs], dim=0) for layer_i in range(adv_pool.nlayers)] print(f'Best adv example in pool: {adv_pool.adv_pool[0].obj}, worse {adv_pool.adv_pool[-1].obj}') print(f'Target label {target} has {len(coeff)} out of {total_unstable} unstable neurons fixed.') attack_ret, solver_results = lirpa_model.update_mip_model_fix_relu([neuron_idx], [coeff], target, arguments.Config["solver"]["mip"]["parallel_solvers"], arguments.Config["solver"]["mip"]["solver_threads"], async_mip=False, best_adv=[best_adv], adv_activation_pattern=[best_adv_pattern]) with torch.no_grad(): pred = lirpa_model.net(solver_results[0][3].to(lirpa_model.net.device)).squeeze(0) attack_margin = pred[y] - pred print(f"attack margin: {attack_margin}, for label {target}: {pred[y] - pred[target]}") if attack_ret: break if attack_ret: # Attack success. verified_status = "unsafe-mip_attack" verified_acc -= 1 attack_success.append(imag_idx) print(f"Result: image {imag_idx} attack success!") example_time.append(time.time() - example_start_time) print(f'Wall time: {example_time[-1]}') continue if arguments.Config["general"]["mode"] == "verified-acc": if arguments.Config["attack"]["pgd_order"] != "skip": # .reshape(-1) to handle the case where there is only 1 element. labels_to_verify = attack_margin.argsort().squeeze().reshape(-1).tolist() print('Sorted order for labels to attack:', labels_to_verify) elif arguments.Config["general"]["enable_incomplete_verification"]: # We have initial incomplete bounds. labels_to_verify = init_global_lb.argsort().squeeze(0).tolist() else: labels_to_verify = list(range(arguments.Config["data"]["num_outputs"])) elif arguments.Config["general"]["mode"] == "runnerup": labels_to_verify = [logit_pred.argsort(descending=True)[1]] elif arguments.Config["general"]["mode"] == "specify-target": labels_to_verify = [target_label[imag_idx]] else: raise ValueError("unknown verification mode") # MIP or MIP refined bounds. 
if not verified_success and (arguments.Config["general"]["complete_verifier"] == "mip" or arguments.Config["general"]["complete_verifier"] == "bab-refine"): start_refine = time.time() verified_status, init_global_lb, lower_bounds, upper_bounds = mip(saved_bounds=saved_bounds, y=y, labels_to_verify=labels_to_verify) verified_success = verified_status != "unknown-mip" if verified_status == "unknown-mip": verified_acc -= 1 mip_unknown.append(imag_idx) elif verified_status == "unsafe-mip": verified_acc -= 1 mip_unsafe.append(imag_idx) elif verified_status == "safe-mip" or verified_status == "safe-incomplete-refine": mip_safe.append(imag_idx) arguments.Config["bab"]["timeout"] -= (time.time()-start_refine) ret.append([imag_idx, 0, 0, time.time()-start_refine, new_idx, -2, np.inf, np.inf]) print("time threshold left for bab:", arguments.Config["bab"]["timeout"]) if verified_success: if verified_status == "safe-mip": print(f"Result: image {imag_idx} verification success (with mip)!") verified_success_list.append(imag_idx) elif verified_status == "safe-incomplete-refine": print(f"Result: image {imag_idx} verification success (with mip refine)!") verified_success_list.append(imag_idx) elif verified_status == "unsafe-mip": print(f"Result: image {imag_idx} attack success (with mip)!") attack_success.append(imag_idx) else: print(f"Warning: verified status {verified_status} not supported!") exit() example_time.append(time.time() - example_start_time) print(f'Wall time: {example_time[-1]}') continue elif arguments.Config["general"]["complete_verifier"] == 'skip': print(f"Result: image {imag_idx} verification failure (complete verifier skipped as requested).") verified_acc -= 1 verified_failed.append(imag_idx) example_time.append(time.time() - example_start_time) print(f'Wall time: {example_time[-1]}') continue elif verified_status == "unknown-mip": print(f"Result: image {imag_idx} verification failure (with mip).") example_time.append(time.time() - example_start_time) print(f'Wall time: {example_time[-1]}') continue pidx_all_verified = True bab_attack_success = False for pidx in labels_to_verify: if isinstance(pidx, torch.Tensor): pidx = pidx.item() print('##### [{}:{}] Tested against {} ######'.format(new_idx, imag_idx, pidx)) if pidx == y: print("groundtruth label, skip!") ret.append([imag_idx, 0, 0, 0, new_idx, pidx, np.inf, np.inf]) continue torch.cuda.empty_cache() gc.collect() start_inner = time.time() # attack_images shape: (1, batch, restarts, num_outputs-1, c, h, w) # select target label attack_images according to pidx. New shape (restarts, c, h, w). targeted_attack_images = None if attack_images is not None: targeted_attack_images = attack_images[0, :, pidx if pidx < y else pidx - 1] attack_args.update({'target': pidx, 'only_target_attack': True}) attack_args.update({'data_max': torch.min(x + perturb_eps, data_max)}) attack_args.update({'data_min': torch.max(x - perturb_eps, data_min)}) arguments.attack_args = attack_args else: arguments.attack_args = None try: if arguments.Config["general"]["enable_incomplete_verification"]: # Reuse results from incomplete results, or from refined MIPs. # skip the prop that already verified rlb, rub = list(lower_bounds), list(upper_bounds) rlb[-1] = rlb[-1][0, pidx] rub[-1] = rub[-1][0, pidx] # print(init_global_lb[0].min().item(), init_global_lb[0].min().item() - arguments.Config["bab"]["decision_thresh"] <= -100.) 
if init_global_lb[0].min().item() - arguments.Config["bab"]["decision_thresh"] <= -100.: print(f"Initial alpha-CROWN with poor bound {init_global_lb[0].min().item()}. We will run not branch and bound.") l, u, nodes, glb_record = rlb[-1].item(), float('inf'), 0, [] elif init_global_lb[0, pidx] >= arguments.Config["bab"]["decision_thresh"]: print(f"Initial alpha-CROWN verified for label {pidx} with bound {init_global_lb[0, pidx]}") l, u, nodes, glb_record = rlb[-1].item(), float('inf'), 0, [] else: if arguments.Config["bab"]["timeout"] < 0: print(f"Image {imag_idx} verification failure (running out of time budget).") l, u, nodes, glb_record = rlb[-1].item(), float('inf'), 0, [] else: # feed initialed bounds to save time l, u, nodes, glb_record = bab(model_ori, x, pidx, y=y, eps=perturb_eps, data_ub=data_max, data_lb=data_min, lower_bounds=lower_bounds, upper_bounds=upper_bounds, reference_slopes=saved_slopes, attack_images=targeted_attack_images) else: assert arguments.Config["general"]["complete_verifier"] == "bab" # for MIP and BaB-Refine. # Main function to run verification l, u, nodes, glb_record = bab(model_ori, x, pidx, y=y, eps=perturb_eps, data_ub=data_max, data_lb=data_min, attack_images=targeted_attack_images) time_cost = time.time() - start_inner print('Image {} label {} verification end, final lower bound {}, upper bound {}, time: {}'.format(imag_idx, pidx, l, u, time_cost)) ret.append([imag_idx, l, nodes, time_cost, new_idx, pidx, u, attack_margin[pidx] if attack_margin is not None else np.inf]) arguments.Config["bab"]["timeout"] -= time_cost lb_record.append([glb_record]) print(imag_idx, l) np.save(save_path, np.array(ret)) # np.save('lb_record_' + save_path, np.array(lb_record)) if u < arguments.Config["bab"]["decision_thresh"]: verified_status = "unsafe-bab" pidx_all_verified = False bab_attack_success = True break elif l < arguments.Config["bab"]["decision_thresh"]: pidx_all_verified = False # break to run next sample save time if any label is not verified. break except KeyboardInterrupt: print('time:', imag_idx, time.time()-start_inner, "\n",) print(ret) pidx_all_verified = False break example_time.append(time.time() - example_start_time) if not pidx_all_verified: verified_acc -= 1 if bab_attack_success: attack_success.append(imag_idx) print(f'Result: image {imag_idx} attack success (with branch and bound)!') else: verified_failed.append(imag_idx) print(f'Result: image {imag_idx} verification failure (with branch and bound).') else: verified_success_list.append(imag_idx) print(f'Result: image {imag_idx} verification success (with branch and bound)!') # Make sure ALL tensors used in this loop are deleted here. del init_global_lb, saved_bounds, saved_slopes print(f'Wall time: {example_time[-1]}') # some results analysis np.set_printoptions(suppress=True) ret = np.array(ret) print(f'\nnumber of correctly classified examples: {nat_acc}') print(f'incorrectly classified idx (total {len(skipped_examples)}):', skipped_examples) print(f'attack success idx (total {len(attack_success)}):', attack_success) if len(attack_success) > 0: print('attack_success rate:', len(attack_success)/len(bnb_ids)) np.save('Attack-success_{}_{}_start{}_end{}.npy'. 
format(arguments.Config['model']['name'], arguments.Config["data"]["dataset"], arguments.Config["data"]["start"], arguments.Config["data"]["end"]), np.array(attack_success)) print(f'verification success idx (total {len(verified_success_list)}):', verified_success_list) print(f'verification failure idx (total {len(verified_failed)}):', verified_failed) if arguments.Config["general"]["complete_verifier"] == "mip": print("##### Complete MIP solver summary #####") print(f"mip verified safe idx: {mip_safe}") print(f"mip unsafe idx: {mip_unsafe}") print(f"mip unknown idx: {mip_unknown}") print(f"mip verified safe rate {len(mip_safe)/len(bnb_ids)}, " f"unsafe rate {len(mip_unsafe)/len(bnb_ids)}, " f"unknown rate {len(mip_unknown)/len(bnb_ids)}, " f"total {len(bnb_ids)}") print("final verified acc: {}%[{}]".format(verified_acc/len(bnb_ids)*100., len(bnb_ids))) np.save('Verified-acc_{}_{}_start{}_end{}_{}_branching_{}.npy'. format(arguments.Config['model']['name'], arguments.Config["data"]["dataset"], arguments.Config["data"]["start"], arguments.Config["data"]["end"], verified_acc, arguments.Config["bab"]["branching"]["method"]), np.array(verified_failed)) total_verification = len(verified_success_list) + len(verified_failed) print(f"verifier is called on {total_verification} examples.") print("total verified:", verified_acc) if ret.size > 0: # print("mean time [total:{}]: {}".format(len(bnb_ids), ret[:, 3].sum()/float(len(bnb_ids)))) print("mean time [cnt:{}] (excluding attack success): {}".format(total_verification, ret[:, 3][ret[:, 5] != -3].sum()/float(total_verification if total_verification != 0 else "nan"))) if len(attack_success) > 0: print("mean time [cnt:{}] (including attack success): {}".format(total_verification + len(attack_success), ret[:, 3].sum() / float(total_verification + len(attack_success)))) if __name__ == "__main__": config_args() main()
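
# --- Illustrative invocation sketch (not part of the original script) ---
# A hypothetical command line; the flag names come from config_args() above
# (plus the shared flags registered elsewhere in arguments.py), while the
# script, model and dataset values below are placeholders:
#
#     python robustness_verifier.py --model my_model --dataset CIFAR \
#         --mode verified-acc --complete_verifier bab --pgd_restarts 30
#
# Passing --mode crown-only-verified-acc or --mode attack-only runs only the
# quick bound-computation or PGD-attack paths handled in main().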
py
1a592828f43cca3e6d7363fd7aab3d021553a819
# Lint as: python2, python3 # Copyright 2019 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """An experimental new unified TPU executor.""" from __future__ import absolute_import from __future__ import division from __future__ import print_function import os from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo import compat as tf from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import base_model from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import checkpointer from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import cluster_factory from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import ml_perf_log as mlp_log from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import multitask_model from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import py_utils from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.lingvo.core import task_scheduler from REDACTED.tensorflow_models.mlperf.models.rough.transformer_lingvo.google.trainer import base_runner from REDACTED.tensorflow.python.tpu import device_assignment as device_assignment_lib # pylint: disable=g-direct-tensorflow-import tf.flags.DEFINE_bool( 'cluster_placer_in_executor', False, 'If True, cluster.GetPlacer() is used in Executor. ' + 'When running on TPU model weights can be distributed ' + 'across TPU hosts, for outrageously large models this ' + 'enables sharded checkpointing and reduces host memory ' + 'requirements, see _LeastLoadedPlacer in cluster.py.') tf.flags.DEFINE_bool( 'disable_meta_optimizer_in_executor', False, 'Disabling the grappler meta_optimizer improves start-up time.') FLAGS = tf.flags.FLAGS def GetExecutorParams(model_name, cluster_params, model_registry): """Get the params needed to instantiate the Executor. Args: model_name: A model name regsitered in the ModelRegistry. cluster_params: A cluster hyperparams object. model_registry: A ModelRegistry object. Returns: A tuple (dict, Params): - ps_params_dict: High-level task name -> ProgramScheduleParams - train_cfg: A SingleTaskModelParams or MultiTaskModelParams. """ ps_params_dict = {} with cluster_factory.Cluster(cluster_params): ps_cfg = model_registry.GetProgramSchedule(model_name) train_cfg = model_registry.GetParams(model_name, 'Train') train_cfg.cluster = cluster_params if issubclass(train_cfg.cls, base_model.MultiTaskModel): multi_task_train_cfg = train_cfg # Create SingleTaskModelParams from a MultiTaskModelParams. 
for k, _ in multi_task_train_cfg.task_params.IterParams(): single_task_params = base_model.SingleTaskModel.Params() single_task_params.cluster = multi_task_train_cfg.cluster single_task_params.input = multi_task_train_cfg.input.Get(k) single_task_params.task = multi_task_train_cfg.task_params.Get(k) single_task_params.train = single_task_params.task.train if k not in ps_cfg.program_schedule_dict: tf.logging.fatal( 'Could not find %s in ps_cfg.program_schedule_dict: %s', k, ps_cfg) program_schedule_params = ps_cfg.program_schedule_dict[k] program_schedule_params.task_dict = {'Train': single_task_params} for eval_dataset_name in program_schedule_params.dataset_names: multi_task_eval_cfg = model_registry.GetParams( model_name, eval_dataset_name) eval_task_params = base_model.SingleTaskModel.Params() eval_task_params.cluster = single_task_params.cluster eval_task_params.input = multi_task_eval_cfg.input.Get(k) eval_task_params.task = multi_task_eval_cfg.task_params.Get(k) program_schedule_params.task_dict[ eval_dataset_name] = eval_task_params ps_params_dict[k] = program_schedule_params else: program_schedule_params = ps_cfg program_schedule_params.task_dict = {'Train': train_cfg} for eval_dataset_name in program_schedule_params.dataset_names: task_eval_params = model_registry.GetParams(model_name, eval_dataset_name) task_eval_params.cluster = train_cfg.cluster program_schedule_params.task_dict[eval_dataset_name] = task_eval_params ps_params_dict[''] = program_schedule_params return ps_params_dict, train_cfg class ExecutorTpu(base_runner.BaseRunner): """An experimental runner that does arbitrary multi-program execution on TPU. Overview of operation: - During construction, all programs construct their sub-graphs, in a sense creating a mega-graph. - A sequence of programs is then executed in-whole associated with that task. eg: [train x 1000 steps, checkpoint, eval 4 steps, decode 2 steps] - In this manner, programs and higher-level tasks cooperatively time-slice share the TPU. """ def __init__(self, train_cfg, ps_params_dict, model_task_name, logdir, tf_master, **kwargs): """Construct an ExecutorTpu BaseRunner. Args: train_cfg: SingleTaskModelParams or MultiTaskModelParams ps_params_dict: A dict of top-level task name -> ProgramSchedule params, if train_cfg is a SingleTaskModelParams, we expect only one entry. model_task_name: An override for multi-task models, currently unused. logdir: String path to the log directory to output to. tf_master: String path to the master job, e.g. 'local'. **kwargs: keyword args to pass through to BaseRunner. """ super(ExecutorTpu, self).__init__(train_cfg, model_task_name, logdir, tf_master, **kwargs) self._cluster_def = self._cluster.worker_cluster_def # There is a single Executor task assert self._cluster.num_replicas == 1 data_parallelism = self._cluster.num_splits_per_client assert data_parallelism num_devices_per_split = self._cluster.num_devices_per_split tf.logging.info('data_parallelism: %d, num_devices_per_split: %d', data_parallelism, num_devices_per_split) self.task_scheduler = None self._checkpoint_dir = os.path.join(logdir, 'train') self._variable_renaming_rules = [] self._ml_perf = None # If this is a multi-task model, grab the params for the TaskScheduler. 
if issubclass(train_cfg.cls, base_model.SingleTaskModel): tf.logging.info('single_task_model') assert len(ps_params_dict) == 1 self._model_task_name = list(ps_params_dict.keys())[0] self._single_task_mode = True elif issubclass(train_cfg.cls, base_model.MultiTaskModel): tf.logging.info('multi_task_model') if issubclass(train_cfg.cls, multitask_model.RegExSharedVariableModel): self._variable_renaming_rules = train_cfg.variable_renaming_rules if train_cfg.task_schedule is None: task_schedule_params = task_scheduler.ConstantScheduler.Params() task_schedule_params.task_probs = sorted( list(train_cfg.task_probs.IterParams())) else: task_schedule_params = train_cfg.task_schedule self.task_scheduler = task_schedule_params.Instantiate() self._single_task_mode = False else: tf.logging.fatal( 'Model %s is not a sub-class of SingleTaskModel or MultiTaskModel', train_cfg.cls) tf.logging.info('train_cfg.cls: %s', train_cfg.cls) self._WriteToLog(train_cfg.ToText(), self._checkpoint_dir, 'params.txt') self._program_schedule_dict = {} self._programs = [] for task_string, program_schedule_params in ps_params_dict.items(): program_schedule_params.logdir = logdir program_schedule_params.num_splits_per_client = data_parallelism program_schedule_params.task_name = task_string ps = program_schedule_params.Instantiate() self._program_schedule_dict[task_string] = ps tf.logging.info('program_schedule_params: %s', program_schedule_params.ToText()) self._programs += ps.Programs() if program_schedule_params.ml_perf.benchmark_name is not None: self._ml_perf = program_schedule_params.ml_perf tf.logging.info('num_programs: %d', len(self._programs)) if self._ml_perf is not None: self._ml_perf_log = True mlp_log.mlperf_print(key='benchmark', value=self._ml_perf.benchmark_name) else: self._ml_perf_log = False # BaseRunner legacy self.enqueue_ops = None @py_utils.RetryOnTransientTfError() def _WaitTillInit(): """Wait until the model is ready.""" try: with self._graph.as_default(), self._GetSession( cluster_def=self._cluster_def, disable_meta_optimizer=FLAGS.disable_meta_optimizer_in_executor ) as sess: topology = sess.run( tf.tpu.initialize_system(embedding_config=None, job=None)) device_assignment = device_assignment_lib.device_assignment( topology, computation_shape=py_utils.ComputationShape( num_devices_per_split), num_replicas=data_parallelism) py_utils.SetTpuDeviceAssignment(device_assignment) tf.logging.info('device_assignment.core_assignment: %s', str(device_assignment.core_assignment)) tf.logging.info( 'device_assignment.topology.device_coordinates: %s', str(device_assignment.topology.device_coordinates)) except py_utils.transient_tf_errors as e: tf.logging.info('TPU initialization failed: %s', e) raise if self._ml_perf_log: mlp_log.mlperf_print(key='cache_clear', value=True) mlp_log.mlperf_print(key='init_start', value=None) _WaitTillInit() with self._graph.as_default(), tf.container(self._container_id): tf.logging.info('self._cluster.job_spec.name: %s', self._cluster.job_spec.name) with self._cluster, tf.device( self._cluster.job_spec.name if not FLAGS.cluster_placer_in_executor else self._cluster.GetPlacer()): with py_utils.VariableRenameScope(self._variable_renaming_rules): _ = py_utils.GetOrCreateGlobalStepVar() for program in self._programs: program.BuildTpuSubgraph() for program in self._programs: program.SetStatusMessageFn(self._SetStatusMessage) program.CreateCheckpointer() self._initialize_tables = tf.tables_initializer() self._initialize_local_vars = tf.local_variables_initializer() 
self.save_only_checkpointer = checkpointer.Checkpointer( self._checkpoint_dir, model=None, train_params=train_cfg.train, save_only=True) def Start(self): # Run training. self._RunLoop('executor_tpu', self._Loop) def _Loop(self): with tf.container(self._container_id), self._GetSession( cluster_def=self._cluster_def, disable_meta_optimizer=FLAGS.disable_meta_optimizer_in_executor ) as sess: # Initialize the variables first, if needed. for program in self._programs: program.RestoreIfNeeded(sess) program.Compile(sess) sess.run(self._initialize_tables) sess.run(self._initialize_local_vars) while True: global_step = sess.run(py_utils.GetGlobalStep()) if self._ShouldStop(sess, global_step): tf.logging.info('Training finished.') if not self._ml_perf_log: self.save_only_checkpointer.Save(sess, global_step) return # If a task is explicitly selected, only run the programs associated # with that task. if self._single_task_mode or self._model_task_name: tf.logging.info('Single task mode: %s', self._model_task_name) program_schedule = self._program_schedule_dict[self._model_task_name] else: # Otherwise, sample a task. model_task = self.task_scheduler.Sample(global_step) tf.logging.info('Sampled %s', model_task) program_schedule = self._program_schedule_dict[model_task] done = program_schedule.Run(sess) if done: tf.logging.info('Program schedule told us to stop.') return # TODO(blee): More complex saving rules. Currently, we assume # we save after every task's program schedule execution. # # global_step local variable above is a result of sess.run, not a # tf variable, so when we do save_only_checkpointer.Save(...) here # py_utils.GetGlobalStep() is ahead of it by # (train_executions_per_eval * train_steps_per_loop) # steps ahead already, due to program_schedule.Run(sess). # if not self._ml_perf_log: self.save_only_checkpointer.Save(sess, py_utils.GetGlobalStep())
py
1a5929ad979bbc45f4954c6069e8ec55e60b6b09
import types import sys import os import io from pathlib import Path from urllib.parse import urlparse import logging import asyncio import tarfile from io import BytesIO import mimetypes import functools import ssl import click from girder_client import GirderClient from flask import Flask, send_from_directory, jsonify import aiohttp from async_lru import alru_cache import tenacity class AsyncGirderClient(object): def __init__(self, session, api_url): self._ratelimit_semaphore = asyncio.Semaphore(5) self._api_url = api_url.rstrip('/') self._folder_create_semaphore = asyncio.Semaphore() self._item_create_semaphore = asyncio.Semaphore() self._session = session async def authenticate(self, api_key): params = {'key': api_key} async with self._session.post('%s/api_key/token' % (self._api_url), params=params) as r: r.raise_for_status() auth = await r.json() self._headers = { 'Girder-Token': auth['authToken']['token'] } @tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError), wait=tenacity.wait_exponential(max=10), stop=tenacity.stop_after_attempt(10)) async def post(self, path, headers=None, params=None, raise_for_status=True, **kwargs): if params is not None: params = {k:str(v) for (k,v) in params.items()} if headers is None: headers = self._headers else: headers.update(self._headers) async with self._ratelimit_semaphore: async with self._session.post('%s/%s' % (self._api_url, path), headers=headers, params=params, **kwargs) as r: if raise_for_status: r.raise_for_status() return await r.json() @tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError), wait=tenacity.wait_exponential(max=10), stop=tenacity.stop_after_attempt(10)) async def put(self, path, headers=None, params=None, raise_for_status=True, **kwargs): if params is not None: params = {k:str(v) for (k,v) in params.items()} if headers is None: headers = self._headers else: headers.update(self._headers) async with self._ratelimit_semaphore: async with self._session.put('%s/%s' % (self._api_url, path), headers=headers, params=params, **kwargs) as r: if raise_for_status: r.raise_for_status() return await r.json() @tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError), wait=tenacity.wait_exponential(max=10), stop=tenacity.stop_after_attempt(10)) async def get(self, path, raise_for_status=True, params=None, status=False, **kwargs): if params is not None: params = {k:str(v) for (k,v) in params.items()} async with self._ratelimit_semaphore: async with self._session.get('%s/%s' % (self._api_url, path), headers=self._headers, params=params, **kwargs) as r: if raise_for_status and not status: r.raise_for_status() if status: return (r.status, await r.json()) else: return await r.json() @alru_cache(maxsize=1000) async def create_folder(self, parent_id, parent_type, name): params = { 'parentId': parent_id, 'parentType': parent_type, 'name': name, 'description': '', 'reuseExisting': True } # We need this sempahore to prevent two folders with the same name be # create, this opertion in not atomic in Girder. 
async with self._folder_create_semaphore: return await self.post('folder', params=params) async def list_item(self, folder, name): params = { 'parentId': folder['_id'], 'name': name } return await self.get('item', params=params) @alru_cache(maxsize=1000) async def create_item(self, folder_id, name): params = { 'folderId': folder_id, 'name': name, 'description': '', 'reuseExisting': True } # We need this sempahore to prevent two items with the same name be # create, this opertion in not atomic in Girder. async with self._item_create_semaphore: return await self.post('item', params=params) async def upload_file(self, item, file_name, bits, size): mime_type, _ = mimetypes.guess_type(file_name) params = { 'parentType': 'item', 'parentId': item['_id'], 'name': file_name, 'size': size, 'mimeType': mime_type } headers = { 'Content-Length': str(size) } headers.update(self._headers) upload = await self.post('file', params=params, headers=headers, data=bits) return upload async def set_metadata(self, resource_type, _id, meta, semaphore=None): # The metadata put operation is not atomic! if semaphore is not None: await semaphore.acquire() try: return await self.put('%s/%s/metadata' % (resource_type, _id), json=meta) finally: if semaphore is not None: semaphore.release() async def get_metadata(self, resource_type, _id): resource = await self.get('%s/%s' % (resource_type, _id)) return resource.get('meta') @alru_cache(maxsize=1000) async def resource_path(self, _id, resource_type): params = { 'type': resource_type } return await self.get('resource/%s/path' % _id, params=params) async def lookup_resource(self, path): params = { 'path': path, 'test': True } (status, json_body) = await self.get('resource/lookup', params=params, status=True) if status == 400: return None else: return json_body async def file_exist(self, item, name): item_path = await self.resource_path(item['_id'], 'item') return await self.lookup_resource('%s/%s' % (item_path, name)) is not None async def ensure_folders(gc, parent, folders): for folder_name in folders: parent = await gc.create_folder(parent['_id'], 'folder', folder_name) return parent async def upload_image(gc, folder, shot_name, run_name, variable, timestep, bits, size, check_exists=False): log = logging.getLogger('adash') image_path = Path(variable['image_name']) image_folders = [shot_name, run_name, variable['group_name']] parent_folder = await ensure_folders(gc, folder, image_folders) name = None for k in ['name', 'variable_name']: name = variable.get(k) if name is not None: break if name is None: raise Exception('Unable to extract variable name.') variable_item = await gc.create_item(parent_folder['_id'], name) image_name = '%s%s' % (str(timestep).zfill(4), image_path.suffix) create = True if check_exists: create = not await gc.file_exist(variable_item, image_name) if create: log.info('Uploading "%s/%s/%s".' 
% ('/'.join([str(i) for i in image_folders]), name, image_name)) await gc.upload_file(variable_item, image_name, bits, size) @tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError), wait=tenacity.wait_exponential(max=10), stop=tenacity.stop_after_attempt(10)) async def fetch_variables(session, upload_site_url, shot_name, run_name, timestep): async with session.get('%s/shots/%s/%s/%d/variables.json' % (upload_site_url, shot_name, run_name, timestep)) as r: if r.status == 404: return None r.raise_for_status() return await r.json() @tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError), wait=tenacity.wait_exponential(max=10), stop=tenacity.stop_after_attempt(10)) async def fetch_images_archive(session, upload_site_url, shot_name, run_name, timestep): async with session.get('%s/shots/%s/%s/%d/images.tar.gz' % (upload_site_url, shot_name, run_name, timestep)) as r: if r.status == 404: return None r.raise_for_status() return await r.read() async def fetch_images(session, gc, folder, upload_site_url, shot_name, run_name, timestep, metadata_semaphore, check_exists=False): log = logging.getLogger('adash') log.info('Fetching variables.json for timestep: "%d".' % timestep) # Fetch variables.json variables = await fetch_variables(session, upload_site_url, shot_name, run_name, timestep) if variables is None: log.warning('Unable to fetch variables.json. Timestep "%d" is missing.' % timestep) else: log.info('Fetching images.tar.gz for timestep: "%d".' % timestep) buffer = BytesIO(await fetch_images_archive(session, upload_site_url, shot_name, run_name, timestep)) tasks = [] with tarfile.open(fileobj=buffer) as tgz: for v in variables: info = None k = '%s/%s' % (v['group_name'], v['image_name']) try: info = tgz.getmember(k) except KeyError: pass if info is None: raise Exception('Unable to extract image: "%s"' % k) br = tgz.extractfile(info) bits = br.read() tasks.append( asyncio.create_task( upload_image(gc, folder, shot_name, run_name, v, timestep, bits, info.size, check_exists) ) ) # Gather, so we fetch all images for this timestep before moving on to the # next one! await asyncio.gather(*tasks) # Set the current timestep metadata = { 'currentTimestep': timestep } run_folder = await ensure_folders(gc, folder, [shot_name, run_name]) await gc.set_metadata('folder', run_folder['_id'], metadata, metadata_semaphore) # scheduler used to schedule fetch_images request inorder, so we fetch the images # in timestep order. async def fetch_images_scheduler(queue): log = logging.getLogger('adash') while True: try: fetch = await queue.get() log.info(fetch) await fetch queue.task_done() except asyncio.CancelledError: raise except: log.exception('Exception occured fetching images.') @tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError), wait=tenacity.wait_exponential(max=10), stop=tenacity.stop_after_attempt(10)) async def fetch_run_time(session, upload_site_url, shot_name, run_name): run_path = 'shots/%s/%s/time.json' % (shot_name, run_name) async with session.get('%s/%s' % (upload_site_url, run_path), raise_for_status=False) as r: if r.status == 404: return None return await r.json() async def watch_run(session, gc, folder, upload_site_url, shot_name, run_name, username, machine, run_poll_interval): log = logging.getLogger('adash') log.info('Starting to watch run "%s" shot "%s".' 
% (run_name, shot_name)) fetch_images_queue = asyncio.Queue() metadata_semaphore = asyncio.Semaphore() scheduler = asyncio.create_task( fetch_images_scheduler(fetch_images_queue) ) last_timestep = None metadata = { 'username': username, 'machine': machine } run_folder = await ensure_folders(gc, folder, [shot_name, run_name]) await gc.set_metadata('folder', run_folder['_id'], metadata, metadata_semaphore) while True: # Check to see what the last successfully processed timestep was metadata = await gc.get_metadata('folder', run_folder['_id']) if last_timestep is None: last_timestep = metadata.get('currentTimestep') if last_timestep is not None: log.info('Last timestep processed: "%d"' % last_timestep) else: log.info('No previous timestep have been processed.') last_timestep = 0 # Now see where the simulation upload has got to run_path = 'shots/%s/%s/time.json' % (shot_name, run_name) time = await fetch_run_time(session, upload_site_url, shot_name, run_name) # Wait for time.json to appear if time is None: log.warn('Unable to fetch "%s", waiting for 1 sec.' % run_path) await asyncio.sleep(1) continue new_timestep = time['current'] complete = time.get('complete', False) # Are we done. The run is marked as complete and we have ingested all the # timesteps. if complete and last_timestep == new_timestep: log.info('Run "%s" is complete.' % run_name) await fetch_images_queue.join() scheduler.cancel() break # Did we miss any timesteps? delta = new_timestep - last_timestep # We have missed to timesteps so need to catch up! if delta > 1: # First schedule a fetch of the next timesetp checking if the files # exists ( this is the one that could be partially processed. fetch_images_queue.put_nowait( fetch_images(session, gc, folder, upload_site_url, shot_name, run_name, last_timestep+1, metadata_semaphore, check_exists=True) ) # Then process the rest normally for t in range(last_timestep+2, new_timestep+1): fetch_images_queue.put_nowait( fetch_images(session, gc, folder, upload_site_url, shot_name, run_name, t, metadata_semaphore) ) # We successfully processed the last timestep so just schedule the processing # of the next. elif delta == 1: fetch_images_queue.put_nowait( fetch_images(session, gc, folder, upload_site_url, shot_name, run_name, new_timestep, metadata_semaphore, # If we processing the first timestep we need to check # the existence of the files, as the fetching of this # timestep may have failed before. 
last_timestep == 0) ) last_timestep = new_timestep await asyncio.sleep(run_poll_interval) @tenacity.retry(retry=tenacity.retry_if_exception_type(aiohttp.client_exceptions.ServerConnectionError), wait=tenacity.wait_exponential(max=10), stop=tenacity.stop_after_attempt(10)) async def fetch_shot_index(session, upload_site_url): async with session.get('%s/shots/index.json' % upload_site_url) as r: if r.status == 404: return None else: r.raise_for_status() return await r.json() async def watch_shots_index(session, gc, folder, upload_site_url, api_url, api_key, shot_poll_interval, run_poll_internval): log = logging.getLogger('adash') runs = set() shot_metadata_semaphore = asyncio.Semaphore() # Get users and machines metadata = await gc.get_metadata('folder', folder['_id']) users = set(metadata.get('users', [])) if metadata is not None else set() machines = set(metadata.get('machines', [])) if metadata is not None else set() while True: log.info('Fetching /shots/index.json') index = await fetch_shot_index(session, upload_site_url) if index is None: # Just wait for index.json to appear log.warn('Unable to fetch "shots/index.json", waiting for 1 sec.') await asyncio.sleep(1) continue for shot in index: username = shot['username'] users.add(username) machine = shot['machine_name'] machines.add(machine) # TODO Update the meta data shot_name = shot['shot_name'] run_name = shot['run_name'] run_key = '%s/%s' % ( shot_name, run_name) if run_key not in runs: asyncio.create_task( watch_run(session, gc, folder, upload_site_url, shot_name, run_name, username, machine, run_poll_internval) ) runs.add(run_key) metadata = { 'machines': list(machines), 'users': list(users) } await gc.set_metadata('folder', folder['_id'], metadata, shot_metadata_semaphore) await asyncio.sleep(shot_poll_interval) async def watch(folder_id, upload_site_url, api_url, api_key, shot_poll_interval, run_poll_internval): def ignore_aiohttp_ssl_eror(loop, aiohttpversion='3.5.4'): """Ignore aiohttp #3535 issue with SSL data after close There appears to be an issue on Python 3.7 and aiohttp SSL that throws a ssl.SSLError fatal error (ssl.SSLError: [SSL: KRB5_S_INIT] application data after close notify (_ssl.c:2609)) after we are already done with the connection. See GitHub issue aio-libs/aiohttp#3535 Given a loop, this sets up a exception handler that ignores this specific exception, but passes everything else on to the previous exception handler this one replaces. If the current aiohttp version is not exactly equal to aiohttpversion nothing is done, assuming that the next version will have this bug fixed. 
This can be disabled by setting this parameter to None """ if aiohttpversion is not None and aiohttp.__version__ != aiohttpversion: return orig_handler = loop.get_exception_handler() def ignore_ssl_error(loop, context): if context.get('message') == 'SSL error in data received': # validate we have the right exception, transport and protocol exception = context.get('exception') protocol = context.get('protocol') if ( isinstance(exception, ssl.SSLError) and exception.reason == 'KRB5_S_INIT' and isinstance(protocol, asyncio.sslproto.SSLProtocol) and isinstance(protocol._app_protocol, aiohttp.client_proto.ResponseHandler) ): if loop.get_debug(): asyncio.log.logger.debug('Ignoring aiohttp SSL KRB5_S_INIT error') return if orig_handler is not None: orig_handler(loop, context) else: loop.default_exception_handler(context) loop.set_exception_handler(ignore_ssl_error) ignore_aiohttp_ssl_eror(asyncio.get_running_loop()) async with aiohttp.ClientSession(connector=aiohttp.TCPConnector(verify_ssl=False)) as session: gc = AsyncGirderClient(session, api_url) await gc.authenticate(api_key) folder = { '_id': folder_id } await watch_shots_index(session, gc, folder, upload_site_url, api_url, api_key, shot_poll_interval, run_poll_internval) @click.command('watch', help='Watch upload site and ingest data into Girder') @click.option('-f', '--folder-id', help='the folder to ingest into. [default: GIRDER_FOLDER_IR env. variable]', envvar='GIRDER_FOLDER_ID') @click.option('-r', '--upload-site_url', help='the URL to the upload site to watch. [default: UPLOAD_SITE_URL env. variable]', envvar='UPLOAD_SITE_URL',) @click.option('-u', '--api-url', default='http://localhost:8080/api/v1', help='RESTful API URL ' '(e.g https://girder.example.com/api/v1). [default: GIRDER_API_URL env. variable]', envvar='GIRDER_API_URL') @click.option('-k', '--api-key', envvar='GIRDER_API_KEY', help='[default: GIRDER_API_KEY env. variable]') @click.option('-i', '--shot-poll-interval', default=30, type=int, help='shot poll interval (sec)') @click.option('-v', '--run-poll-interval', default=30, type=int, help='run poll interval (sec)') def main(folder_id, upload_site_url, api_url, api_key, shot_poll_interval, run_poll_interval): #gc = GC(api_url=api_url, api_key=api_key) if upload_site_url[-1] == '/': upload_site_url = upload_site_url[:-1] log = logging.getLogger('adash') log.info('Watching: %s' % upload_site_url) asyncio.run( watch(folder_id, upload_site_url, api_url, api_key, shot_poll_interval, run_poll_interval) )
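# Hedged usage sketch (not part of the original script): main() is a click
# command, so it needs to be exposed as a console entry point (or called from a
# small __main__ guard) before it can be run. Assuming the module is installed
# as `watch`, an invocation with placeholder values could look like:
#
#   watch --folder-id 5f0c2a0000000000000000aa \
#         --upload-site_url https://upload.example.com \
#         --api-url https://girder.example.com/api/v1 \
#         --api-key $GIRDER_API_KEY \
#         --shot-poll-interval 30 \
#         --run-poll-interval 30
#
# Every option can also be supplied through the environment variables named in
# the click decorators (GIRDER_FOLDER_ID, UPLOAD_SITE_URL, GIRDER_API_URL,
# GIRDER_API_KEY).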
py
1a592c938ee8d15012768a07ea9c4d6e02ef1920
from collections import OrderedDict import six from django.http import Http404 from django.utils.encoding import force_text from rest_framework import status from rest_framework import exceptions, status from rest_framework.compat import set_rollback from rest_framework.exceptions import PermissionDenied from rest_framework.response import Response from django.utils.translation import ugettext_lazy as _ class APIError(Exception): status_code = status.HTTP_500_INTERNAL_SERVER_ERROR default_detail = _('A server error occurred.') default_error_slug = 'internal_error' def __init__(self, detail=None, error_slug=None): if detail is not None: self.detail = force_text(detail) self.error_slug = force_text(error_slug) else: self.detail = force_text(self.default_detail) self.error_slug = force_text(self.default_error_slug) def __str__(self): return self.detail def custom_exception_handler(exc, context): """ Returns the response that should be used for any given exception. By default we handle the REST framework `APIException`, and also Django's built-in `Http404` and `PermissionDenied` exceptions. Any unhandled exceptions may return `None`, which will cause a 500 error to be raised. """ if isinstance(exc, exceptions.APIException): headers = {} if getattr(exc, 'auth_header', None): headers['WWW-Authenticate'] = exc.auth_header if getattr(exc, 'wait', None): headers['Retry-After'] = '%d' % exc.wait if isinstance(exc.detail, (list, dict)): # Concatenate all field and non_field errors for message: message = '' for key in exc.detail: try: if isinstance(exc.detail[key], str): message += exc.detail[key] + ' ' else: for error in exc.detail[key]: # Don't include duplicates in universal error message if error not in message: message += error + ' ' except TypeError: if key == 'non_field_errors': message = exc.detail[key][0] else: message = _('Invalid request.') if message.endswith(' '): message = message[:-1] # remove last space data = OrderedDict([('status', 'error'), ('message', message), ('data', exc.detail)]) else: data = OrderedDict([('status', 'error'), ('message', exc.detail)]) set_rollback() return Response(data, status=exc.status_code, headers=headers) elif isinstance(exc, Http404): msg = _('Not found.') data = {'status': 'error', 'message': six.text_type(msg)} set_rollback() return Response(data, status=status.HTTP_404_NOT_FOUND) elif isinstance(exc, PermissionDenied): msg = _('Permission denied.') data = {'status': 'error', 'message': six.text_type(msg)} set_rollback() return Response(data, status=status.HTTP_403_FORBIDDEN) # Note: Unhandled exceptions will raise a 500 error. return None
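# Hedged usage sketch (not part of the original module): custom_exception_handler
# only takes effect once it is registered under DRF's EXCEPTION_HANDLER setting.
# The dotted path below is an assumption about where this module lives; point it
# at the real import path of this file.

# in settings.py
REST_FRAMEWORK = {
    # Route every DRF exception through the handler above so error responses
    # share the {'status': 'error', 'message': ...} shape.
    'EXCEPTION_HANDLER': 'myproject.exceptions.custom_exception_handler',
}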
py
1a592e2de103578f205bb7b42c154c1f06cec99f
import torch import sys import csv import nestedtensor import utils import torchvision from torch.nn import functional as F import random class DETRNestedTensor(object): def __init__(self, tensors, mask): self.tensors = tensors self.mask = mask def to(self, *args, **kwargs): cast_tensor = self.tensors.to(*args, **kwargs) cast_mask = self.mask.to( *args, **kwargs) if self.mask is not None else None return type(self)(cast_tensor, cast_mask) def decompose(self): return self.tensors, self.mask @classmethod def from_tensor_list(cls, tensor_list): # TODO make this more general if tensor_list[0].ndim == 3: # TODO make it support different-sized images max_size = tuple(max(s) for s in zip(*[img.shape for img in tensor_list])) # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list])) batch_shape = (len(tensor_list),) + max_size b, c, h, w = batch_shape dtype = tensor_list[0].dtype device = tensor_list[0].device tensor = torch.zeros(batch_shape, dtype=dtype, device=device) mask = torch.ones((b, h, w), dtype=torch.bool, device=device) for img, pad_img, m in zip(tensor_list, tensor, mask): pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img) m[: img.shape[1], :img.shape[2]] = False else: raise ValueError('not supported') return cls(tensor, mask) # Performance tanks hard for lots of small Tensors as expected DEVICE = torch.device('cuda') NDIM = 256 NHEAD = 8 MODEL = torch.nn.MultiheadAttention(NDIM, NHEAD).to(DEVICE).eval() def run_benchmark(bsz, mean_i, mean_j, var, writer): RAND_INTS = [(int(random.gauss(mean_j, var)), int( random.gauss(mean_i, var))) for _ in range(bsz)] src_ = nestedtensor.nested_tensor( [torch.randn(NDIM * i * j).float().reshape(NDIM, i, j) for (i, j) in RAND_INTS], device=DEVICE, dtype=torch.float) src = [] for i, s in enumerate(src_): src.append(i*len(s) + s) detr_nt_src = DETRNestedTensor.from_tensor_list(src) sparsity = int(detr_nt_src.decompose()[1].float().mean().item() * 10) / 10 def gen_t_loop_mha(src): detr_nt_src = DETRNestedTensor.from_tensor_list(src) src, mask = detr_nt_src.decompose() src = src.flatten(2).permute(2, 0, 1).contiguous() mask = mask.flatten(1).contiguous() def te(): MODEL(src, src, src, key_padding_mask=mask, need_weights=False) return te def gen_nt_mha(src): src = nestedtensor.nested_tensor([t.flatten(1).permute( 1, 0) for t in src], device=DEVICE, dtype=torch.float) def nt(): MODEL(src, src, src, need_weights=False) return nt result_t = {**utils.benchmark_fn(gen_t_loop_mha(src), 5.0, cuda=True), "bsz": bsz, "sparsity": sparsity, "var": var, "mean_i": mean_i, "mean_j": mean_j} result_t["numel"] = sum([x.numel() for x in src_]) result_t["numel_div_avg_us"] = result_t["numel"] / result_t["avg_us"] result_t["avg_ns_div_numel"] = result_t["avg_us"] / \ result_t["numel"] * 1000 writer.writerow(result_t) result_nt = {**utils.benchmark_fn(gen_nt_mha(src), 5.0, cuda=True), "bsz": bsz, "sparsity": 0.0, "var": var, "mean_i": mean_i, "mean_j": mean_j} result_nt["numel"] = sum([x.numel() for x in src_]) result_nt["numel_div_avg_us"] = result_nt["numel"] / result_nt["avg_us"] result_nt["avg_ns_div_numel"] = result_nt["avg_us"] / \ result_nt["numel"] * 1000 writer.writerow(result_nt) if __name__ == "__main__": random.seed(1011) torch.manual_seed(1011) writer = csv.DictWriter(sys.stdout, fieldnames=[ "name", "avg_us", "std_us", "runs", "bsz", "sparsity", "var", "mean_i", "mean_j", "numel", "numel_div_avg_us", "avg_ns_div_numel"]) writer.writeheader() for var in [float(i) / 10 for i in range(0, 100, 50)]: for batch_size in [2, 8, 16]: 
run_benchmark(batch_size, 30, 30, var, writer)
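# Hedged illustration (not part of the original benchmark): a minimal CPU-only
# sketch of what DETRNestedTensor.from_tensor_list produces for two "images" of
# different sizes. The shapes are invented for the example; the point is that
# every tensor is zero-padded up to the largest height/width in the batch and
# the boolean mask marks the padded region.

def _padding_sketch():
    imgs = [torch.randn(3, 4, 6), torch.randn(3, 5, 3)]
    batched = DETRNestedTensor.from_tensor_list(imgs)
    tensor, mask = batched.decompose()
    # tensor.shape == (2, 3, 5, 6): both images padded to 5 x 6.
    # mask.shape == (2, 5, 6), True exactly where padding was inserted.
    print(tensor.shape, mask.shape, mask[0].sum().item(), mask[1].sum().item())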
py
1a592f3a6990d0a1ea3435f4a3ede2701c5ad2d9
# -*- coding: utf-8 -*- from ..base import * # noqa class TestAllVarTypes(EngineTestCase): def test_all_var_types(self): start = EmptyStartEvent() act_1 = ServiceActivity(component_code="debug_node") act_1.component.inputs.param_1 = Var(type=Var.SPLICE, value="${constant_1}") act_1.component.inputs.param_2 = Var(type=Var.LAZY, custom_type="upper_case", value="abc") act_1.component.inputs.param_3 = Var(type=Var.PLAIN, value="normal var") end = EmptyEndEvent() start.extend(act_1).extend(end) pipeline_data = Data() pipeline_data.inputs["${constant_1}"] = Var(type=Var.PLAIN, value="value_1") pipeline = self.create_pipeline_and_run(start, data=pipeline_data) self.join_or_fail(pipeline) self.assert_pipeline_finished(pipeline) self.assert_inputs_equals(act_1, "param_1", "value_1") self.assert_inputs_equals(act_1, "param_2", "ABC") self.assert_inputs_equals(act_1, "param_3", "normal var") self.test_pass()
py
1a593099b8a27a845af1f3801a1cd79d267fce35
__________________________________________________________________________________________________
sample 52 ms submission
class Solution:
    def isPowerOfThree(self, n: int) -> bool:
        # 3**19 = 1162261467 is the largest power of 3 that fits in a signed
        # 32-bit integer, so n is a power of 3 exactly when it is positive and
        # divides 3**19. The chained comparison reads as
        # (n > 0) and (0 == 3**19 % n).
        return n > 0 == 3**19 % n
__________________________________________________________________________________________________
sample 13076 kb submission
class Solution:
    def isPowerOfThree(self, n: int) -> bool:
        # Repeatedly divide by 3; any non-zero remainder means n is not a
        # power of 3.
        if n <= 0:
            return False
        while n > 1:
            n, res = divmod(n, 3)
            if res:
                return False
        return True
__________________________________________________________________________________________________
py
1a5931d31c6580227f0a18e26c5690e33e103290
from src import Env, LoggerFactory as Logger LEYEND = ''' Leyend: R : Robot B : Baby C : Corral # : Obstacule * : Dirt - : Empty cell ''' def main(args, log): robot = 'Reagent' robotA = 'Practical' if args.practical: robot = 'Practical' robotA = 'Reagent' #//HACKME:Monkey patch current_agent #src.agent.current_agent.func = current_agent house = None while True: e = Env(args.rows, args.columns, args.dirtiness, args.obstacules, args.babies, args.time, args.bernoulli, robot) house = e.copy_house() log.info('The generated environment is:') print(e) print(LEYEND) print('If this environment ok to you? Insert REPEAT to re-generate, insert anything else to continue') s = input() if s != 'REPEAT': break mean, mess = e.simulate(args.interactive) if e.fired: log.info('The time is over, task failed for robot') log.info(f'The amount of dirt at the end of this simulation is: {mess}') if args.simulation: mean_first = mean print('\n') eA = Env(args.rows, args.columns, args.dirtiness, args.obstacules, args.babies, args.time, args.bernoulli, robotA) eA.house = house mean, mess = eA.simulate(args.interactive) if eA.fired: log.info('The time is over, task failed for robot') log.info(f'The amount of dirt at the end of this simulation is: {mess}') print('\n') print('*************************************************************') print('Final Results:', 'Results') print('\n') print(f'Task completed by {robot} agent: {e.succeded}') print(f'Task completed by {robotA} agent: {eA.succeded}') print('\n') print(f'{robot} agent fired: {e.fired}') print(f'{robotA} agent fired: {eA.fired}') print('\n') print(f'Percentage of dirt at the end of this simulation of the {robot} agent: {mean_first}') print(f'Percentage of dirt at the end of this simulation of the {robotA} agent: {mean}') print('\n') print(f'Final house env of {robotA} agent') print('\n') print(eA) print('\n') print(f'Final house env of {robot} agent') print('\n') print(e) print('*************************************************************') if __name__ == '__main__': import argparse import os parser = argparse.ArgumentParser(description='Kindergarden simulator') parser.add_argument('-r', '--rows', type=int, default=9, help='number of rows of the house') parser.add_argument('-c', '--columns', type=int, default=8, help='number of columns of the house') parser.add_argument('-d', '--dirtiness', type=int, default=30, help='percentage of dirty cells at the start of the simulation') parser.add_argument('-o', '--obstacules', type=int, default=20, help='percentage of obstacules at the start of the simulation') parser.add_argument('-b', '--babies', type=int, default=5, help='number of babies in the house') parser.add_argument('-t', '--time', type=int, default=5, help='number of turns between changes of the environment') parser.add_argument('-p', '--practical', type=bool, const=True, nargs='?', help='set if you want to simulate Practical agent. 
Reagent agent is default') parser.add_argument('-P', '--bernoulli', type=float, default=0.5, help='probability of a baby moving in an environment change (0 to 1)') parser.add_argument('-l', '--level', type=str, default='INFO', help='log level') parser.add_argument('-f', '--file', type=bool, const=True, nargs='?', help='set if you want log to a file') parser.add_argument('-i', '--interactive', type=bool, const=True, nargs='?', help='set if you want to see what happens in every turn') parser.add_argument('-s', '--simulation', type=bool, const=True, nargs='?', help='set if you want to simulate all the agents') args = parser.parse_args() if not os.path.exists('./logs'): os.mkdir('./logs/') log = Logger(name='Kindergarden', log=args.file) log.setLevel(args.level) main(args, log)
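# Hedged usage sketch (not part of the original script): assuming this module is
# saved as main.py, a run that spells out the argparse options defined above
# could look like the line below; -i prints the house every turn and -s runs the
# second agent type on the same generated house for comparison.
#
#   python main.py -r 9 -c 8 -d 30 -o 20 -b 5 -t 5 -P 0.5 -i -s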
py
1a5932baa7414b38ee5c7ab8273e1ee5d1bdbab6
import unittest loader = unittest.TestLoader() start_dir = '.' suite = loader.discover(start_dir) runner = unittest.TextTestRunner() runner.run(suite)
py
1a59337196e5b63d0e329fe77f5b612f5f9b879e
# Generated by Django 3.2.6 on 2021-09-16 07:08 from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ ("students", "0003_studentgroup_user"), ] operations = [ migrations.AlterField( model_name="student", name="group", field=models.ForeignKey( on_delete=django.db.models.deletion.CASCADE, related_name="students", to="students.studentgroup", verbose_name="skupina", ), ), migrations.AlterField( model_name="student", name="name", field=models.CharField(max_length=255, verbose_name="ime"), ), migrations.AlterField( model_name="studentgroup", name="name", field=models.CharField(max_length=255, verbose_name="ime"), ), ]
py
1a5933deb42c13e670cc12fd3fd01eacdd6eab35
import os def ui2main(ui_name, file_name = 'main.py', model_name = '*_ui2py.py'): if model_name == '*_ui2py.py': model_name = ui_name.split('.')[0] + '_ui2py.py' if os.path.exists(ui_name): if os.path.exists(model_name): print('由ui直接转化为py格式的文件' + model_name + '已存在') else: print('开始转化ui文件至:' + model_name) # os.system('pyuic5 -o ' + model_name + ' ' + ui_name) os.system('python3 -m PyQt5.uic.pyuic -o ' + model_name + ' ' + ui_name) while True: if os.path.exists(model_name): break if os.path.exists(file_name): print('用于编写功能的py文件(运行此函数)' + file_name + '已存在') else: print('开始生成主函数文件至:' + file_name) model_text = open(model_name,encoding='utf8').read().split('\n') msg = {'model_class':'未识别出', 'button':[], 'action':[], 'combobox':[]} for line in model_text: if 'class ' in line: msg['model_class'] = line.split(' ')[1].split('(')[0] elif 'QtWidgets.QPushButton(' in line: button_name = line.split(' = ')[0] # .replace(' ','') msg['button'].append(button_name) elif '= QtWidgets.QAction(' in line: action_name = line.split(' = ')[0] msg['action'].append(action_name) elif 'QtWidgets.QComboBox(' in line: combobox_name = line.split(' = ')[0] msg['combobox'].append(combobox_name) buttonactive_test = '\n # 激活全部按钮、菜单选项、下拉列表用于测试,实际使用时注释掉\n' button_text = '\n # 事件连接函数\n' buttonfun_text = '\n\n # 按钮\n' for button in msg['button']: buttonactive_test += button + '.setEnabled(True)\n' button_text += button +'.clicked.connect(' + button.replace(' ','') + '_ClickFun)\n' buttonfun_text += ' def ' + button.replace(' ','').replace('self.', '') + '_ClickFun(self):\n print("你按了 " + ' + button.replace(' ','') + '.text() + " 这个按钮")\n\n' actionactive_test = '\n' action_text = '\n' actionfun_text = '\n # 菜单选项\n' for action in msg['action']: actionactive_test += action + '.setEnabled(True)\n' action_text += action + '.triggered.connect(' + action.replace(' ', '') + '_ClickFun)\n' actionfun_text += ' def ' + action.replace(' ', '').replace('self.', '') + '_ClickFun(self):\n print("你按了 " + ' + action.replace( ' ', '') + '.text() + " 这个菜单选项")\n\n' comboboxactive_test = '\n' combobox_text = '\n' comboboxfun_text = '\n # 下拉列表\n' for combobox in msg['combobox']: comboboxactive_test += combobox + '.setEnabled(True)\n' combobox_text += combobox + '.currentIndexChanged.connect(' + combobox.replace(' ', '') + '_ClickFun)\n' comboboxfun_text += ' def ' + combobox.replace(' ', '').replace('self.', '') + '_ClickFun(self):\n print("你将该下拉列表选项变成了 " + ' + combobox.replace( ' ', '') + '.currentText())\n\n' sum_test = buttonactive_test + actionactive_test + comboboxactive_test +\ button_text + action_text + combobox_text +\ buttonfun_text + actionfun_text + comboboxfun_text file_text = open(str(__file__).replace('__init__.py', 'model.txt'), encoding='utf8').read() file_text = file_text.replace('MyFunction', str(os.path.realpath(__file__)).replace('\\', '/').split('/')[-3]) file_text = file_text.replace('模板类', msg['model_class']) file_text = file_text.replace('模板', model_name.split('.')[0]) file_text = file_text.replace('此处是一堆连接', sum_test) open(file_name, 'w+', encoding='utf8').write(file_text) print('完成') else: print('文件' + ui_name + '不存在!程序退出') # if __name__ == '__main__': # CreateWritngFile('main.ui', 'test.py')
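# Hedged usage sketch (not part of the original module): a direct call with an
# assumed .ui file name. model_name defaults to '<ui name>_ui2py.py' and
# file_name defaults to 'main.py', as defined in the signature above.
#
#   ui2main('main.ui', file_name='test.py')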
py
1a5934868663f78f7e61b0ff08953213de532205
from collections import defaultdict

all_ingredients = []
candidates = defaultdict(list)

# Each input line looks like:
#   ingredient1 ingredient2 ... (contains allergen1, allergen2)
# For every allergen, remember the ingredient sets of the foods that list it.
with open('in', 'r') as f:
    for line in f.readlines():
        ingredients, allergens = line.split(' (')
        ingredients = ingredients.strip().split()
        allergens = allergens.replace('contains ', '').replace(')', '').strip().split(', ')
        all_ingredients += ingredients
        for allergen in allergens:
            candidates[allergen].append(set(ingredients))

# An ingredient can only contain an allergen if it appears in every food that
# lists that allergen, i.e. in the intersection of the candidate sets.
are_allergens = set()
for allergen in candidates.keys():
    options = set(all_ingredients)
    for i in candidates[allergen]:
        options = options.intersection(i)
    for i in options:
        are_allergens.add(i)

# Count appearances (with multiplicity) of ingredients that cannot contain any
# allergen.
count = 0
for ingredient in all_ingredients:
    if ingredient not in are_allergens:
        count += 1
print(count)
py
1a593674f54421fc0d04a623fb1fbf8b83f4e8d2
#standard imports from typing import Tuple,Union #scientific imports import numpy as np from sympy.ntheory import factorint #project imports from data_handler.signal_features import get_time_step def calculate_flicker_amplitude(data:np.ndarray) -> float: """ Computes the flicker amplitude after Bastien et al. (2013) :param data: data consisting of the full lightcurve :return: flicker amplitude """ flicker_time = get_flicker_time(data) t_step = get_time_step(data) elements = data[0].shape box_size = np.round(flicker_time/t_step) bin_count = int(elements / box_size) points_left = elements - box_size * bin_count index_shift,cols = get_index_shift(points_left) mean_array, subtract_array_amplitude = get_flicker_arrays(data, elements, cols, index_shift, box_size, flicker_time) mean_amp = np.mean(mean_array) subtract_array_amplitude = np.unique(subtract_array_amplitude) flic_amplitude = 0 for i in range(0, len(subtract_array_amplitude)): flic_amplitude += (subtract_array_amplitude[i] - mean_amp) ** 2 denominator = float(len(subtract_array_amplitude)) amp_flic = np.sqrt(flic_amplitude / denominator) return amp_flic def flicker_amplitude_to_frequency(flicker_amplitude : float) -> float: """ Converts the flicker amplitude to the first filter frequency according to Kallinger et al. (2016) :param flicker_amplitude: Flicker amplitude calculated according to Bastien et. al (2013) :return: First filter frequency """ return 10 ** (5.187) / (flicker_amplitude ** (1.560)) def get_flicker_time(data : np.ndarray) -> float: """ Returns the flicker time. 2.5 hours for SC, 5 days for LC data :param data: data consisting of the full lightcurve :return: flicker time """ t_step = get_time_step(data) t_step *= 24*60 if t_step < 10: return 2.5/(60*24) #2.5 hours time for SC data else: return 5/24 #5 days for LC data def get_index_shift(points_left : int) -> Tuple[int,int]: """ Depending on how many points are left in the array from the binsize, this method will return the according index_shift for the data as well as the amount of cols whereover these have to be iterated :param points_left: Restpoints after binning :return: index_shift,cols """ index_shift = 0 cols = 1 if points_left > 1: factors=factorint(points_left, multiple=True) if len(factors) > 1: index_shift = factors[0]**factors[1] else: index_shift = factors[0] cols = int(points_left / index_shift + 1) elif points_left == 1: cols = 2 index_shift = 1 return index_shift,cols def get_flicker_arrays(data : np.ndarray, elements : Union[int,Tuple[int,]], cols : int, index_shift : int, box_size : int , filter_time :float) -> Tuple[float,np.ndarray]: """ This method, depending on the indexshift, boxsize and filtertime creates the appropiate arrays, for which the flicker amplitude is calculated. 
It calculates the mean of every box for the boxsize.
    """
    if isinstance(elements, tuple) and len(elements) > 1:
        raise ValueError("Elements is not allowed to be a tuple longer than 1!")
    else:
        elements = elements[0]

    bin_count = int(elements / box_size)
    points_left = elements - box_size * bin_count

    array_mean = np.zeros(cols)

    for k in range(0, cols):
        array_rebin = np.zeros(int(elements - points_left))
        n_points_bin_array = np.zeros(int(elements - points_left))

        i = k * index_shift

        for j in range(0, bin_count):
            mean_bin = 0.0
            time_reference = i
            count = 1

            while i < (int(elements) - 1) and (data[0][i] - data[0][time_reference]) / (3600 * 24) < filter_time:
                mean_bin += data[1][i]
                if data[1][i] != 0:
                    count += 1
                i += 1

            mean_bin += data[1][i]
            if data[1][i] != 0:
                count += 1

            if count > 1:
                mean_bin /= count - 1

            array_rebin[time_reference - k * index_shift:(i - 1) - k * index_shift] = mean_bin
            n_points_bin_array[time_reference - k * index_shift:(i - 1) - k * index_shift] = count

        subtract_array_amplitude = data[1][k * index_shift:k * index_shift + len(array_rebin)] - array_rebin
        subtract_array_amplitude = subtract_array_amplitude[n_points_bin_array >= box_size / 2]
        array_mean[k] = np.mean(subtract_array_amplitude)

    return array_mean, subtract_array_amplitude
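# Hedged usage sketch (not part of the original module): the functions above
# expect `data` as a 2 x N numpy array with data[0] holding time stamps (in
# days, given the day-to-minute conversion in get_flicker_time) and data[1] the
# flux values. The light curve below is synthetic and the call chain is only
# illustrative; it also assumes get_time_step() returns the cadence in days.

def _flicker_sketch():
    time = np.arange(0, 27, 30.0 / (60 * 24))  # ~27 days sampled every 30 minutes
    flux = np.random.normal(loc=1.0, scale=0.001, size=time.size)
    data = np.array([time, flux])

    amplitude = calculate_flicker_amplitude(data)
    first_filter_frequency = flicker_amplitude_to_frequency(amplitude)
    print(amplitude, first_filter_frequency)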
py
1a59376ac1a23bd2eeacc62a27291cc13b31a164
normiefont = ['a','b','c','d','e','f','g','h','i','j','k','l','m','n','o','p','q','r','s','t','u','v','w','x','y','z'] weebyfont = ['卂','乃','匚','刀','乇','下','厶','卄','工','丁','长','乚','从','𠘨','口','尸','㔿','尺','丂','丅','凵','リ','山','乂','丫','乙'] tantextfont = ['Ꭿ','Ᏸ','Ꮳ','Ꮄ','Ꮛ','Ꮄ','Ꮆ','Ꮒ','i','Ꮰ','Ꮶ','l','m','Ꮑ','Ꮻ','Ꮅ','Ꮔ','ᖇ','Ꭶ','Ꮏ','Ꮜ','Ꮙ','Ꮿ','メ','Ꭹ','Ꮓ'] linetextfont = ['𝔸','𝔹','ℂ','𝔻','𝔼','𝔽','𝔾','ℍ','𝕀','𝕁','𝕂','𝕃','𝕄','ℕ','𝕆','ℙ','ℚ','ℝ','𝕊','𝕋','𝕌','𝕍','𝕎','𝕏','𝕐','ℤ'] boxtextfont = ['🄰','🄱','🄲','🄳','🄴','🄵','🄶','🄷','🄸','🄹','🄺','🄻','🄼','🄽','🄾','🄿','🅀','🅁','🅂','🅃','🅄','🅅','🅆','🅇','🅈','🅉'] bubbletextfont = ['Ⓐ','Ⓑ','Ⓒ','Ⓓ','Ⓔ','Ⓕ','Ⓖ','Ⓗ','Ⓘ','Ⓙ','Ⓚ','Ⓛ','Ⓜ','Ⓝ','Ⓞ','Ⓟ','Ⓠ','Ⓡ','Ⓢ','Ⓣ','Ⓤ','Ⓥ','Ⓦ','Ⓧ','Ⓨ','Ⓩ'] cursivefont = ['𝓪','𝓫','𝓬','𝓭','𝓮','𝓯','𝓰','𝓱','𝓲','𝓳','𝓴','𝓵','𝓶','𝓷','𝓸','𝓹','𝓺','𝓻','𝓼','𝓽','𝓾','𝓿','𝔀','𝔁','𝔂','𝔃' ] nefont = [ '𝕒' ,'𝕓','𝕔','𝕕','𝕖','𝕗','𝕘','𝕙','𝕚','𝕛','𝕜','𝕝','𝕞','𝕟','𝕠','𝕡','𝕢','𝕣','𝕤','𝕥','𝕦','𝕧','𝕨','𝕩','𝕪','𝕫'] @ultroid_cmd(pattern="dof ?(.*)") async def weebify(ult): args = ult.pattern_match.group(1) if not args: get = await ult.get_reply_message() args = get.text if not args: await ult.edit("What I am Supposed to Weebify? Please Give Text Sir") return string = ''.join(args).lower() for normiecharacter in string: if normiecharacter in normiefont: weebycharacter = nefont[normiefont.index(normiecharacter)] string = string.replace(normiecharacter, weebycharacter) await ult.edit(string) @ultroid_cmd(pattern="weeb ?(.*)") async def weebify(ult): args = ult.pattern_match.group(1) if not args: get = await ult.get_reply_message() args = get.text if not args: await ult.edit("What I am Supposed to Weebify? Please Give Text Sir") return string = ''.join(args).lower() for normiecharacter in string: if normiecharacter in normiefont: weebycharacter = weebyfont[normiefont.index(normiecharacter)] string = string.replace(normiecharacter, weebycharacter) await ult.edit(string) @ultroid_cmd(pattern="tantext ?(.*)") async def tantxt(ult): args = ult.pattern_match.group(1) if not args: get = await ult.get_reply_message() args = get.text if not args: await ult.edit("What I am Supposed to tanify? Please Give Text Sir") return string = ''.join(args).lower() for normiecharacter in string: if normiecharacter in normiefont: tanycharacter = tantextfont[normiefont.index(normiecharacter)] string = string.replace(normiecharacter, tanycharacter) await ult.edit(string) @ultroid_cmd(pattern="linetext ?(.*)") async def linetxt(ult): args = ult.pattern_match.group(1) if not args: get = await ult.get_reply_message() args = get.text if not args: await ult.edit("What I am Supposed to linefy? Please Give Text Sir") return string = ''.join(args).lower() for normiecharacter in string: if normiecharacter in normiefont: linecharacter = linetextfont[normiefont.index(normiecharacter)] string = string.replace(normiecharacter, linecharacter) await ult.edit(string) @ultroid_cmd(pattern="boxtext ?(.*)") async def boxtxt(ult): args = ult.pattern_match.group(1) if not args: get = await ult.get_reply_message() args = get.text if not args: await ult.edit("What I am Supposed to boxify? 
Please Give Text Sir")
        return
    string = ''.join(args).lower()
    for normiecharacter in string:
        if normiecharacter in normiefont:
            boxcharacter = boxtextfont[normiefont.index(normiecharacter)]
            string = string.replace(normiecharacter, boxcharacter)
    await ult.edit(string)


@ultroid_cmd(pattern="bubbletext ?(.*)")
async def bubbletxt(ult):
    args = ult.pattern_match.group(1)
    if not args:
        get = await ult.get_reply_message()
        args = get.text
    if not args:
        await ult.edit("What I am Supposed to bubblify? Please Give Text Sir")
        return
    string = ''.join(args).lower()
    for normiecharacter in string:
        if normiecharacter in normiefont:
            bubblecharacter = bubbletextfont[normiefont.index(normiecharacter)]
            string = string.replace(normiecharacter, bubblecharacter)
    await ult.edit(string)


@ultroid_cmd(pattern="cursive ?(.*)")
async def cursive(ult):
    args = ult.pattern_match.group(1)
    if not args:
        get = await ult.get_reply_message()
        args = get.text
    if not args:
        await ult.edit("What I am Supposed to write in cursive? Please Give Text Sir")
        return
    string = ''.join(args).lower()
    for normiecharacter in string:
        if normiecharacter in normiefont:
            cursivecharacter = cursivefont[normiefont.index(normiecharacter)]
            string = string.replace(normiecharacter, cursivecharacter)
    await ult.edit(string)
py
1a5938409e2620d4d99ace248946c429af77e7d3
import colorama from colorama import Fore, Style, Back class RidePrinter: COLOR_MAP = { 5: Fore.MAGENTA, 4: Fore.CYAN, 3: Fore.BLUE, 2: Fore.GREEN, 1: Fore.YELLOW, 0: Fore.RED } def __init__(self): colorama.init() self.curr_color_num = 0 def print_rider_location(self, rider): """Print the current webpage of some WikiRider instance""" page_title = rider.html_source.find('title').text.split(' - ')[0] next_color = self.COLOR_MAP[self.curr_color_num] dash_counter = min(rider.depth_counter + 1, 25) self.curr_color_num = (self.curr_color_num + 1) % len(self.COLOR_MAP) print(Style.BRIGHT + Fore.WHITE + (" - " * dash_counter) + page_title + " - " + next_color + rider.next_url + Style.RESET_ALL) def print_end(self): print(Style.BRIGHT + Back.WHITE + Fore.BLACK + "You rode the Wiki!" + Style.RESET_ALL) def print_start(self): print("\n" + Style.BRIGHT + Back.WHITE + Fore.BLACK + "Starting the Track!" + Style.RESET_ALL) def print_banner(self): print(Style.BRIGHT + Fore.WHITE) print(" (_\\") print(" / \\") print(" `== / /\\=,_") print(" _-_- ;--==\\\\ \\\\o") print(" _-_-__ /____//__/__\\") print(" _-_- `(0) (0) ") print("\t-Wiki_Rider" + Style.RESET_ALL) def print_help(self): print(Style.BRIGHT + Fore.WHITE + "\nUsage: " + Fore.YELLOW + "./wikirun.py <starting url> <depth>" + Style.RESET_ALL) def print_invalid_input_error(self): print(Style.BRIGHT + Fore.RED + "\n Depth must be a number! \n Starting URL must be a valid " "WikiPedia URL! \n (You might me missing https:// and special " "Pages aren't Allowed)!" + Style.RESET_ALL) def print_connection_error(self): print(Style.BRIGHT + Fore.RED + 'Cannot connect to WikiPedia.' + Style.RESET_ALL)
py
1a5938546a059e5bc00efad59fa27c6b94eaf1b9
import requests
import pprint
import json
import os
from bs4 import BeautifulSoup

if os.path.isfile('flipkart.json'):
    with open('flipkart.json','r')as file:
        file_data=json.load(file)
        print(file_data)
else:
    link="https://www.flipkart.com/search?q=mi+all+mobile&sid=tyy%2C4io&as=on&as-show=on&otracker=AS_QueryStore_OrganicAutoSuggest_0_6&otracker1=AS_QueryStore_OrganicAutoSuggest_0_6&as-pos=0&as-type=RECENT&as-searchtext=mi%20all%20"
    req=requests.get(link)
    # print(req)
    page=req.text
    # print(page)
    soup=BeautifulSoup(page,'html.parser')
    # print(soup)
    main_div=soup.find_all('div',class_="_1UoZlX")
    # print(main_div)
    list_for_all_phones=[]
    list_for_rupess=[]
    list_for_rating=[]
    list_for_all_detail=[]
    dic_list=[]
    for i in main_div:
        # print(i)
        col=i.find('div',class_='_1-2Iqu row')
        # print(col)
        # Build a fresh dictionary for every phone; reusing one dict object
        # would make every entry of dic_list point to the same record.
        dictionary={}
        n=col.find('div',class_='_3wU53n')
        text=n.text
        list_for_all_phones.append(text)
        dictionary['mobile_name']=text
        price=col.find('div',class_='_1vC4OE _2rQ-NK')
        price_text=price.text
        list_for_rupess.append(price_text)
        dictionary['price']=price_text
        rating=col.find('div',class_='hGSR34')
        rt=rating.text
        list_for_rating.append(rt)
        dictionary['rating']=rt
        b=col.find('div',class_='_3ULzGw')
        c=b.find('ul')
        # Only this phone's spec entries, so the indices below always refer to
        # the current phone rather than the first one scraped.
        details=[(j.text).strip() for j in c]
        list_for_all_detail.append(details)
        dictionary['ram']=details[0]
        dictionary['Display']=details[1]
        dictionary['camera']=details[2]
        dictionary['battery']=details[3]
        dictionary['processor']=details[4]
        dictionary['warranty']=details[5]
        # pprint.pprint(dictionary)
        dic_list.append(dictionary)
    # pprint.pprint(dic_list)
    with open('flipkart.json','w')as file:
        json.dump(dic_list,file)
    # print(list_for_all_phones)
    # print(list_for_rupess)
    # print(list_for_rating)
    # print(list_for_all_detail)
py
1a5939b1a66db34037f1df93473a07e1495aa258
import glob import json import time import yaml import luigi from luigi.contrib.spark import PySparkTask from py4j.protocol import Py4JJavaError from pyspark import SparkContext from pyspark.sql import DataFrame, SparkSession from pyspark.sql.utils import IllegalArgumentException from pyspark.sql.functions import lit, input_file_name, regexp_extract from pyspark.sql.types import StructType, StructField, StringType from etl import logger from etl.constants import Constants from etl.jobs.util.cleaner import trim_all_str from etl.source_files_conf_reader import read_module from etl.workflow.config import PdcmConfig ROOT_FOLDER = "data/UPDOG" def build_schema_from_cols(columns): schema = [] for column in columns: schema.append(StructField(column, StringType(), True)) return StructType(schema) def select_rows_with_data(df: DataFrame, columns) -> DataFrame: if "Field" in df.columns: df = df.select(columns).where("nvl(field, '') not like '#%'") else: df = df.select(columns) return df def clean_column_names(df: DataFrame): columns = df.columns for column in columns: df = df.withColumnRenamed(column, trim_all_str(column)) return df def read_files(session, path_patterns, schema): start = time.time() df = session.read.option('sep', '\t').option('header', True).option('schema', schema).csv(path_patterns) df = clean_column_names(df) df = select_rows_with_data(df, schema.fieldNames()) datasource_pattern = "{0}\\/([a-zA-Z-]+)(\\/)".format(ROOT_FOLDER.replace("/", "\\/")) df = df.withColumn("_data_source", lit(input_file_name())) df = df.withColumn(Constants.DATA_SOURCE_COLUMN, regexp_extract("_data_source", datasource_pattern, 1)) df = df.drop("_data_source") end = time.time() logger.info( "Read from path {0} count: {1} in {2} seconds".format(path_patterns, df.count(), round(end - start, 4))) return df def read_json(session, json_content): df = session.read.option("multiline", True).json(session.sparkContext.parallelize([json_content])) return df class ReadByModuleAndPathPatterns(PySparkTask): raw_folder_name = luigi.Parameter() path_patterns = luigi.ListParameter() columns_to_read = luigi.ListParameter() data_dir_out = luigi.Parameter() def output(self): return PdcmConfig().get_target( "{0}/{1}/{2}".format(self.data_dir_out, Constants.RAW_DIRECTORY, self.raw_folder_name)) def app_options(self): return [ '|'.join([p for p in self.path_patterns]), ','.join(self.columns_to_read), self.output().path] def main(self, sc: SparkContext, *args): spark = SparkSession(sc) path_patterns = args[0].split('|') columns_to_read = args[1].split(',') output_path = args[2] schema = build_schema_from_cols(columns_to_read) if sc.master == "yarn": hadoop = sc._jvm.org.apache.hadoop fs = hadoop.fs.FileSystem current_fs = fs.get(sc._jsc.hadoopConfiguration()) path_patterns = [path for path in path_patterns if path != "" and current_fs.globStatus(hadoop.fs.Path(path))] try: df = read_files(spark, path_patterns, schema) except (Py4JJavaError, IllegalArgumentException, FileNotFoundError, IOError) as error: no_empty_patterns = list(filter(lambda x: x != '', path_patterns)) if "java.io.FileNotFoundException" in str(error) or len(no_empty_patterns) == 0 or error.__class__ in [FileNotFoundError, IOError]: empty_df = spark.createDataFrame(sc.emptyRDD(), schema) df = empty_df df = df.withColumn(Constants.DATA_SOURCE_COLUMN, lit("")) else: raise error df.write.mode("overwrite").parquet(output_path) def build_path_patterns(data_dir, providers, file_patterns): data_dir_root = "{0}/{1}".format(data_dir, ROOT_FOLDER) paths_patterns = [] for 
file_pattern in file_patterns: matching_providers = [] for provider in providers: current_file_pattern = str(file_pattern).replace("$provider", provider) if glob.glob("{0}/{1}/{2}".format(data_dir_root, provider, current_file_pattern)) or PdcmConfig().deploy_mode == "cluster": matching_providers.append(provider) if matching_providers: joined_providers_list = ','.join([p for p in matching_providers]) providers_pattern = "{" + joined_providers_list + "}" path_pattern = "{0}/{1}/{2}".format( data_dir_root, providers_pattern, file_pattern.replace("$provider", providers_pattern)) paths_patterns.append(path_pattern) return paths_patterns def build_path_pattern_by_provider(data_dir, provider, file_pattern): data_dir_root = "{0}/{1}".format(data_dir, ROOT_FOLDER) path_pattern = "{0}/{1}/{2}".format(data_dir_root, provider, file_pattern.replace("$provider", provider)) return path_pattern def get_tsv_extraction_task_by_module(data_dir, providers, data_dir_out, module_name): module = read_module(module_name) file_patterns = module["name_patterns"] columns = module["columns"] path_patterns = build_path_patterns(data_dir, list(providers), file_patterns) return ReadByModuleAndPathPatterns(module_name, path_patterns, columns, data_dir_out) def extract_provider_name(path: str): init_index = path.index(ROOT_FOLDER) + len(ROOT_FOLDER) + 1 next_slash = path.index("/", init_index) return path[init_index:next_slash] def get_json_by_yaml(yaml_content): yaml_as_json = yaml.safe_load(yaml_content) yaml_as_json = json.dumps(yaml_as_json) yaml_as_json = yaml_as_json.encode("unicode_escape").decode("utf-8") return yaml_as_json class ReadYamlsByModule(PySparkTask): raw_folder_name = luigi.Parameter() yaml_paths = luigi.ListParameter() columns_to_read = luigi.ListParameter() data_dir_out = luigi.Parameter() def output(self): return PdcmConfig().get_target( "{0}/{1}/{2}".format(self.data_dir_out, Constants.RAW_DIRECTORY, self.raw_folder_name)) def app_options(self): return [ ','.join(self.yaml_paths), ','.join(self.columns_to_read), PdcmConfig().deploy_mode, self.output().path] def main(self, sc, *args): spark = SparkSession(sc) yaml_file_paths = args[0].split(',') columns_to_read = args[1].split(',') deploy_mode = args[2] output_path = args[3] all_json_and_providers = [] if deploy_mode == "cluster": for yaml_file_path in yaml_file_paths: yaml_as_json = sc.wholeTextFiles(yaml_file_path).collect()[0][1] yaml_as_json = get_json_by_yaml(yaml_as_json) json_content_and_provider = (yaml_as_json, extract_provider_name(yaml_file_path)) all_json_and_providers.append(json_content_and_provider) else: for yaml_file_path in yaml_file_paths: with open(yaml_file_path, 'r') as stream: yaml_as_json = get_json_by_yaml(stream) json_content_and_provider = (yaml_as_json, extract_provider_name(yaml_file_path)) all_json_and_providers.append(json_content_and_provider) source_df = spark.createDataFrame(spark.sparkContext.emptyRDD(), build_schema_from_cols(columns_to_read)) source_df = source_df.withColumn(Constants.DATA_SOURCE_COLUMN, lit(None).astype(StringType())) for json_and_provider in all_json_and_providers: json_content = json_and_provider[0] provider = json_and_provider[1] df = read_json(spark, json_content) df = df.select(columns_to_read) df = df.withColumn(Constants.DATA_SOURCE_COLUMN, lit(provider)) source_df = source_df.union(df) source_df.write.mode("overwrite").parquet(output_path) def get_yaml_extraction_task_by_module(data_dir, providers, data_dir_out, module_name): module = read_module(module_name) file_patterns = 
module["name_patterns"] columns = module["columns"] # There should be only one yaml file by module file_path = str(file_patterns[0]) yaml_paths = [] for provider in providers: yaml_file_path = build_path_pattern_by_provider(data_dir, provider, file_path) yaml_paths.append(yaml_file_path) return ReadYamlsByModule(module_name, yaml_paths, columns, data_dir_out) if __name__ == "__main__": luigi.run()
py
1a5939f3a8eb1d6f658c5d32f5f931d8c730c187
#!/usr/bin/env python3
import sys
import warnings

import tcod

import g
import states.mainmenu


def main() -> None:
    screen_width = 720
    screen_height = 480

    tileset = tcod.tileset.load_tilesheet(
        "data/cp437-14.png", 32, 8, tcod.tileset.CHARMAP_CP437
    )

    with tcod.context.new(
        width=screen_width,
        height=screen_height,
        tileset=tileset,
        title="libtcod tutorial revised",
        renderer=tcod.RENDERER_SDL2,
        vsync=True,
    ) as g.context:
        g.console = tcod.Console(*g.context.recommended_console_size())
        states.mainmenu.MainMenu().loop()


if __name__ == "__main__":
    if not sys.warnoptions:
        warnings.simplefilter("default")  # Show all warnings once by default.
    main()
py
1a593a39b789117fd7562b6377a7964b5751693d
# -*- coding: utf-8 -*-

# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2019.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.

from ._inputparser import InputParser

__all__ = ['InputParser']
py
1a593c3c4440a54fa10bb266a062e10ffc5122be
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

# pylint: disable=W0401,W0614

from telemetry.page.actions.all_page_actions import *
from telemetry.page import page as page_module
from telemetry.page import page_set as page_set_module


class BypassPage(page_module.Page):

  def __init__(self, url, page_set):
    super(BypassPage, self).__init__(url=url, page_set=page_set)
    self.archive_data_file = '../data/chrome_proxy_bypass.json'


class BypassPageSet(page_set_module.PageSet):
  """ Chrome proxy test sites """

  def __init__(self):
    super(BypassPageSet, self).__init__(
      archive_data_file='../data/chrome_proxy_bypass.json')

    urls_list = [
      'http://aws1.mdw.la/bypass/',
      'http://aws1.mdw.la/piatek/bypass-demo'
    ]

    for url in urls_list:
      self.AddPage(BypassPage(url, self))
py
1a593cd85aa556e83bcb327827e37ca68f1c2b20
# This file is exec'd from settings.py, so it has access to and can
# modify all the variables in settings.py.

# If this file is changed in development, the development server will
# have to be manually restarted because changes will not be noticed
# immediately.

DEBUG = False

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.mysql',
        # Django needs to make databases in the test mysql server
        'NAME': 'travismep',
        'USER': 'root',
        'PASSWORD': '',
        'HOST': '127.0.0.1',
        'OPTIONS': {
            # In each case, we want strict mode on to catch truncation issues
            'init_command': "SET sql_mode='STRICT_TRANS_TABLES'",
            # Load the socket info from .my.cnf in the travis user
            'read_default_file': '~travis/.my.cnf'
        },
        'PORT': '',
        'TEST': {
            # We also want the test database to use utf8 and the general
            # collation to keep case-sensitive unicode searches working
            # as we would expect on production
            'CHARSET': 'utf8',
            'COLLATION': 'utf8_general_ci',
        },
    },
}

SOLR_CONNECTIONS = {
    'default': {
        'URL': 'http://localhost:8983/solr/',
        'COLLECTION': 's-and-co',
        'CONFIGSET': 'sandco',
        'TEST': {
            # aggressive commitWithin for test only
            'COMMITWITHIN': 750,
        }
    }
}

# required by mezzanine for unit tests
ALLOWED_HOSTS = ['*']

# secret key added as a travis build step
py
1a593ee9490e7d594fe409bbaf662a6700fbbd4d
# Generated by the protocol buffer compiler. DO NOT EDIT! # source: music.proto import sys _b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1')) from google.protobuf import descriptor as _descriptor from google.protobuf import message as _message from google.protobuf import reflection as _reflection from google.protobuf import symbol_database as _symbol_database from google.protobuf import descriptor_pb2 # @@protoc_insertion_point(imports) _sym_db = _symbol_database.Default() DESCRIPTOR = _descriptor.FileDescriptor( name='music.proto', package='tensorflow.magenta', syntax='proto3', serialized_pb=_b('\n\x0bmusic.proto\x12\x12tensorflow.magenta\"\xd3\x16\n\x0cNoteSequence\x12\n\n\x02id\x18\x01 \x01(\t\x12\x10\n\x08\x66ilename\x18\x02 \x01(\t\x12\x17\n\x0f\x63ollection_name\x18\x03 \x01(\t\x12\x19\n\x11ticks_per_quarter\x18\x04 \x01(\x05\x12G\n\x0ftime_signatures\x18\x05 \x03(\x0b\x32..tensorflow.magenta.NoteSequence.TimeSignature\x12\x45\n\x0ekey_signatures\x18\x06 \x03(\x0b\x32-.tensorflow.magenta.NoteSequence.KeySignature\x12\x36\n\x06tempos\x18\x07 \x03(\x0b\x32&.tensorflow.magenta.NoteSequence.Tempo\x12\x34\n\x05notes\x18\x08 \x03(\x0b\x32%.tensorflow.magenta.NoteSequence.Note\x12\x12\n\ntotal_time\x18\t \x01(\x01\x12?\n\x0bpitch_bends\x18\n \x03(\x0b\x32*.tensorflow.magenta.NoteSequence.PitchBend\x12G\n\x0f\x63ontrol_changes\x18\x0b \x03(\x0b\x32..tensorflow.magenta.NoteSequence.ControlChange\x12=\n\npart_infos\x18\x0c \x03(\x0b\x32).tensorflow.magenta.NoteSequence.PartInfo\x12@\n\x0bsource_info\x18\r \x01(\x0b\x32+.tensorflow.magenta.NoteSequence.SourceInfo\x12I\n\x10text_annotations\x18\x0e \x03(\x0b\x32/.tensorflow.magenta.NoteSequence.TextAnnotation\x1a\xf9\x01\n\x04Note\x12\r\n\x05pitch\x18\x01 \x01(\x05\x12>\n\npitch_name\x18\x0b \x01(\x0e\x32*.tensorflow.magenta.NoteSequence.PitchName\x12\x10\n\x08velocity\x18\x02 \x01(\x05\x12\x12\n\nstart_time\x18\x03 \x01(\x01\x12\x10\n\x08\x65nd_time\x18\x04 \x01(\x01\x12\x11\n\tnumerator\x18\x05 \x01(\x05\x12\x13\n\x0b\x64\x65nominator\x18\x06 \x01(\x05\x12\x12\n\ninstrument\x18\x07 \x01(\x05\x12\x0f\n\x07program\x18\x08 \x01(\x05\x12\x0f\n\x07is_drum\x18\t \x01(\x08\x12\x0c\n\x04part\x18\n \x01(\x05\x1a\x45\n\rTimeSignature\x12\x0c\n\x04time\x18\x01 \x01(\x01\x12\x11\n\tnumerator\x18\x02 \x01(\x05\x12\x13\n\x0b\x64\x65nominator\x18\x03 \x01(\x05\x1a\x89\x03\n\x0cKeySignature\x12\x0c\n\x04time\x18\x01 \x01(\x01\x12>\n\x03key\x18\x02 \x01(\x0e\x32\x31.tensorflow.magenta.NoteSequence.KeySignature.Key\x12@\n\x04mode\x18\x03 \x01(\x0e\x32\x32.tensorflow.magenta.NoteSequence.KeySignature.Mode\"\xb7\x01\n\x03Key\x12\x05\n\x01\x43\x10\x00\x12\x0b\n\x07\x43_SHARP\x10\x01\x12\n\n\x06\x44_FLAT\x10\x01\x12\x05\n\x01\x44\x10\x02\x12\x0b\n\x07\x44_SHARP\x10\x03\x12\n\n\x06\x45_FLAT\x10\x03\x12\x05\n\x01\x45\x10\x04\x12\x05\n\x01\x46\x10\x05\x12\x0b\n\x07\x46_SHARP\x10\x06\x12\n\n\x06G_FLAT\x10\x06\x12\x05\n\x01G\x10\x07\x12\x0b\n\x07G_SHARP\x10\x08\x12\n\n\x06\x41_FLAT\x10\x08\x12\x05\n\x01\x41\x10\t\x12\x0b\n\x07\x41_SHARP\x10\n\x12\n\n\x06\x42_FLAT\x10\n\x12\x05\n\x01\x42\x10\x0b\x1a\x02\x10\x01\"/\n\x04Mode\x12\t\n\x05MAJOR\x10\x00\x12\t\n\x05MINOR\x10\x01\x12\x11\n\rNOT_SPECIFIED\x10\x02\x1a\"\n\x05Tempo\x12\x0c\n\x04time\x18\x01 \x01(\x01\x12\x0b\n\x03qpm\x18\x02 \x01(\x01\x1a]\n\tPitchBend\x12\x0c\n\x04time\x18\x01 \x01(\x01\x12\x0c\n\x04\x62\x65nd\x18\x02 \x01(\x05\x12\x12\n\ninstrument\x18\x03 \x01(\x05\x12\x0f\n\x07program\x18\x04 \x01(\x05\x12\x0f\n\x07is_drum\x18\x05 
\x01(\x08\x1a\x82\x01\n\rControlChange\x12\x0c\n\x04time\x18\x01 \x01(\x01\x12\x16\n\x0e\x63ontrol_number\x18\x02 \x01(\x05\x12\x15\n\rcontrol_value\x18\x03 \x01(\x05\x12\x12\n\ninstrument\x18\x04 \x01(\x05\x12\x0f\n\x07program\x18\x05 \x01(\x05\x12\x0f\n\x07is_drum\x18\x06 \x01(\x08\x1a&\n\x08PartInfo\x12\x0c\n\x04part\x18\x01 \x01(\x05\x12\x0c\n\x04name\x18\x02 \x01(\t\x1a\xc6\x03\n\nSourceInfo\x12K\n\x0bsource_type\x18\x01 \x01(\x0e\x32\x36.tensorflow.magenta.NoteSequence.SourceInfo.SourceType\x12O\n\rencoding_type\x18\x02 \x01(\x0e\x32\x38.tensorflow.magenta.NoteSequence.SourceInfo.EncodingType\x12\x42\n\x06parser\x18\x03 \x01(\x0e\x32\x32.tensorflow.magenta.NoteSequence.SourceInfo.Parser\"M\n\nSourceType\x12\x17\n\x13UNKNOWN_SOURCE_TYPE\x10\x00\x12\x0f\n\x0bSCORE_BASED\x10\x01\x12\x15\n\x11PERFORMANCE_BASED\x10\x02\"K\n\x0c\x45ncodingType\x12\x19\n\x15UNKNOWN_ENCODING_TYPE\x10\x00\x12\r\n\tMUSIC_XML\x10\x01\x12\x07\n\x03\x41\x42\x43\x10\x02\x12\x08\n\x04MIDI\x10\x03\":\n\x06Parser\x12\x12\n\x0eUNKNOWN_PARSER\x10\x00\x12\x0b\n\x07MUSIC21\x10\x01\x12\x0f\n\x0bPRETTY_MIDI\x10\x02\x1a\xbe\x01\n\x0eTextAnnotation\x12\x0c\n\x04time\x18\x01 \x01(\x01\x12\x0c\n\x04text\x18\x02 \x01(\t\x12[\n\x0f\x61nnotation_type\x18\x03 \x01(\x0e\x32\x42.tensorflow.magenta.NoteSequence.TextAnnotation.TextAnnotationType\"3\n\x12TextAnnotationType\x12\x0b\n\x07UNKNOWN\x10\x00\x12\x10\n\x0c\x43HORD_SYMBOL\x10\x01\"\xff\x03\n\tPitchName\x12\x16\n\x12UNKNOWN_PITCH_NAME\x10\x00\x12\x0f\n\x0b\x46_FLAT_FLAT\x10\x01\x12\x0f\n\x0b\x43_FLAT_FLAT\x10\x02\x12\x0f\n\x0bG_FLAT_FLAT\x10\x03\x12\x0f\n\x0b\x44_FLAT_FLAT\x10\x04\x12\x0f\n\x0b\x41_FLAT_FLAT\x10\x05\x12\x0f\n\x0b\x45_FLAT_FLAT\x10\x06\x12\x0f\n\x0b\x42_FLAT_FLAT\x10\x07\x12\n\n\x06\x46_FLAT\x10\x08\x12\n\n\x06\x43_FLAT\x10\t\x12\n\n\x06G_FLAT\x10\n\x12\n\n\x06\x44_FLAT\x10\x0b\x12\n\n\x06\x41_FLAT\x10\x0c\x12\n\n\x06\x45_FLAT\x10\r\x12\n\n\x06\x42_FLAT\x10\x0e\x12\x05\n\x01\x46\x10\x0f\x12\x05\n\x01\x43\x10\x10\x12\x05\n\x01G\x10\x11\x12\x05\n\x01\x44\x10\x12\x12\x05\n\x01\x41\x10\x13\x12\x05\n\x01\x45\x10\x14\x12\x05\n\x01\x42\x10\x15\x12\x0b\n\x07\x46_SHARP\x10\x16\x12\x0b\n\x07\x43_SHARP\x10\x17\x12\x0b\n\x07G_SHARP\x10\x18\x12\x0b\n\x07\x44_SHARP\x10\x19\x12\x0b\n\x07\x41_SHARP\x10\x1a\x12\x0b\n\x07\x45_SHARP\x10\x1b\x12\x0b\n\x07\x42_SHARP\x10\x1c\x12\x11\n\rF_SHARP_SHARP\x10\x1d\x12\x11\n\rC_SHARP_SHARP\x10\x1e\x12\x11\n\rG_SHARP_SHARP\x10\x1f\x12\x11\n\rD_SHARP_SHARP\x10 \x12\x11\n\rA_SHARP_SHARP\x10!\x12\x11\n\rE_SHARP_SHARP\x10\"\x12\x11\n\rB_SHARP_SHARP\x10#b\x06proto3') ) _sym_db.RegisterFileDescriptor(DESCRIPTOR) _NOTESEQUENCE_KEYSIGNATURE_KEY = _descriptor.EnumDescriptor( name='Key', full_name='tensorflow.magenta.NoteSequence.KeySignature.Key', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='C', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='C_SHARP', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='D_FLAT', index=2, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='D', index=3, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='D_SHARP', index=4, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='E_FLAT', index=5, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='E', index=6, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( name='F', index=7, number=5, options=None, type=None), _descriptor.EnumValueDescriptor( name='F_SHARP', 
index=8, number=6, options=None, type=None), _descriptor.EnumValueDescriptor( name='G_FLAT', index=9, number=6, options=None, type=None), _descriptor.EnumValueDescriptor( name='G', index=10, number=7, options=None, type=None), _descriptor.EnumValueDescriptor( name='G_SHARP', index=11, number=8, options=None, type=None), _descriptor.EnumValueDescriptor( name='A_FLAT', index=12, number=8, options=None, type=None), _descriptor.EnumValueDescriptor( name='A', index=13, number=9, options=None, type=None), _descriptor.EnumValueDescriptor( name='A_SHARP', index=14, number=10, options=None, type=None), _descriptor.EnumValueDescriptor( name='B_FLAT', index=15, number=10, options=None, type=None), _descriptor.EnumValueDescriptor( name='B', index=16, number=11, options=None, type=None), ], containing_type=None, options=_descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001')), serialized_start=1235, serialized_end=1418, ) _sym_db.RegisterEnumDescriptor(_NOTESEQUENCE_KEYSIGNATURE_KEY) _NOTESEQUENCE_KEYSIGNATURE_MODE = _descriptor.EnumDescriptor( name='Mode', full_name='tensorflow.magenta.NoteSequence.KeySignature.Mode', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='MAJOR', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='MINOR', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='NOT_SPECIFIED', index=2, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=1420, serialized_end=1467, ) _sym_db.RegisterEnumDescriptor(_NOTESEQUENCE_KEYSIGNATURE_MODE) _NOTESEQUENCE_SOURCEINFO_SOURCETYPE = _descriptor.EnumDescriptor( name='SourceType', full_name='tensorflow.magenta.NoteSequence.SourceInfo.SourceType', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN_SOURCE_TYPE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='SCORE_BASED', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='PERFORMANCE_BASED', index=2, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=2014, serialized_end=2091, ) _sym_db.RegisterEnumDescriptor(_NOTESEQUENCE_SOURCEINFO_SOURCETYPE) _NOTESEQUENCE_SOURCEINFO_ENCODINGTYPE = _descriptor.EnumDescriptor( name='EncodingType', full_name='tensorflow.magenta.NoteSequence.SourceInfo.EncodingType', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN_ENCODING_TYPE', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='MUSIC_XML', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='ABC', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='MIDI', index=3, number=3, options=None, type=None), ], containing_type=None, options=None, serialized_start=2093, serialized_end=2168, ) _sym_db.RegisterEnumDescriptor(_NOTESEQUENCE_SOURCEINFO_ENCODINGTYPE) _NOTESEQUENCE_SOURCEINFO_PARSER = _descriptor.EnumDescriptor( name='Parser', full_name='tensorflow.magenta.NoteSequence.SourceInfo.Parser', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN_PARSER', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='MUSIC21', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='PRETTY_MIDI', index=2, number=2, options=None, type=None), ], containing_type=None, options=None, serialized_start=2170, 
serialized_end=2228, ) _sym_db.RegisterEnumDescriptor(_NOTESEQUENCE_SOURCEINFO_PARSER) _NOTESEQUENCE_TEXTANNOTATION_TEXTANNOTATIONTYPE = _descriptor.EnumDescriptor( name='TextAnnotationType', full_name='tensorflow.magenta.NoteSequence.TextAnnotation.TextAnnotationType', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='CHORD_SYMBOL', index=1, number=1, options=None, type=None), ], containing_type=None, options=None, serialized_start=2370, serialized_end=2421, ) _sym_db.RegisterEnumDescriptor(_NOTESEQUENCE_TEXTANNOTATION_TEXTANNOTATIONTYPE) _NOTESEQUENCE_PITCHNAME = _descriptor.EnumDescriptor( name='PitchName', full_name='tensorflow.magenta.NoteSequence.PitchName', filename=None, file=DESCRIPTOR, values=[ _descriptor.EnumValueDescriptor( name='UNKNOWN_PITCH_NAME', index=0, number=0, options=None, type=None), _descriptor.EnumValueDescriptor( name='F_FLAT_FLAT', index=1, number=1, options=None, type=None), _descriptor.EnumValueDescriptor( name='C_FLAT_FLAT', index=2, number=2, options=None, type=None), _descriptor.EnumValueDescriptor( name='G_FLAT_FLAT', index=3, number=3, options=None, type=None), _descriptor.EnumValueDescriptor( name='D_FLAT_FLAT', index=4, number=4, options=None, type=None), _descriptor.EnumValueDescriptor( name='A_FLAT_FLAT', index=5, number=5, options=None, type=None), _descriptor.EnumValueDescriptor( name='E_FLAT_FLAT', index=6, number=6, options=None, type=None), _descriptor.EnumValueDescriptor( name='B_FLAT_FLAT', index=7, number=7, options=None, type=None), _descriptor.EnumValueDescriptor( name='F_FLAT', index=8, number=8, options=None, type=None), _descriptor.EnumValueDescriptor( name='C_FLAT', index=9, number=9, options=None, type=None), _descriptor.EnumValueDescriptor( name='G_FLAT', index=10, number=10, options=None, type=None), _descriptor.EnumValueDescriptor( name='D_FLAT', index=11, number=11, options=None, type=None), _descriptor.EnumValueDescriptor( name='A_FLAT', index=12, number=12, options=None, type=None), _descriptor.EnumValueDescriptor( name='E_FLAT', index=13, number=13, options=None, type=None), _descriptor.EnumValueDescriptor( name='B_FLAT', index=14, number=14, options=None, type=None), _descriptor.EnumValueDescriptor( name='F', index=15, number=15, options=None, type=None), _descriptor.EnumValueDescriptor( name='C', index=16, number=16, options=None, type=None), _descriptor.EnumValueDescriptor( name='G', index=17, number=17, options=None, type=None), _descriptor.EnumValueDescriptor( name='D', index=18, number=18, options=None, type=None), _descriptor.EnumValueDescriptor( name='A', index=19, number=19, options=None, type=None), _descriptor.EnumValueDescriptor( name='E', index=20, number=20, options=None, type=None), _descriptor.EnumValueDescriptor( name='B', index=21, number=21, options=None, type=None), _descriptor.EnumValueDescriptor( name='F_SHARP', index=22, number=22, options=None, type=None), _descriptor.EnumValueDescriptor( name='C_SHARP', index=23, number=23, options=None, type=None), _descriptor.EnumValueDescriptor( name='G_SHARP', index=24, number=24, options=None, type=None), _descriptor.EnumValueDescriptor( name='D_SHARP', index=25, number=25, options=None, type=None), _descriptor.EnumValueDescriptor( name='A_SHARP', index=26, number=26, options=None, type=None), _descriptor.EnumValueDescriptor( name='E_SHARP', index=27, number=27, options=None, type=None), _descriptor.EnumValueDescriptor( name='B_SHARP', index=28, 
number=28, options=None, type=None), _descriptor.EnumValueDescriptor( name='F_SHARP_SHARP', index=29, number=29, options=None, type=None), _descriptor.EnumValueDescriptor( name='C_SHARP_SHARP', index=30, number=30, options=None, type=None), _descriptor.EnumValueDescriptor( name='G_SHARP_SHARP', index=31, number=31, options=None, type=None), _descriptor.EnumValueDescriptor( name='D_SHARP_SHARP', index=32, number=32, options=None, type=None), _descriptor.EnumValueDescriptor( name='A_SHARP_SHARP', index=33, number=33, options=None, type=None), _descriptor.EnumValueDescriptor( name='E_SHARP_SHARP', index=34, number=34, options=None, type=None), _descriptor.EnumValueDescriptor( name='B_SHARP_SHARP', index=35, number=35, options=None, type=None), ], containing_type=None, options=None, serialized_start=2424, serialized_end=2935, ) _sym_db.RegisterEnumDescriptor(_NOTESEQUENCE_PITCHNAME) _NOTESEQUENCE_NOTE = _descriptor.Descriptor( name='Note', full_name='tensorflow.magenta.NoteSequence.Note', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='pitch', full_name='tensorflow.magenta.NoteSequence.Note.pitch', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='pitch_name', full_name='tensorflow.magenta.NoteSequence.Note.pitch_name', index=1, number=11, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='velocity', full_name='tensorflow.magenta.NoteSequence.Note.velocity', index=2, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='start_time', full_name='tensorflow.magenta.NoteSequence.Note.start_time', index=3, number=3, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='end_time', full_name='tensorflow.magenta.NoteSequence.Note.end_time', index=4, number=4, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='numerator', full_name='tensorflow.magenta.NoteSequence.Note.numerator', index=5, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='denominator', full_name='tensorflow.magenta.NoteSequence.Note.denominator', index=6, number=6, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='instrument', full_name='tensorflow.magenta.NoteSequence.Note.instrument', index=7, number=7, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, 
options=None), _descriptor.FieldDescriptor( name='program', full_name='tensorflow.magenta.NoteSequence.Note.program', index=8, number=8, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='is_drum', full_name='tensorflow.magenta.NoteSequence.Note.is_drum', index=9, number=9, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='part', full_name='tensorflow.magenta.NoteSequence.Note.part', index=10, number=10, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=751, serialized_end=1000, ) _NOTESEQUENCE_TIMESIGNATURE = _descriptor.Descriptor( name='TimeSignature', full_name='tensorflow.magenta.NoteSequence.TimeSignature', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='time', full_name='tensorflow.magenta.NoteSequence.TimeSignature.time', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='numerator', full_name='tensorflow.magenta.NoteSequence.TimeSignature.numerator', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='denominator', full_name='tensorflow.magenta.NoteSequence.TimeSignature.denominator', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1002, serialized_end=1071, ) _NOTESEQUENCE_KEYSIGNATURE = _descriptor.Descriptor( name='KeySignature', full_name='tensorflow.magenta.NoteSequence.KeySignature', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='time', full_name='tensorflow.magenta.NoteSequence.KeySignature.time', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='key', full_name='tensorflow.magenta.NoteSequence.KeySignature.key', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='mode', full_name='tensorflow.magenta.NoteSequence.KeySignature.mode', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, 
extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _NOTESEQUENCE_KEYSIGNATURE_KEY, _NOTESEQUENCE_KEYSIGNATURE_MODE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1074, serialized_end=1467, ) _NOTESEQUENCE_TEMPO = _descriptor.Descriptor( name='Tempo', full_name='tensorflow.magenta.NoteSequence.Tempo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='time', full_name='tensorflow.magenta.NoteSequence.Tempo.time', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='qpm', full_name='tensorflow.magenta.NoteSequence.Tempo.qpm', index=1, number=2, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1469, serialized_end=1503, ) _NOTESEQUENCE_PITCHBEND = _descriptor.Descriptor( name='PitchBend', full_name='tensorflow.magenta.NoteSequence.PitchBend', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='time', full_name='tensorflow.magenta.NoteSequence.PitchBend.time', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='bend', full_name='tensorflow.magenta.NoteSequence.PitchBend.bend', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='instrument', full_name='tensorflow.magenta.NoteSequence.PitchBend.instrument', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='program', full_name='tensorflow.magenta.NoteSequence.PitchBend.program', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='is_drum', full_name='tensorflow.magenta.NoteSequence.PitchBend.is_drum', index=4, number=5, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1505, serialized_end=1598, ) _NOTESEQUENCE_CONTROLCHANGE = _descriptor.Descriptor( name='ControlChange', full_name='tensorflow.magenta.NoteSequence.ControlChange', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='time', full_name='tensorflow.magenta.NoteSequence.ControlChange.time', index=0, number=1, type=1, cpp_type=5, label=1, 
has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='control_number', full_name='tensorflow.magenta.NoteSequence.ControlChange.control_number', index=1, number=2, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='control_value', full_name='tensorflow.magenta.NoteSequence.ControlChange.control_value', index=2, number=3, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='instrument', full_name='tensorflow.magenta.NoteSequence.ControlChange.instrument', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='program', full_name='tensorflow.magenta.NoteSequence.ControlChange.program', index=4, number=5, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='is_drum', full_name='tensorflow.magenta.NoteSequence.ControlChange.is_drum', index=5, number=6, type=8, cpp_type=7, label=1, has_default_value=False, default_value=False, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1601, serialized_end=1731, ) _NOTESEQUENCE_PARTINFO = _descriptor.Descriptor( name='PartInfo', full_name='tensorflow.magenta.NoteSequence.PartInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='part', full_name='tensorflow.magenta.NoteSequence.PartInfo.part', index=0, number=1, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='name', full_name='tensorflow.magenta.NoteSequence.PartInfo.name', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1733, serialized_end=1771, ) _NOTESEQUENCE_SOURCEINFO = _descriptor.Descriptor( name='SourceInfo', full_name='tensorflow.magenta.NoteSequence.SourceInfo', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='source_type', full_name='tensorflow.magenta.NoteSequence.SourceInfo.source_type', index=0, number=1, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='encoding_type', 
full_name='tensorflow.magenta.NoteSequence.SourceInfo.encoding_type', index=1, number=2, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='parser', full_name='tensorflow.magenta.NoteSequence.SourceInfo.parser', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _NOTESEQUENCE_SOURCEINFO_SOURCETYPE, _NOTESEQUENCE_SOURCEINFO_ENCODINGTYPE, _NOTESEQUENCE_SOURCEINFO_PARSER, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=1774, serialized_end=2228, ) _NOTESEQUENCE_TEXTANNOTATION = _descriptor.Descriptor( name='TextAnnotation', full_name='tensorflow.magenta.NoteSequence.TextAnnotation', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='time', full_name='tensorflow.magenta.NoteSequence.TextAnnotation.time', index=0, number=1, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='text', full_name='tensorflow.magenta.NoteSequence.TextAnnotation.text', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='annotation_type', full_name='tensorflow.magenta.NoteSequence.TextAnnotation.annotation_type', index=2, number=3, type=14, cpp_type=8, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[], enum_types=[ _NOTESEQUENCE_TEXTANNOTATION_TEXTANNOTATIONTYPE, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=2231, serialized_end=2421, ) _NOTESEQUENCE = _descriptor.Descriptor( name='NoteSequence', full_name='tensorflow.magenta.NoteSequence', filename=None, file=DESCRIPTOR, containing_type=None, fields=[ _descriptor.FieldDescriptor( name='id', full_name='tensorflow.magenta.NoteSequence.id', index=0, number=1, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='filename', full_name='tensorflow.magenta.NoteSequence.filename', index=1, number=2, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='collection_name', full_name='tensorflow.magenta.NoteSequence.collection_name', index=2, number=3, type=9, cpp_type=9, label=1, has_default_value=False, default_value=_b("").decode('utf-8'), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='ticks_per_quarter', 
full_name='tensorflow.magenta.NoteSequence.ticks_per_quarter', index=3, number=4, type=5, cpp_type=1, label=1, has_default_value=False, default_value=0, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='time_signatures', full_name='tensorflow.magenta.NoteSequence.time_signatures', index=4, number=5, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='key_signatures', full_name='tensorflow.magenta.NoteSequence.key_signatures', index=5, number=6, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='tempos', full_name='tensorflow.magenta.NoteSequence.tempos', index=6, number=7, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='notes', full_name='tensorflow.magenta.NoteSequence.notes', index=7, number=8, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='total_time', full_name='tensorflow.magenta.NoteSequence.total_time', index=8, number=9, type=1, cpp_type=5, label=1, has_default_value=False, default_value=float(0), message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='pitch_bends', full_name='tensorflow.magenta.NoteSequence.pitch_bends', index=9, number=10, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='control_changes', full_name='tensorflow.magenta.NoteSequence.control_changes', index=10, number=11, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='part_infos', full_name='tensorflow.magenta.NoteSequence.part_infos', index=11, number=12, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='source_info', full_name='tensorflow.magenta.NoteSequence.source_info', index=12, number=13, type=11, cpp_type=10, label=1, has_default_value=False, default_value=None, message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), _descriptor.FieldDescriptor( name='text_annotations', full_name='tensorflow.magenta.NoteSequence.text_annotations', index=13, number=14, type=11, cpp_type=10, label=3, has_default_value=False, default_value=[], message_type=None, enum_type=None, containing_type=None, is_extension=False, extension_scope=None, options=None), ], extensions=[ ], nested_types=[_NOTESEQUENCE_NOTE, _NOTESEQUENCE_TIMESIGNATURE, _NOTESEQUENCE_KEYSIGNATURE, _NOTESEQUENCE_TEMPO, 
_NOTESEQUENCE_PITCHBEND, _NOTESEQUENCE_CONTROLCHANGE, _NOTESEQUENCE_PARTINFO, _NOTESEQUENCE_SOURCEINFO, _NOTESEQUENCE_TEXTANNOTATION, ], enum_types=[ _NOTESEQUENCE_PITCHNAME, ], options=None, is_extendable=False, syntax='proto3', extension_ranges=[], oneofs=[ ], serialized_start=36, serialized_end=2935, ) _NOTESEQUENCE_NOTE.fields_by_name['pitch_name'].enum_type = _NOTESEQUENCE_PITCHNAME _NOTESEQUENCE_NOTE.containing_type = _NOTESEQUENCE _NOTESEQUENCE_TIMESIGNATURE.containing_type = _NOTESEQUENCE _NOTESEQUENCE_KEYSIGNATURE.fields_by_name['key'].enum_type = _NOTESEQUENCE_KEYSIGNATURE_KEY _NOTESEQUENCE_KEYSIGNATURE.fields_by_name['mode'].enum_type = _NOTESEQUENCE_KEYSIGNATURE_MODE _NOTESEQUENCE_KEYSIGNATURE.containing_type = _NOTESEQUENCE _NOTESEQUENCE_KEYSIGNATURE_KEY.containing_type = _NOTESEQUENCE_KEYSIGNATURE _NOTESEQUENCE_KEYSIGNATURE_MODE.containing_type = _NOTESEQUENCE_KEYSIGNATURE _NOTESEQUENCE_TEMPO.containing_type = _NOTESEQUENCE _NOTESEQUENCE_PITCHBEND.containing_type = _NOTESEQUENCE _NOTESEQUENCE_CONTROLCHANGE.containing_type = _NOTESEQUENCE _NOTESEQUENCE_PARTINFO.containing_type = _NOTESEQUENCE _NOTESEQUENCE_SOURCEINFO.fields_by_name['source_type'].enum_type = _NOTESEQUENCE_SOURCEINFO_SOURCETYPE _NOTESEQUENCE_SOURCEINFO.fields_by_name['encoding_type'].enum_type = _NOTESEQUENCE_SOURCEINFO_ENCODINGTYPE _NOTESEQUENCE_SOURCEINFO.fields_by_name['parser'].enum_type = _NOTESEQUENCE_SOURCEINFO_PARSER _NOTESEQUENCE_SOURCEINFO.containing_type = _NOTESEQUENCE _NOTESEQUENCE_SOURCEINFO_SOURCETYPE.containing_type = _NOTESEQUENCE_SOURCEINFO _NOTESEQUENCE_SOURCEINFO_ENCODINGTYPE.containing_type = _NOTESEQUENCE_SOURCEINFO _NOTESEQUENCE_SOURCEINFO_PARSER.containing_type = _NOTESEQUENCE_SOURCEINFO _NOTESEQUENCE_TEXTANNOTATION.fields_by_name['annotation_type'].enum_type = _NOTESEQUENCE_TEXTANNOTATION_TEXTANNOTATIONTYPE _NOTESEQUENCE_TEXTANNOTATION.containing_type = _NOTESEQUENCE _NOTESEQUENCE_TEXTANNOTATION_TEXTANNOTATIONTYPE.containing_type = _NOTESEQUENCE_TEXTANNOTATION _NOTESEQUENCE.fields_by_name['time_signatures'].message_type = _NOTESEQUENCE_TIMESIGNATURE _NOTESEQUENCE.fields_by_name['key_signatures'].message_type = _NOTESEQUENCE_KEYSIGNATURE _NOTESEQUENCE.fields_by_name['tempos'].message_type = _NOTESEQUENCE_TEMPO _NOTESEQUENCE.fields_by_name['notes'].message_type = _NOTESEQUENCE_NOTE _NOTESEQUENCE.fields_by_name['pitch_bends'].message_type = _NOTESEQUENCE_PITCHBEND _NOTESEQUENCE.fields_by_name['control_changes'].message_type = _NOTESEQUENCE_CONTROLCHANGE _NOTESEQUENCE.fields_by_name['part_infos'].message_type = _NOTESEQUENCE_PARTINFO _NOTESEQUENCE.fields_by_name['source_info'].message_type = _NOTESEQUENCE_SOURCEINFO _NOTESEQUENCE.fields_by_name['text_annotations'].message_type = _NOTESEQUENCE_TEXTANNOTATION _NOTESEQUENCE_PITCHNAME.containing_type = _NOTESEQUENCE DESCRIPTOR.message_types_by_name['NoteSequence'] = _NOTESEQUENCE NoteSequence = _reflection.GeneratedProtocolMessageType('NoteSequence', (_message.Message,), dict( Note = _reflection.GeneratedProtocolMessageType('Note', (_message.Message,), dict( DESCRIPTOR = _NOTESEQUENCE_NOTE, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.Note) )) , TimeSignature = _reflection.GeneratedProtocolMessageType('TimeSignature', (_message.Message,), dict( DESCRIPTOR = _NOTESEQUENCE_TIMESIGNATURE, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.TimeSignature) )) , KeySignature = _reflection.GeneratedProtocolMessageType('KeySignature', (_message.Message,), 
dict( DESCRIPTOR = _NOTESEQUENCE_KEYSIGNATURE, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.KeySignature) )) , Tempo = _reflection.GeneratedProtocolMessageType('Tempo', (_message.Message,), dict( DESCRIPTOR = _NOTESEQUENCE_TEMPO, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.Tempo) )) , PitchBend = _reflection.GeneratedProtocolMessageType('PitchBend', (_message.Message,), dict( DESCRIPTOR = _NOTESEQUENCE_PITCHBEND, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.PitchBend) )) , ControlChange = _reflection.GeneratedProtocolMessageType('ControlChange', (_message.Message,), dict( DESCRIPTOR = _NOTESEQUENCE_CONTROLCHANGE, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.ControlChange) )) , PartInfo = _reflection.GeneratedProtocolMessageType('PartInfo', (_message.Message,), dict( DESCRIPTOR = _NOTESEQUENCE_PARTINFO, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.PartInfo) )) , SourceInfo = _reflection.GeneratedProtocolMessageType('SourceInfo', (_message.Message,), dict( DESCRIPTOR = _NOTESEQUENCE_SOURCEINFO, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.SourceInfo) )) , TextAnnotation = _reflection.GeneratedProtocolMessageType('TextAnnotation', (_message.Message,), dict( DESCRIPTOR = _NOTESEQUENCE_TEXTANNOTATION, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence.TextAnnotation) )) , DESCRIPTOR = _NOTESEQUENCE, __module__ = 'music_pb2' # @@protoc_insertion_point(class_scope:tensorflow.magenta.NoteSequence) )) _sym_db.RegisterMessage(NoteSequence) _sym_db.RegisterMessage(NoteSequence.Note) _sym_db.RegisterMessage(NoteSequence.TimeSignature) _sym_db.RegisterMessage(NoteSequence.KeySignature) _sym_db.RegisterMessage(NoteSequence.Tempo) _sym_db.RegisterMessage(NoteSequence.PitchBend) _sym_db.RegisterMessage(NoteSequence.ControlChange) _sym_db.RegisterMessage(NoteSequence.PartInfo) _sym_db.RegisterMessage(NoteSequence.SourceInfo) _sym_db.RegisterMessage(NoteSequence.TextAnnotation) _NOTESEQUENCE_KEYSIGNATURE_KEY.has_options = True _NOTESEQUENCE_KEYSIGNATURE_KEY._options = _descriptor._ParseOptions(descriptor_pb2.EnumOptions(), _b('\020\001')) # @@protoc_insertion_point(module_scope)
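# --- Hedged usage sketch (added for illustration; the generated code above should
# not otherwise be edited). It relies only on standard protobuf generated-message
# APIs (attribute assignment, repeated-field .add(), SerializeToString) together
# with fields declared in the descriptor above; all concrete values are made up.
def _example_note_sequence():
    seq = NoteSequence()
    seq.id = 'example-sequence'
    seq.ticks_per_quarter = 220
    tempo = seq.tempos.add()        # repeated NoteSequence.Tempo message
    tempo.qpm = 120.0
    note = seq.notes.add()          # repeated NoteSequence.Note message
    note.pitch = 60                 # MIDI middle C
    note.velocity = 80
    note.start_time = 0.0
    note.end_time = 0.5
    seq.total_time = 0.5
    return seq.SerializeToString()  # wire-format bytes; round-trips via ParseFromString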
py
1a593ef683877b1df8366d42e5785536348988da
#!/usr/bin/env python # -*- coding: utf-8 -*- ############################################################################### # # # RMG - Reaction Mechanism Generator # # # # Copyright (c) 2002-2019 Prof. William H. Green ([email protected]), # # Prof. Richard H. West ([email protected]) and the RMG Team ([email protected]) # # # # Permission is hereby granted, free of charge, to any person obtaining a # # copy of this software and associated documentation files (the 'Software'), # # to deal in the Software without restriction, including without limitation # # the rights to use, copy, modify, merge, publish, distribute, sublicense, # # and/or sell copies of the Software, and to permit persons to whom the # # Software is furnished to do so, subject to the following conditions: # # # # The above copyright notice and this permission notice shall be included in # # all copies or substantial portions of the Software. # # # # THE SOFTWARE IS PROVIDED 'AS IS', WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # # DEALINGS IN THE SOFTWARE. # # # ############################################################################### """ This module contains functionality for the parallel execution of RMG-Py. """ import sys import traceback import warnings from functools import wraps logger = None try: from scoop import futures from scoop.futures import map, submit from scoop import shared from scoop import logger as scooplogger logger = scooplogger # logger.setLevel(20)#10 : debug, 20: info except ImportError: import logging as logging logger = logging.getLogger() logger.debug("Could not properly import SCOOP.") def warnScoopStartedProperly(func): @wraps(func) def wrapper(*args, **kwargs): futures_not_loaded = 'scoop.futures' not in sys.modules warnings.simplefilter('ignore', RuntimeWarning) try: controller_not_started = not ( sys.modules['scoop.futures'].__dict__.get("_controller", None) ) except KeyError: warnings.warn( "SCOOP was not started properly.\n" "Be sure to start your program with the " "'-m scoop' parameter. You can find " "further information in the " "documentation.\n", RuntimeWarning ) return if futures_not_loaded or controller_not_started: warnings.warn( "SCOOP was not started properly.\n" "Be sure to start your program with the " "'-m scoop' parameter. You can find " "further information in the " "documentation.\n", RuntimeWarning ) return return func(*args, **kwargs) return wrapper class WorkerWrapper(object): """ This class can be used to expose the exception trace of a worker that was running on a remote worker. Use it as follows: Wrap the function that will be running on the remote worker with the current class: futures.map(WorkerWrapper(f), mapped_data, ...) or futures.submit(WorkerWrapper(f), mapped_data, ...) """ __name__ = 'WorkerWrapper' def __init__(self, myfn): self.myfn = myfn def __call__(self, *args, **kwargs): try: return self.myfn(*args, **kwargs) except: type, value, tb = sys.exc_info() lines = traceback.format_exception(type, value, tb) print ''.join(lines) raise @warnScoopStartedProperly def broadcast(obj, key): """ Broadcasts the object across the workers using the key parameter as the key. 
""" kwargs = {key : obj} try: if shared.getConst(key): logger.debug('An object with the key {} was already broadcasted.'.format(key)) else: shared.setConst(**kwargs) except NameError: """ Name error will be caught when the SCOOP library is not imported properly. """ logger.debug('SCOOP not loaded. Not broadcasting the object {}'.format(obj)) @warnScoopStartedProperly def get(key): """ Searches for the shared variable to retrieve identified by the parameter key. """ try: data = shared.getConst(key, timeout=1e-9) return data except NameError: """ Name error will be caught when the SCOOP library is not imported properly. """ logger.debug('SCOOP not loaded. Not retrieving the shared object with key {}'.format(key)) def map_(*args, **kwargs): return map(WorkerWrapper(args[0]), *args[1:], **kwargs) def submit_(func, *args, **kwargs): """ Task submission of a function. returns the return value of the called function, or when SCOOP is loaded, the future object. """ try: task = submit(WorkerWrapper(func), *args, **kwargs)#returns immediately return task except Exception: """ Name error will be caught when the SCOOP library is not imported properly. """ logger.debug('SCOOP not loaded. Submitting serial mode.') return func(*args, **kwargs)
py
1a593f0f1e40d4ae5aedd4b2ac36cf37a5c5b29e
"""Handle method for writting a dictionary to differnet file types.""" import os from datetime import datetime import json import csv import pandas class OutputDict: def __init__(self, data: dict, outDir: str, filetype: dict): """Handle method for writting a dictionary to differnet file types. Args: data - (dict) Data to output. outDir - (str) Path to the output file directory. filetype - (str) Output filetype. """ self.data = data self.outDir = outDir self.filetype = filetype def write_output(self): """Outputs the data in the desired file format to the predefined directory. """ os.makedirs(self.outDir, exist_ok=True) getattr(self, f'_{self.filetype.lower()}')() @staticmethod def valdidate_filetype(filetype: str): """Checks if the filetype is supported.""" return str(filetype).lower() in ('json', 'csv', 'xlsx') def _output_file(self, extension: str): """Returns the path to the output file. Args: extension - (str) File extension. """ return os.path.join( self.outDir, f'link_{hex(int(datetime.now().timestamp()))}.{extension}' ) def _json(self): """Outputs the python dict as json.""" with open(self._output_file('json'), 'w') as jsonFile: jsonFile.write(json.dumps(self.data)) def _csv(self): """Outputs the python dict as csv.""" with open(self._output_file('csv'), 'w') as csvFile: writer = csv.writer(csvFile) writer.writerows([[email, link] for email, link in self.data.items()]) def _xlsx(self): """Outputs the python dict as xlsx.""" pandas.DataFrame(data=self.data, index=[0]).transpose()[1:].to_excel( self._output_file('xlsx'), header=False )
py
1a593f930d4df919e2c695dded9824df5c70605f
# -*- coding: utf-8 -*- ############################################################################### # # DestroyFavorite # Removes the specified status from a favorites list. # # Python versions 2.6, 2.7, 3.x # # Copyright 2014, Temboo Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, # either express or implied. See the License for the specific # language governing permissions and limitations under the License. # # ############################################################################### from temboo.core.choreography import Choreography from temboo.core.choreography import InputSet from temboo.core.choreography import ResultSet from temboo.core.choreography import ChoreographyExecution import json class DestroyFavorite(Choreography): def __init__(self, temboo_session): """ Create a new instance of the DestroyFavorite Choreo. A TembooSession object, containing a valid set of Temboo credentials, must be supplied. """ super(DestroyFavorite, self).__init__(temboo_session, '/Library/Twitter/Favorites/DestroyFavorite') def new_input_set(self): return DestroyFavoriteInputSet() def _make_result_set(self, result, path): return DestroyFavoriteResultSet(result, path) def _make_execution(self, session, exec_id, path): return DestroyFavoriteChoreographyExecution(session, exec_id, path) class DestroyFavoriteInputSet(InputSet): """ An InputSet with methods appropriate for specifying the inputs to the DestroyFavorite Choreo. The InputSet object is used to specify input parameters when executing this Choreo. """ def set_AccessTokenSecret(self, value): """ Set the value of the AccessTokenSecret input for this Choreo. ((required, string) The Access Token Secret provided by Twitter or retrieved during the OAuth process.) """ super(DestroyFavoriteInputSet, self)._set_input('AccessTokenSecret', value) def set_AccessToken(self, value): """ Set the value of the AccessToken input for this Choreo. ((required, string) The Access Token provided by Twitter or retrieved during the OAuth process.) """ super(DestroyFavoriteInputSet, self)._set_input('AccessToken', value) def set_ConsumerKey(self, value): """ Set the value of the ConsumerKey input for this Choreo. ((required, string) The API Key (or Consumer Key) provided by Twitter.) """ super(DestroyFavoriteInputSet, self)._set_input('ConsumerKey', value) def set_ConsumerSecret(self, value): """ Set the value of the ConsumerSecret input for this Choreo. ((required, string) The API Secret (or Consumer Secret) provided by Twitter.) """ super(DestroyFavoriteInputSet, self)._set_input('ConsumerSecret', value) def set_ID(self, value): """ Set the value of the ID input for this Choreo. ((required, string) The ID of the status to remove from your favorites.) """ super(DestroyFavoriteInputSet, self)._set_input('ID', value) def set_IncludeEntities(self, value): """ Set the value of the IncludeEntities input for this Choreo. ((optional, boolean) The "entities" node containing extra metadata will not be included when set to false.) 
""" super(DestroyFavoriteInputSet, self)._set_input('IncludeEntities', value) class DestroyFavoriteResultSet(ResultSet): """ A ResultSet with methods tailored to the values returned by the DestroyFavorite Choreo. The ResultSet object is used to retrieve the results of a Choreo execution. """ def getJSONFromString(self, str): return json.loads(str) def get_Response(self): """ Retrieve the value for the "Response" output from this Choreo execution. ((json) The response from Twitter.) """ return self._output.get('Response', None) class DestroyFavoriteChoreographyExecution(ChoreographyExecution): def _make_result_set(self, response, path): return DestroyFavoriteResultSet(response, path)
py
1a593fb3738791f7e39dee7cd6fb18774ba48271
import sys import os import numpy as np import pandas as pd import copy # extract names, temperature, pressures and make folder # function for mech reading def readmechanism(input_type, cwd): ''' method to read the mechanism file depending on the input type ''' if input_type == 'MESS': # extract parameters P_LIST, T_LIST, species_names, species_names_bimol_frag2 = data_names_mess( os.path.join(cwd, 'inp')) # extract matrix of rate constants rates = MATRIX(os.path.join(cwd, 'inp'), P_LIST, T_LIST, species_names) mech_dict = dict(zip(['P_VECT_MESS', 'T_VECT_MESS', 'SPECIES', 'SPECIES_BIMOL', 'rates'], [ P_LIST, T_LIST, species_names, species_names_bimol_frag2, rates])) elif input_type == 'CKI': species_names, species_names_bimol_frag2 = data_names_CKI( os.path.join(cwd, 'inp')) mech_dict = dict(zip(['SPECIES', 'SPECIES_BIMOL'], [ species_names, species_names_bimol_frag2])) return mech_dict ########################### extract and process MESS type mechanism ##################### def data_names_mess(cwd): """ Extract from the input file me_ktp.inp useful data: - list of pressures - list of temperatures - list of species (names) """ species_names_unimol = np.array([], dtype='<U16') species_names_bimol = np.array([], dtype='<U16') species_names_unimol_frag2 = np.array([], dtype='<U16') species_names_bimol_frag2 = np.array([], dtype='<U16') look_for_species = 0 look_for_bimol_fragment = 0 bad_wellwrds = ['WellDepth', 'WellCutoff', 'WellExtension', 'WellReductionThreshold', 'WellPartitionMethod', 'WellProjectionThreshold'] with open(os.path.join(cwd, 'me_ktp.inp')) as myfile: for line in myfile: # do not read comments if line[0] == '!': line = '' # empty line elif len(line.split('!')) > 1: # remove commented part line = line.split('!')[0] # if you enter the model section: start looking for species if line.find('Model') != -1 and line.find('ModelEnergyLimit') == -1: look_for_species = 1 if line.find('PressureList') != -1: # verifica di non poter usare semplicemente line.split pressures = [x.strip() for x in line.split()] del pressures[0] # print(pressures) # print(len(pressures)) if line.find('TemperatureList') != -1: temperatures = [x.strip() for x in line.split()] del temperatures[0] if (line.find('Well') != -1 and all(line.find(bad) == -1 for bad in bad_wellwrds)) and look_for_species == 1: full_line = [x.strip() for x in line.split()] species_names_unimol = np.append( species_names_unimol, full_line[1]) species_names_unimol_frag2 = np.append( species_names_unimol_frag2, '') if (line.find('Bimolecular')) != -1 and look_for_species == 1: look_for_bimol_fragment = 1 full_line = [x.strip() for x in line.split()] species_names_bimol = np.append( species_names_bimol, full_line[1]) if (line.find('Fragment')) != -1 and look_for_bimol_fragment > 0: look_for_bimol_fragment += 1 if look_for_bimol_fragment == 3: full_line = [x.strip() for x in line.split()] species_names_bimol_frag2 = np.append( species_names_bimol_frag2, full_line[1]) look_for_bimol_fragment = 0 myfile.close() # write files P_LIST = np.unique(np.array(pressures, dtype=np.float32)) T_LIST = np.unique(np.array(temperatures, dtype=np.int16)) species_names = np.append(species_names_unimol, species_names_bimol) species_names_frag2 = np.append( species_names_unimol_frag2, species_names_bimol_frag2) # check that bimol fragments have different names if len(list(set(species_names_bimol_frag2))) != len(list(species_names_bimol_frag2)): print('*Warning: some bimol fragments share the same names. 
check that they are isomers') return P_LIST, T_LIST, species_names, species_names_frag2 def MATRIX(cwd, P_LIST, T_LIST, species_names): """ Extract from rate.out all the rate constants in the form of a list """ # pre-allocation matrix_list = [] capture_list = [] # define checks for temperature and pressure and read the file # len works on b check_P = len(T_LIST)*len(species_names)*len(P_LIST) # define current checks check_P_curr = 0 check_list = 0 with open(os.path.join(cwd, 'rate.out')) as myfile: for line in myfile: # find the 'Temperature-Species rate tables to extract the rates if line.find('Temperature-Species Rate Tables:') != -1: check_list = 1 if ((check_list == 1) and (check_P_curr < check_P)): # add the check on 'Pressure' in case the values of temperature and pressure are accidentally the same if any(line.find(T) != -1 for T in np.array(T_LIST, dtype=str)) and (line.find('Pressure') == -1): check_P_curr += 1 rates = [x.strip() for x in line.split()] # replace '***' values with 0 rates = [x.replace('***', '0') for x in rates] matrix_list.append(rates[1:-2]) # -2 excluded capture_list.append(float(rates[-1])) if line.find('Temperature-Pressure Rate Tables:') != -1: check_list = 0 # don't read the file anylonger myfile.close() matrix_float = np.array(matrix_list, dtype=np.float64) # remove negative values from the matrix n_T = len(T_LIST) n_P = len(P_LIST) warnings_neg = '' # generate list of warnings for negative values for ii, row in enumerate(matrix_float): mask_neg = np.where(row < 0) mask_toohigh = np.where(row > capture_list[ii]) row[mask_toohigh] = 0 for mask_neg_i in mask_neg[0]: row[mask_neg_i] = 0 R = int(ii/n_T/n_P) P = int((ii-R*n_T*n_P)/n_T) T = ii-R*n_T*n_P-P*n_T warnings_neg = warnings_neg + \ 'removed negative k_{R} at {T} K and {P} atm, be careful \n'.format( R=species_names[R], T=T_LIST[T], P=P_LIST[P]) np.savetxt('warnings_negval_messrates.txt', [warnings_neg], fmt='%s') return matrix_float def MATRIX_TP(T, P, T_LIST, P_LIST, species_names, matrix_float): """ Extract square matrix of k_ij at the selected temperature and pressure T,P are expected to be numbers, either floating or integers, to be compared with T,P in the lists """ # transform input types if not isinstance(T, int): try: T = int(T) except: print('input T not convertible to number') return None if not isinstance(P, float): try: P = float(P) except: print('input P not convertible to number') return None n_T = len(T_LIST) n_P = len(P_LIST) # preallocate the matrix n_species = len(species_names) mat_TP = np.zeros((n_species, n_species)) # reconstruct indices # set as option the exception case: if you put a value not present # index [0][0] to save just the index itself P_index = np.where((P == P_LIST))[0][0] T_index = np.where((T == T_LIST))[0][0] for ii in range(0, n_species): # identify the string to place in the row: ki->prods rates_row = matrix_float[ii*(n_P)*(n_T)+P_index*(n_T)+T_index, :] # use list comprehension: kij = rate from reactant i to product j col_indices = np.array([jj != ii for jj in range(0, n_species)]) mat_TP[ii, col_indices] = rates_row # this returns the matrix at a certain temperature and pressure return mat_TP def REAC_P(P, reac, P_LIST, T_LIST, species_names, matrix_float): """ Method: only needed for MESS input. 
it extracts from the matrix of rate constants the reactivity of the selected reactant at a certain pressure output: matrix [n_T*n_species-1]|P useful if: you need to check the reactivity of the reactant in the full range of temperature """ n_T = len(T_LIST) n_P = len(P_LIST) # select the reactant if not isinstance(reac, str): print('input is not a string. please insert "reac" as string') return None # if the input is not a string: show an error elif sum(np.array([reac == r_list for r_list in species_names])) == 0: print('selected reactant not found in the list of species. select another reactant') return None # if the reactant is not in the list: show an error else: try: float(P) except ValueError as e: print('P not convertible to number, error: ' + str(e)) return None # derive the reactivity matrix at all the different temperatures P_index = np.where((P == P_LIST))[0][0] reac_index = np.array( [reac == r_list for r_list in species_names], dtype=int) ii_reac = np.where(reac_index == 1)[0][0] # index of the reactant ii_in = ii_reac*(n_P)*(n_T)+P_index*(n_T) rates_reac = matrix_float[ii_in:ii_in+n_T, :] return rates_reac # extract and process CHEMKIN type mechanism ########################à def data_names_CKI(cwd): ''' extract species names of CKI input mechanism ''' # names of all the primary species species_names = np.array([], dtype='<U32') # names of the "second" reactant found in bimolecular reaction channels species_names_bimol = np.array([], dtype='<U32') # check in the file if you reading the part with all the reactions check_reactions = 0 with open(os.path.join(cwd, 'kin.CKI')) as myfile: for line in myfile: if line.find('REACTIONS') != -1: check_reactions += 1 # read the lines with the reaction rates; # only irreversible reactions are considered ('=>') # lines starting with a comment ('!') are not considered if check_reactions == 1 and line.find('=>') != -1 and line.strip()[0] != '!': line = line.split('!')[0] # remove the comments REACS = [x.strip() for x in line.split('=>')][0] REACS = [x.strip() for x in REACS.split('+')] rest_ofline = [x.strip() for x in line.split('=>')][1] # the arrhenius parameters will be the last three elements in the split # ARR_PAR = rest_ofline.split()[-3:] PRODS = ''.join(rest_ofline.split()[:-3]) PRODS = PRODS.split('+') reacting_species = [REACS, PRODS] # ALLOCATE THE SPECIES INTO THE ARRAYS # if species are not into the array of species: append them for x in reacting_species: # IF LEN IS 1 AND REACTION IS LIKE 2A=>PRODS OR REACS=>2B: RECOGNIZE SPECIES B # and add another product if x[0][0] == '2': x[0] = x[0][1:] x.append(x[0]) if len(x) == 1 and np.array([x[0] == SP for SP in species_names]).any() != True: species_names = np.append(species_names, x[0]) species_names_bimol = np.append( species_names_bimol, '') elif len(x) == 2: r1 = x[0] r2 = x[1] # check that the combination is not present flag = 0 for i in np.arange(0, len(species_names)): s1 = species_names[i] s2 = species_names_bimol[i] # set flag to 1 if you find the same set of species if (r1==s1 and r2==s2) or (r1==s2 and r2==s1): flag = 1 if flag == 0: species_names = np.append(species_names, r1) species_names_bimol = np.append(species_names_bimol, r2) if len(x) > 2: print( 'Wrong number of elements when reading the reaction: line ' + line) print('exiting now..') sys.exit() return species_names, species_names_bimol def copy_CKI_processed(oldpath, newpath, PRODSINKS, ISOM_EQUIL, REAC, PRODS): ''' IN THIS METHOD, THE MECHANISM IS COPIED TO NEWPATH FOLDER AFTER PREPROCESSING PRODSINKS = 1: THE 
PRODUCTS ARE SET AS IRREVERSIBLE SINKS, SO THE LINES ARE COMMENTED WITH ! ISOM_EQUIL = 1: ALL BIMOLECULAR REACTIONS ARE DELETED,SO THAT EQUILIBRIUM WITHIN SETS OF ISOMERS IS REACHED. => ALL BIMOLECULAR REACTIONS COMMENTED WITH ! ''' if os.path.isdir(newpath) == False: raise RuntimeError( 'The destination folder for the new mech does not exist ') else: with open(os.path.join(oldpath, 'kin.CKI'), mode='r') as mech_orig_file: mech_orig = mech_orig_file.readlines() newfile = copy.deepcopy(mech_orig) for idx, row in enumerate(newfile): if ISOM_EQUIL == 1 and row.find('=>') != -1 and row.strip()[0] != '!': # check first if you only want isomer equilibrium: so you delete all bimolecular reactions reactant = [x.strip() for x in row.split('=>')][0] reactant = [x.strip() for x in reactant.split('+')][0] product = [x.strip() for x in row.split('=>')][1] # only bimolecular products will be meaningful product = [x.strip() for x in product.split('+')][0] product = product.split()[0] # delete all the reactions involving species other than the reactants if np.array([reactant == REAC]).any() and np.array([product == REAC]).any(): delete = 'NO' else: delete = 'YES' elif PRODSINKS == 1 and row.find('=>') != -1 and row.strip()[0] != '!': # if there is no isom_equil, you did not delete all bimol. reactions: # so if you set product as sinks, then you have to delete those lines. # NB product sinks are incompatible with isom_equil # extract the first reactant of each row reactant = [x.strip() for x in row.split('=>')][0] reactant = [x.strip() for x in reactant.split('+')][0] # if the reactant of the line is in the list of products, comment the line if np.array([reactant == np.array(PRODS)]).any(): delete = 'YES' else: delete = 'NO' else: # any other case (read a reaction or an empty line with no need of deleting anything) delete = 'NO' # ON THE BASIS OF THE CONDITIONS ABOVE: DELETE THE REACTION OR NOT if delete == 'YES': # comment the line newfile[idx] = '!' + row # if duplicate reaction: comment also that # the "lower" notation is to trasfer all to lower cases so that you make the search independent of the font if newfile[idx+1].lower().find('DUPLICATE'.lower()) != -1 or newfile[idx+1].lower().find('DUP'.lower()) != -1: newfile[idx+1] = '!' + newfile[idx+1] # if PLOG: comment also all the lines below until you don't find PLOG anymore check_plog = 0 iline = 1 while check_plog == 0: if newfile[idx+iline].find('PLOG') != -1: newfile[idx+iline] = '!' + newfile[idx+iline] iline += 1 else: check_plog = 1 # check for duplicates if newfile[idx+iline].lower().find('DUPLICATE'.lower()) != -1 or newfile[idx+iline].lower().find('DUP'.lower()) != -1: newfile[idx+iline] = '!' + newfile[idx+iline] elif delete == 'NO': # copy the line as it is newfile[idx] = row # remove the file if it exists and write the new one if os.path.isfile(os.path.join(newpath, 'kin.txt')): os.remove(os.path.join(newpath, 'kin.txt')) with open(os.path.join(newpath, 'kin.txt'), mode='x') as inp: inp.writelines(newfile)
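# --- Hedged usage sketch (not part of the original module) ---
# Pulls a MESS mechanism from disk with readmechanism() and extracts rate constants with
# MATRIX_TP() and REAC_P(). It assumes this file is importable as `mech_reader` (a
# hypothetical name) and that the working directory holds inp/me_ktp.inp and inp/rate.out.
import os
import mech_reader  # hypothetical module name for the file above

cwd = os.getcwd()
mech = mech_reader.readmechanism('MESS', cwd)

# Square k_ij matrix at one (T, P) pair; T and P must be values that actually appear in
# the MESS temperature/pressure lists, otherwise the np.where lookups come back empty.
k_TP = mech_reader.MATRIX_TP(
    1000, 1.0,
    mech['T_VECT_MESS'], mech['P_VECT_MESS'],
    mech['SPECIES'], mech['rates'])

# Reactivity of one reactant over the whole temperature range at P = 1 atm
# (note the argument order: P first, then the reactant name, then P_LIST before T_LIST).
k_reac = mech_reader.REAC_P(
    1.0, mech['SPECIES'][0],
    mech['P_VECT_MESS'], mech['T_VECT_MESS'],
    mech['SPECIES'], mech['rates'])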
py
1a594022180b2aa6148de0d97b85c5515f5a0f8f
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
# -*- coding: utf-8 -*-

from detectron2.config import CfgNode as CN


def add_detr_config(cfg):
    """
    Add config for DETR.
    """
    cfg.MODEL.DETR = CN()
    cfg.MODEL.DETR.NUM_CLASSES = 80
    # FBNet
    cfg.MODEL.FBNET_V2.OUT_FEATURES = ["trunk3"]
    # For Segmentation
    cfg.MODEL.DETR.FROZEN_WEIGHTS = ''
    # LOSS
    cfg.MODEL.DETR.DEFORMABLE = False
    cfg.MODEL.DETR.USE_FOCAL_LOSS = False
    cfg.MODEL.DETR.CENTERED_POSITION_ENCODIND = False
    cfg.MODEL.DETR.CLS_WEIGHT = 1.0
    cfg.MODEL.DETR.NUM_FEATURE_LEVELS = 4
    cfg.MODEL.DETR.GIOU_WEIGHT = 2.0
    cfg.MODEL.DETR.L1_WEIGHT = 5.0
    cfg.MODEL.DETR.DEEP_SUPERVISION = True
    cfg.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1
    # TRANSFORMER
    cfg.MODEL.DETR.NHEADS = 8
    cfg.MODEL.DETR.DROPOUT = 0.1
    cfg.MODEL.DETR.DIM_FEEDFORWARD = 2048
    cfg.MODEL.DETR.ENC_LAYERS = 6
    cfg.MODEL.DETR.DEC_LAYERS = 6
    cfg.MODEL.DETR.PRE_NORM = False
    cfg.MODEL.DETR.HIDDEN_DIM = 256
    cfg.MODEL.DETR.NUM_OBJECT_QUERIES = 100
    cfg.SOLVER.OPTIMIZER = "ADAMW"
    cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1
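# --- Hedged usage sketch (not part of the original file) ---
# add_detr_config() expects the base config to already expose MODEL.FBNET_V2 (as the
# d2go defaults do); with a plain detectron2 config that node is created first here so
# the call does not raise. add_detr_config is assumed to be in scope from the module above.
from detectron2.config import get_cfg
from detectron2.config import CfgNode as CN

cfg = get_cfg()
cfg.MODEL.FBNET_V2 = CN()   # stand-in for the d2go base config node
add_detr_config(cfg)
print(cfg.MODEL.DETR.NUM_OBJECT_QUERIES)   # -> 100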
py
1a5940d24ddd108e485365d04986cdb518111b50
import random, math from agent import Agent from variables import * MAX_NEIGHBOR_FORCE = abs(math.log(FISH_SENSING_DISTANCE/FISH_DESIRED_DIST)) # Model attempt some sort of propagation wave. # Affected by propagation wave that pushes it farther away. # If a marked fish is in a certain PROP_DIST (meaning getting # attacked by Predator), then it experiences repulsive force. # Smaller repulsive farther from the marked fish it is. class PropagationFish(Agent): def __init__(self, sim, start_loc = None): random.seed() super().__init__(sim, start_loc) blue = random.randint(150, 255) green = random.randint(0, 100) red = random.randint(0, 100) missing = 400-blue-green-red red += missing//2 if red > 255: red = 255 green += missing//2 if green > 255: green = 255 self.nearby_predators = [] self.nearby_marked = [] self.marked = False self.color = (red, green, blue) def update(self): total_x_vec = 0.0 total_y_vec = 0.0 # Computing effect that nearby predators on fish. if len(self.nearby_predators) > 0: predators = self.nearby_predators for predator in predators: x_vec = 0.0 y_vec = 0.0 pred, dist = predator target = self.get_perceived_target_pos(pred.loc) x, y = self.get_vector_to_target(target) total_force = - PREDATOR_FISH_FORCE * pow((1.0)/dist, 4) total_x_vec += x * total_force total_y_vec += y * total_force # Compute effect of propagation wave and compute other effects normally. elif len(self.neighbors) > 0 or len(self.nearby_marked) > 0: marked = self.nearby_marked for fish in marked: x_vec = 0.0 y_vec = 0.0 a, dist = fish target = self.get_perceived_target_pos(a.loc) x, y = self.get_vector_to_target(target) total_force = -(1.0)*pow((1.0)/dist, 0.2) total_x_vec += x * total_force total_y_vec += y * total_force neighbors = self.neighbors if len(neighbors) > FISH_MAX_NEIGHBORS: neighbors = sorted(self.neighbors, key=lambda x: x[1]) neighbors = neighbors[:FISH_MAX_NEIGHBORS] for neighbor in neighbors: x_vec = 0 y_vec = 0 fish, dist = neighbor target = self.get_perceived_target_pos(fish.loc) x, y = self.get_vector_to_target(target) # Made the force between fishes stronger. Maybe change neighbor # force constant in variables.py? if dist > FISH_DESIRED_DIST: total_force = 10*FISH_NEIGHBOR_FORCE * math.log(dist/FISH_DESIRED_DIST)/MAX_NEIGHBOR_FORCE else: total_force = -pow(FISH_DESIRED_DIST-dist, 1.5) total_x_vec += x * total_force total_y_vec += y * total_force elif len(self.nearby_predators) == 0 and len(self.neighbors) == 0: # randomly adjust speed total_x_vec = (random.random() - 0.5) * 2 * FISH_ACCEL total_y_vec = (random.random() - 0.5) * 2 * FISH_ACCEL # normalize acceleration accel = abs(total_x_vec) + abs(total_y_vec) if accel > FISH_ACCEL: adj = FISH_ACCEL/accel total_x_vec *= adj total_y_vec *= adj self.x_speed += total_x_vec self.y_speed += total_y_vec # normalize speed speed = abs(self.x_speed) + abs(self.y_speed) if speed > FISH_SPEED: adj = FISH_SPEED/speed self.x_speed *= adj self.y_speed *= adj self.move()
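# --- Small numeric illustration (not part of the original file) ---
# Mirrors the two clipping steps at the end of update(): the proposed acceleration is
# capped at FISH_ACCEL and the resulting speed at FISH_SPEED, both measured with an
# L1 norm (|x| + |y|). The constants below are stand-ins for the real values in variables.py.
FISH_ACCEL, FISH_SPEED = 0.5, 2.0
total_x_vec, total_y_vec = 0.9, -0.3           # proposed acceleration components
accel = abs(total_x_vec) + abs(total_y_vec)    # 1.2 > FISH_ACCEL, so rescale
if accel > FISH_ACCEL:
    adj = FISH_ACCEL / accel
    total_x_vec *= adj                         # 0.375
    total_y_vec *= adj                         # -0.125
x_speed = 1.6 + total_x_vec                    # 1.975
y_speed = -0.7 + total_y_vec                   # -0.825
speed = abs(x_speed) + abs(y_speed)            # 2.8 > FISH_SPEED, so rescale again
if speed > FISH_SPEED:
    adj = FISH_SPEED / speed
    x_speed *= adj
    y_speed *= adj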
py
1a5940d8eed4bc08301add15d5e09804d25c720a
# # (c) Copyright 2017-2018 Hewlett Packard Enterprise Development LP # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import os def load_proxy(): # docker exposes all of these variables as build args # except for all_proxy proxy_keys = ('http_proxy', 'https_proxy', 'no_proxy', 'all_proxy', 'ftp_proxy') return {key: os.environ[key] for key in proxy_keys if key in os.environ} def split_image(image): """Get the tag from a full image name. 127.0.0.1:5000/image:latest => 127.0.0.1:5000/image, latest image:tag => image, tag """ parts = image.split('/', 1) if len(parts) == 1 or ( '.' not in parts[0] and ':' not in parts[0]): host, img = '', image else: host, img = parts if ':' in img: imagename, tag = img.rsplit(':', 1) else: imagename, tag = img, 'latest' if host: return host + "/" + imagename, tag return imagename, tag def all_blob_names(tree, parent=''): """This returns all blobs names from git tree object """ for t in tree.trees: for b in all_blob_names(t, os.path.join(parent, tree.name)): yield b for b in tree.blobs: yield os.path.join(parent, tree.name, b.name)
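# --- Hedged sanity checks for split_image() (not part of the original file) ---
# The first two cases mirror the docstring; the rest exercise the default tag and the
# registry-host detection. `docker_utils` is a hypothetical name for the module above.
from docker_utils import split_image

assert split_image("127.0.0.1:5000/image:latest") == ("127.0.0.1:5000/image", "latest")
assert split_image("image:tag") == ("image", "tag")
assert split_image("image") == ("image", "latest")
# A dotted first component is treated as a registry host and kept in the name.
assert split_image("registry.example.com/team/app") == ("registry.example.com/team/app", "latest")
# A plain org prefix (no '.' or ':') is not a host, so the full name is preserved too.
assert split_image("myorg/app:1.2") == ("myorg/app", "1.2")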
py
1a59417b9179fc71f5185fca375bb4b2b787e1f6
import subprocess import os import json from uuid import uuid4 from vo2mft.util import _solve_front_path, _twodof_solve_front_path, _twodof_body_fixed_solve_front_path def solve(env, eps=1e-8, ions=False, flags=None, twodof=False, twodof_body_indep=False): '''Return the solved final env corresponding to the given env, solved to accuracy given by eps. ''' solver_path = _solve_front_path() if twodof and twodof_body_indep: solver_path = _twodof_solve_front_path() elif twodof: solver_path = _twodof_body_fixed_solve_front_path() in_path, out_path = str(uuid4()), str(uuid4()) write_env_file(env, in_path) # Run solver. solver_call = None if twodof: solver_call = [solver_path, "--eps", str(eps)] if ions: solver_call.append("--ions") if flags != None: solver_call.extend(flags) solver_call.extend([in_path, out_path]) elif ions and not twodof: solver_call = [solver_path, "--eps", str(eps), "--ions"] if flags != None: solver_call.extend(flags) solver_call.extend([in_path, out_path]) subprocess.call(solver_call) # Read solver output, if it exists. final_env_path = out_path + "_fenv.json" final_env = None try: final_env = read_env_file(final_env_path) except FileNotFoundError: pass # Clean up solver input/output. try: os.remove(in_path) os.remove(final_env_path) except FileNotFoundError: pass return final_env def solve_set(envs, eps=1e-8, ions=False, flags=None, twodof=False, twodof_body_indep=False): '''Return a list of solved final envs corresponding to the given list of envs, solved to accuracy given by eps. The set of envs is solved serially (only one process is invoked). ''' final_envs = [] for i, initial_env in enumerate(envs): this_flags = None if flags != None: this_flags = flags[i] this_final_env = solve(initial_env, eps, ions, this_flags, twodof, twodof_body_indep) final_envs.append(this_final_env) return final_envs def write_env_file(env, env_path): env_str = json.dumps(env) with open(env_path, 'w') as fp: fp.write(env_str) def read_env_file(env_path): env = None with open(env_path, 'r') as fp: env_str = fp.read() env = json.loads(env_str) return env
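# --- Hedged usage sketch (not part of the original file) ---
# Assumes this module is the package's solve.py, that the external solver front-ends
# located by _solve_front_path() and friends are built and on disk, and that
# base_env.json is an env file those solvers accept (all of these are assumptions).
from vo2mft.solve import solve, solve_set, read_env_file  # assumed import path

base_env = read_env_file("base_env.json")          # hypothetical input file
final_env = solve(base_env, eps=1e-6, ions=True)   # None if the solver wrote no output

# Sweep a made-up parameter 'B' by solving several envs serially.
envs = [dict(base_env, B=b) for b in (0.0, 0.1, 0.2)]
finals = solve_set(envs, eps=1e-6, ions=True)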
py
1a594200ac034c8ff882369c01b708bccf0f05dd
import joblib import pytest import os import numpy as np import pandas as pd from sklearn.compose import ColumnTransformer from sklearn.dummy import DummyClassifier, DummyRegressor from sklearn.pipeline import Pipeline from sklearn.preprocessing import StandardScaler, OneHotEncoder from mlserver.settings import ModelSettings, ModelParameters from mlserver.types import InferenceRequest from mlserver_sklearn import SKLearnModel TESTS_PATH = os.path.dirname(__file__) TESTDATA_PATH = os.path.join(TESTS_PATH, "testdata") def pytest_collection_modifyitems(items): """ Add pytest.mark.asyncio marker to every test. """ for item in items: item.add_marker("asyncio") @pytest.fixture def model_uri(tmp_path) -> str: n = 4 X = np.random.rand(n) y = np.random.rand(n) clf = DummyClassifier(strategy="prior") clf.fit(X, y) model_uri = os.path.join(tmp_path, "sklearn-model.joblib") joblib.dump(clf, model_uri) return model_uri @pytest.fixture def model_settings(model_uri: str) -> ModelSettings: return ModelSettings( name="sklearn-model", parameters=ModelParameters(uri=model_uri, version="v1.2.3"), ) @pytest.fixture async def model(model_settings: ModelSettings) -> SKLearnModel: model = SKLearnModel(model_settings) await model.load() return model @pytest.fixture def inference_request() -> InferenceRequest: payload_path = os.path.join(TESTDATA_PATH, "inference-request.json") return InferenceRequest.parse_file(payload_path) @pytest.fixture async def regression_model(tmp_path) -> SKLearnModel: # Build a quick DummyRegressor n = 4 X = np.random.rand(n) y = np.random.rand(n) clf = DummyRegressor() clf.fit(X, y) model_uri = os.path.join(tmp_path, "sklearn-regression-model.joblib") joblib.dump(clf, model_uri) settings = ModelSettings( name="sklearn-regression-model", parameters=ModelParameters(uri=model_uri, version="v1.2.3"), ) model = SKLearnModel(settings) await model.load() return model @pytest.fixture def pandas_model_uri(tmp_path) -> str: data: pd.DataFrame = pd.DataFrame( {"a": [1, 2, 3], "op": ["+", "+", "-"], "y": [11, 22, -33]} ) X: pd.DataFrame = data.drop("y", axis=1) y: pd.DataFrame = data["y"] numeric_features = ["a"] numeric_transformer = StandardScaler() categorical_features = ["op"] categorical_transformer = OneHotEncoder(handle_unknown="ignore") preprocessor = ColumnTransformer( transformers=[ ("num", numeric_transformer, numeric_features), ("cat", categorical_transformer, categorical_features), ] ) model = Pipeline( steps=[("preprocessor", preprocessor), ("regression", DummyRegressor())] ) model.fit(X, y) model_uri = os.path.join(tmp_path, "sklearn-pandas-model.joblib") joblib.dump(model, model_uri) return model_uri @pytest.fixture def pandas_model_settings(pandas_model_uri: str) -> ModelSettings: return ModelSettings( name="sklearn-pandas-model", parameters=ModelParameters(uri=pandas_model_uri, version="v1.2.3"), ) @pytest.fixture async def pandas_model(pandas_model_settings: ModelSettings) -> SKLearnModel: model = SKLearnModel(pandas_model_settings) await model.load() return model @pytest.fixture def pandas_inference_request() -> InferenceRequest: inference_request = { "parameters": {"content_type": "pd"}, "inputs": [ {"name": "a", "datatype": "INT32", "data": [10], "shape": [1]}, { "name": "op", "datatype": "BYTES", "data": ["-"], "shape": [1], "parameters": {"content_type": "str"}, }, ], } return InferenceRequest.parse_obj(inference_request)
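# --- Hedged sketch of a companion test module (not part of the original conftest) ---
# These tests would live next to this conftest.py (e.g. in test_sklearn.py) and lean on
# the fixtures above. They assume SKLearnModel.predict() is an async coroutine returning
# an InferenceResponse, which is the usual mlserver contract but is not shown here; the
# asyncio marker added in pytest_collection_modifyitems lets the bare async tests run.
async def test_predict(model, inference_request):
    response = await model.predict(inference_request)
    assert response.model_name == "sklearn-model"
    assert len(response.outputs) >= 1


async def test_pandas_predict(pandas_model, pandas_inference_request):
    response = await pandas_model.predict(pandas_inference_request)
    assert len(response.outputs) >= 1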