import random
import unittest

from tested_code import random_not_42, find_foo, random_float_between_inclusive, random_float_between_noninclusive


class TestRandomNot42(unittest.TestCase):
    def test_many_values(self):
        """call the function 100 times and make sure the result isn't 42"""
        for n_attempt in range(100):
            value = random_not_42()
            self.assertNotEqual(value, 42)


class TestFindFoo(unittest.TestCase):
    """tests for the find_foo() function

    find_foo(s) returns an object if "foo" is a sub-string of s, and None otherwise.
    """

    # valid_names = [
    #     'foo',
    #     'Bar',
    #     'foorBar',
    #     'foo_bar',
    #     '_fooBar',
    #     'foo1',
    #     'foo_',
    # ]
    #
    # invalid_names = [
    #     '1foo',
    #     'foo-bar',
    #     '$foo',
    #     'foo bar',
    #     'foo+bar4ever',
    # ]

    strings_with_foo = [
        'foo',
        'aaa foo bbb',
        'aaa foo',
        'foo bbb',
        'no foo for you, come back one year!',
    ]

    strings_without_foo = [
        'boo',
        'aaa bbb',
        'four',
    ]

    def test_identical(self):
        """check that find_foo finds 'foo' in 'foo'"""
        self.assertIsNotNone(find_foo('foo'))

    def test_strings_with_foo(self):
        """check that find_foo finds 'foo' in all of the strings with 'foo'"""
        for s in self.strings_with_foo:
            self.assertIsNotNone(find_foo(s))

    def test_strings_without_foo(self):
        """check that find_foo returns None for all of the strings without 'foo'"""
        for s in self.strings_without_foo:
            self.assertIsNone(find_foo(s))


class TestRandomFloatBetweenInclusive(unittest.TestCase):
    def test_random_values(self):
        for i in range(100):
            start = random.random()
            end = random.random()
            if start > end:
                start, end = end, start
            value = random_float_between_inclusive(start, end)
            self.assertGreaterEqual(value, start)
            self.assertLessEqual(value, end)


class TestRandomFloatBetweenNoninclusive(unittest.TestCase):
    def test_random_values(self):
        for i in range(100):
            start = random.random()
            end = random.random()
            if start > end:
                start, end = end, start
            value = random_float_between_noninclusive(start, end)
            self.assertGreater(value, start)
            self.assertLess(value, end)
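The `tested_code` module exercised above is not part of this dump. The following is a minimal sketch of implementations that would satisfy these tests; the function bodies are assumptions inferred from the assertions, not the original module.

# Hypothetical tested_code.py -- inferred from the tests above, not the original module.
import random
import re


def random_not_42():
    """Return a random integer that is never 42."""
    value = random.randint(0, 100)
    return value if value != 42 else 41


def find_foo(s):
    """Return a match object if 'foo' is a substring of s, else None."""
    return re.search("foo", s)


def random_float_between_inclusive(start, end):
    """Return a float in [start, end]."""
    return random.uniform(start, end)


def random_float_between_noninclusive(start, end):
    """Return a float strictly inside (start, end)."""
    while True:
        value = random.uniform(start, end)
        if start < value < end:
            return value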
# -*- coding: utf-8 -*-
import unittest

import dukpy


class TestReactJS(unittest.TestCase):
    def test_hello_world(self):
        jsx = dukpy.jsx_compile('var react_hello = <h1>Hello, world!</h1>;')
        jsi = dukpy.JSInterpreter()
        result = jsi.evaljs([
            '''
            var React = require('react/react'),
                ReactDOM = require('react/react-dom-server');
            ''',
            jsx,
            'ReactDOM.renderToStaticMarkup(react_hello, null);'
        ])
        assert result == '<h1>Hello, world!</h1>', result

    def test_jsx_mixed(self):
        code = '''
var React = require('react/react'),
    ReactDOM = require('react/react-dom-server');
ReactDOM.renderToStaticMarkup(<h1>Hello, world!</h1>, null);
'''
        jsx = dukpy.jsx_compile(code)
        res = dukpy.evaljs(jsx)
        assert res == '<h1>Hello, world!</h1>', res

    def test_react_binding(self):
        code = '''
var React = require('react/react'),
    ReactDOM = require('react/react-dom-server');

var HelloWorld = React.createClass({
    render: function() {
        return (
            <div className="helloworld">
                Hello {this.props.data.name}
            </div>
        );
    }
});

ReactDOM.renderToStaticMarkup(<HelloWorld data={dukpy.data}/>, null);
'''
        jsx = dukpy.jsx_compile(code)
        res = dukpy.evaljs(jsx, data={'id': 1, 'name': "Alessandro"})
        assert res == '<div class="helloworld">Hello Alessandro</div>', res

    def test_jsx6(self):
        code = '''
import React from 'react/react';
var ReactDOM = require('react/react-dom-server');

class HelloWorld extends React.Component {
    render() {
        return (
            <div className="helloworld">
                Hello {this.props.data.name}
            </div>
        );
    }
}

ReactDOM.renderToStaticMarkup(<HelloWorld data={dukpy.data}/>, null);
'''
        jsx = dukpy.jsx_compile(code)
        res = dukpy.evaljs(jsx, data={'id': 1, 'name': "Alessandro"})
        assert res == '<div class="helloworld">Hello Alessandro</div>', res
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import datetime
import json
import logging
import os
from functools import lru_cache
from typing import Callable

from pandas import Series
from pandas._libs.missing import NAType


# Based on recipe for structured logging
# https://docs.python.org/3/howto/logging-cookbook.html#implementing-structured-logging
class LogEncoder(json.JSONEncoder):
    # pylint: disable=method-hidden
    def default(self, o):
        if isinstance(o, set):
            return tuple(o)
        elif isinstance(o, str):
            return o.encode("unicode_escape").decode("ascii")
        elif isinstance(o, Series):
            return o.to_dict()
        elif isinstance(o, NAType):
            return None
        elif isinstance(o, Exception):
            return f"{o.__class__.__name__}: {str(o)}"
        return super(LogEncoder, self).default(o)


class StructuredMessage:
    def __init__(self, message, **kwargs):
        self._kwargs = kwargs
        self._kwargs["message"] = message

    @lru_cache()
    def __str__(self):
        return LogEncoder().encode(self._kwargs)


class ErrorLogger:
    """ Simple class to be inherited by other classes to add error logging functions. """

    name: str
    """ Name of the logger, defaults to the class name. """

    logger: logging.Logger
    """ Instance of logger which will be used. Each ErrorLogger instance has its own Logger. """

    def __init__(self, name: str = None):
        # Default to the class name
        self.name = name or self.__class__.__name__

        # Create an instance of logger
        self.logger = logging.getLogger(self.name)

        # Read logging level from env variable, default to INFO
        level_name = os.getenv("LOG_LEVEL") or "INFO"
        self.logger.setLevel(getattr(logging, level_name, logging.INFO))

        # Only add a handler if it does not already have one
        if not self.logger.hasHandlers():
            # Configure the handler to use our preferred logging format
            handler = logging.StreamHandler()
            handler.setFormatter(logging.Formatter("%(message)s"))
            self.logger.addHandler(handler)
            self.log_debug(f"Initialized logger {self.name} with level {level_name}")

    def timestamp(self) -> str:
        return datetime.datetime.now().isoformat()[:24]

    def _log_msg(self, log_func: Callable, msg: str, **kwargs) -> None:
        log_func(
            StructuredMessage(
                msg,
                logname=self.name,
                timestamp=self.timestamp(),
                # TODO: consider whether we should keep classname or if logname is sufficient
                classname=self.__class__.__name__,
                loglevel=log_func.__name__,
                **kwargs,
            )
        )

    def log_error(self, msg: str, **kwargs) -> None:
        self._log_msg(self.logger.error, msg, **kwargs)

    def log_warning(self, msg: str, **kwargs) -> None:
        self._log_msg(self.logger.warning, msg, **kwargs)

    def log_info(self, msg: str, **kwargs) -> None:
        self._log_msg(self.logger.info, msg, **kwargs)

    def log_debug(self, msg: str, **kwargs) -> None:
        self._log_msg(self.logger.debug, msg, **kwargs)
import os
import tempfile

from mock import Mock, patch

from dusty.systems.nfs import server
from dusty import constants

from ....testcases import DustyTestCase


class TestNFSServer(DustyTestCase):
    def setUp(self):
        super(TestNFSServer, self).setUp()

    def tearDown(self):
        super(TestNFSServer, self).tearDown()

    @patch('dusty.systems.config_file.get_dusty_config_section')
    def test_get_current_exports(self, fake_get_dusty_config_section):
        fake_get_dusty_config_section.return_value = 'export numba 1\n/private/etc/some/repo 192.168.59.103 -alldirs -maproot=0:0\n'
        expected_current_exports = set(['export numba 1\n',
                                        '/private/etc/some/repo 192.168.59.103 -alldirs -maproot=0:0\n'])
        self.assertEqual(expected_current_exports, server._get_current_exports())

    def test_maproot_for_repo(self):
        fake_repo = Mock()
        fake_repo.local_path = tempfile.mkdtemp()
        expected_maproot = '{}:{}'.format(os.stat(fake_repo.local_path).st_uid,
                                          os.stat(fake_repo.local_path).st_gid)
        self.assertEqual(expected_maproot, server._maproot_for_repo(fake_repo))

    def test_write_exports_config(self):
        exports_set = set(['export1\n', 'export2\n'])
        constants.EXPORTS_PATH = tempfile.mkstemp()[1]
class Node:
    # Constructor to initialize the node object
    def __init__(self, data):
        self.data = data
        self.next = None


class LinkedList:
    # Function to initialize head
    def __init__(self):
        self.head = None

    # Function to insert a new node at the beginning
    def push(self, new_data):
        new_node = Node(new_data)
        new_node.next = self.head
        self.head = new_node

    # Utility function to print the linked list
    def printList(self):
        temp = self.head
        while temp:
            print(temp.data)
            temp = temp.next

    # Floyd's cycle detection: advance a slow and a fast pointer;
    # if they ever meet, the list contains a loop
    def detectLoop(self):
        slow_p = self.head
        fast_p = self.head
        while slow_p and fast_p and fast_p.next:
            slow_p = slow_p.next
            fast_p = fast_p.next.next
            if slow_p == fast_p:
                print("Found Loop")
                return
        print("Not Found Loop")


# Driver program for testing
llist = LinkedList()
llist.push(20)
llist.push(4)
llist.push(15)
llist.push(10)

# Create a loop for testing
llist.head.next.next.next.next = llist.head

llist.detectLoop()
# -*- coding: utf-8 -*- <import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>argparse<import_stmt>os<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>tensorflow<as>tf<import_from_stmt>dltk.core.metrics dice<import_from_stmt>dltk.core.losses sparse_balanced_crossentropy<import_from_stmt>dltk.networks.segmentation.unet residual_unet_3d<import_from_stmt>dltk.networks.segmentation.unet asymmetric_residual_unet_3d<import_from_stmt>dltk.networks.segmentation.fcn residual_fcn_3d<import_from_stmt>dltk.core.activations leaky_relu<import_from_stmt>dltk.io.abstract_reader Reader<import_from_stmt>reader read_fn<import_stmt>json<line_sep># PARAMS EVAL_EVERY_N_STEPS=1000<line_sep>EVAL_STEPS=1<line_sep>NUM_CLASSES=14<line_sep>NUM_CHANNELS=1<line_sep>BATCH_SIZE=4<line_sep>SHUFFLE_CACHE_SIZE=128<line_sep>MAX_STEPS=100000<line_sep># MODEL <def_stmt>model_fn features labels mode params<block_start>"""Summary Args: features (TYPE): Description labels (TYPE): Description mode (TYPE): Description params (TYPE): Description Returns: TYPE: Description """<line_sep># 1. create a model and its outputs filters=params["filters"]<line_sep>strides=params["strides"]<line_sep>num_residual_units=params["num_residual_units"]<line_sep>loss_type=params["loss"]<line_sep>net=params["net"]<def_stmt>lrelu x<block_start><return>leaky_relu(x 0.1)<block_end><if_stmt>net<eq>'fcn'<block_start>net_output_ops=residual_fcn_3d(features['x'] NUM_CLASSES num_res_units=num_residual_units filters=filters strides=strides activation=lrelu mode=mode)<block_end><elif_stmt>net<eq>'unet'<block_start>net_output_ops=residual_unet_3d(features['x'] NUM_CLASSES num_res_units=num_residual_units filters=filters strides=strides activation=lrelu mode=mode)<block_end><elif_stmt>net<eq>'asym_unet'<block_start>net_output_ops=asymmetric_residual_unet_3d(features['x'] NUM_CLASSES num_res_units=num_residual_units filters=filters strides=strides activation=lrelu mode=mode)<block_end># 1.1 Generate predictions only (for `ModeKeys.PREDICT`) <if_stmt>mode<eq>tf.estimator.ModeKeys.PREDICT<block_start><return>tf.estimator.EstimatorSpec(mode=mode predictions=net_output_ops export_outputs={'out':tf.estimator.export.PredictOutput(net_output_ops)})<block_end># 2. set up a loss function <if_stmt>loss_type<eq>'ce'<block_start>ce=tf.nn.sparse_softmax_cross_entropy_with_logits(logits=net_output_ops['logits'] labels=labels['y'])<line_sep>loss=tf.reduce_mean(ce)<block_end><elif_stmt>loss_type<eq>'balce'<block_start>loss=sparse_balanced_crossentropy(net_output_ops['logits'] labels['y'])<block_end># 3. define a training op and ops for updating # moving averages (i.e. 
for batch normalisation) global_step=tf.train.get_global_step()<if_stmt>params["opt"]<eq>'adam'<block_start>optimiser=tf.train.AdamOptimizer(learning_rate=params["learning_rate"] epsilon=1e-5)<block_end><elif_stmt>params["opt"]<eq>'momentum'<block_start>optimiser=tf.train.MomentumOptimizer(learning_rate=params["learning_rate"] momentum=0.9)<block_end><elif_stmt>params["opt"]<eq>'rmsprop'<block_start>optimiser=tf.train.RMSPropOptimizer(learning_rate=params["learning_rate"] momentum=0.9)<block_end>update_ops=tf.get_collection(tf.GraphKeys.UPDATE_OPS)<with_stmt>tf.control_dependencies(update_ops)<block_start>train_op=optimiser.minimize(loss global_step=global_step)<block_end># 4.1 (optional) create custom image summaries for tensorboard my_image_summaries={}<line_sep>my_image_summaries['feat_t1']=tf.expand_dims(features['x'][: 0 : : 0] 3)<line_sep>my_image_summaries['labels']=tf.expand_dims(tf.cast(labels['y'] tf.float32)[: 0 : :] 3)<line_sep>my_image_summaries['predictions']=tf.expand_dims(tf.cast(net_output_ops['y_'] tf.float32)[: 0 : :] 3)<line_sep>[tf.summary.image(name image)<for>name,image my_image_summaries.items()]<line_sep># 4.2 (optional) create custom metric summaries for tensorboard dice_tensor=tf.py_func(dice [net_output_ops['y_'] labels['y'] tf.constant(NUM_CLASSES)] tf.float32)<line_sep>[tf.summary.scalar('dsc_l{}'.format(i) dice_tensor[i])<for>i range(NUM_CLASSES)]<line_sep># 5. Return EstimatorSpec object <return>tf.estimator.EstimatorSpec(mode=mode predictions=net_output_ops loss=loss train_op=train_op eval_metric_ops=<none>)<block_end><def_stmt>train args<block_start>np.random.seed(42)<line_sep>tf.set_random_seed(42)<line_sep>print('Setting up...')<with_stmt>open(args.config)<as>f<block_start>run_config=json.load(f)<block_end># Parse csv files for file names train_filenames=pd.read_csv(args.train_csv dtype=object keep_default_na=<false> na_values=[]).as_matrix()<line_sep>val_filenames=pd.read_csv(args.val_csv dtype=object keep_default_na=<false> na_values=[]).as_matrix()<line_sep># Set up a data reader to handle the file i/o. 
reader_params={'n_examples':32 'example_size':[64 64 64] 'extract_examples':<true>}<line_sep>reader_example_shapes={'features':{'x':reader_params['example_size']+[NUM_CHANNELS ]} 'labels':{'y':reader_params['example_size']}}<line_sep>reader=Reader(read_fn {'features':{'x':tf.float32} 'labels':{'y':tf.int32}})<line_sep># Get input functions and queue initialisation hooks # for training and validation data train_input_fn,train_qinit_hook=reader.get_inputs(train_filenames tf.estimator.ModeKeys.TRAIN example_shapes=reader_example_shapes batch_size=BATCH_SIZE shuffle_cache_size=SHUFFLE_CACHE_SIZE params=reader_params)<line_sep>val_input_fn,val_qinit_hook=reader.get_inputs(val_filenames tf.estimator.ModeKeys.EVAL example_shapes=reader_example_shapes batch_size=BATCH_SIZE shuffle_cache_size=min(SHUFFLE_CACHE_SIZE EVAL_STEPS) params=reader_params)<line_sep>config=tf.ConfigProto()<line_sep># config.gpu_options.allow_growth = True # Instantiate the neural network estimator nn=tf.estimator.Estimator(model_fn=model_fn model_dir=args.save_path params=run_config config=tf.estimator.RunConfig(session_config=config))<line_sep># Hooks for validation summaries val_summary_hook=tf.contrib.training.SummaryAtEndHook(os.path.join(args.save_path 'eval'))<line_sep>step_cnt_hook=tf.train.StepCounterHook(every_n_steps=EVAL_EVERY_N_STEPS output_dir=args.save_path)<line_sep>print('Starting training...')<try_stmt><block_start><for_stmt>_ range(MAX_STEPS<floordiv>EVAL_EVERY_N_STEPS)<block_start>nn.train(input_fn=train_input_fn hooks=[train_qinit_hook step_cnt_hook] steps=EVAL_EVERY_N_STEPS)<line_sep>results_val=nn.evaluate(input_fn=val_input_fn hooks=[val_qinit_hook val_summary_hook] steps=EVAL_STEPS)<line_sep>print('Step = {}; val loss = {:.5f};'.format(results_val['global_step'] results_val['loss']))<block_end><block_end><except_stmt>KeyboardInterrupt<block_start><pass><block_end>print('Stopping now.')<line_sep>export_dir=nn.export_savedmodel(export_dir_base=args.save_path serving_input_receiver_fn=reader.serving_input_receiver_fn(reader_example_shapes))<line_sep>print('Model saved to {}.'.format(export_dir))<block_end><if_stmt>__name__<eq>'__main__'# Set up argument parser <block_start>parser=argparse.ArgumentParser(description='Example: Synapse CT example segmentation training script')<line_sep>parser.add_argument('--resume' default=<false> action='store_true')<line_sep>parser.add_argument('--verbose' default=<false> action='store_true')<line_sep>parser.add_argument('--cuda_devices' '-c' default='0')<line_sep>parser.add_argument('--save_path' '-p' default='/tmp/synapse_ct_seg/')<line_sep>parser.add_argument('--train_csv' default='train.csv')<line_sep>parser.add_argument('--val_csv' default='val.csv')<line_sep>parser.add_argument('--config' default="config.json")<line_sep>args=parser.parse_args()<line_sep># Set verbosity <if_stmt>args.verbose<block_start>os.environ['TF_CPP_MIN_LOG_LEVEL']='1'<line_sep>tf.logging.set_verbosity(tf.logging.INFO)<block_end><else_stmt><block_start>os.environ['TF_CPP_MIN_LOG_LEVEL']='3'<line_sep>tf.logging.set_verbosity(tf.logging.ERROR)<block_end># GPU allocation options os.environ["CUDA_VISIBLE_DEVICES"]=args.cuda_devices<line_sep># Create model save path os.system("rm -rf %s"%args.save_path)<line_sep>os.system("mkdir -p %s"%args.save_path)<line_sep># Call training train(args)<block_end>
import multiprocessing as mp
from abc import abstractmethod

from probflow.utils.base import BaseDataGenerator


class DataGenerator(BaseDataGenerator):
    """Abstract base class for a data generator, which uses multiprocessing
    to load the data in parallel.

    TODO

    User needs to implement:

    * :meth:`~__init__`
    * :meth:`~n_samples`
    * :meth:`~batch_size`
    * :meth:`~get_batch`

    And can optionally implement:

    * :meth:`~on_epoch_start`
    * :meth:`~on_epoch_end`
    """

    def __init__(self, num_workers=None):
        self.num_workers = num_workers

    @abstractmethod
    def get_batch(self, index):
        """Generate one batch of data"""

    def __getitem__(self, index):
        """Generate one batch of data"""

        # No multiprocessing
        if self.num_workers is None:
            return self.get_batch(index)

        # Multiprocessing
        else:
            # Start the next worker
            pid = index + self.num_workers
            if pid < len(self):
                self._workers[pid].start()

            # Return data from the multiprocessing queue
            return self._queue.get()

    def __iter__(self):
        """Get an iterator over batches"""

        # Multiprocessing?
        if self.num_workers is not None:

            def get_data(index, queue):
                queue.put(self.get_batch(index))

            # Create the queue and worker processes
            self._queue = mp.Queue()
            self._workers = [
                mp.Process(target=get_data, args=(i, self._queue))
                for i in range(len(self))
            ]

            # Start the first num_workers workers
            for i in range(min(self.num_workers, len(self))):
                self._workers[i].start()

        # Keep track of what batch we're on
        self._batch = -1

        # Return iterator
        return self

    def __next__(self):
        """Get the next batch"""
        self._batch += 1
        if self._batch < len(self):
            return self[self._batch]
        else:
            raise StopIteration()
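A minimal usage sketch of this abstract class, serving batches from two in-memory numpy arrays. It assumes that `n_samples` and `batch_size` are meant to be implemented as properties and that the base class derives the number of batches (`__len__`) from them; those details are not shown in this file.

# Hypothetical subclass for illustration only; not part of probflow.
import numpy as np


class ArrayDataGenerator(DataGenerator):

    def __init__(self, x, y, batch_size=32, num_workers=None):
        self.x = x
        self.y = y
        self._batch_size = batch_size
        super().__init__(num_workers=num_workers)

    @property
    def n_samples(self):
        # Total number of samples in the dataset
        return self.x.shape[0]

    @property
    def batch_size(self):
        return self._batch_size

    def get_batch(self, index):
        # Slice out the index-th batch of features and targets
        lo = index * self._batch_size
        hi = lo + self._batch_size
        return self.x[lo:hi], self.y[lo:hi]


# gen = ArrayDataGenerator(np.random.randn(100, 3), np.random.randn(100))
# for x_batch, y_batch in gen:
#     ...  # consume one batch per iteration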
# vim: expandtab:ts=4:sw=4
import argparse
import os

import deep_sort_app


def parse_args():
    """ Parse command line arguments.
    """
    parser = argparse.ArgumentParser(description="MOTChallenge evaluation")
    parser.add_argument(
        "--mot_dir", help="Path to MOTChallenge directory (train or test)",
        required=True)
    parser.add_argument(
        "--detection_dir", help="Path to detections.", default="detections",
        required=True)
    parser.add_argument(
        "--output_dir", help="Folder in which the results will be stored. Will "
        "be created if it does not exist.", default="results")
    parser.add_argument(
        "--min_confidence", help="Detection confidence threshold. Disregard "
        "all detections that have a confidence lower than this value.",
        default=0.0, type=float)
    parser.add_argument(
        "--min_detection_height", help="Threshold on the detection bounding "
        "box height. Detections with height smaller than this value are "
        "disregarded", default=0, type=int)
    parser.add_argument(
        "--nms_max_overlap", help="Non-maxima suppression threshold: Maximum "
        "detection overlap.", default=1.0, type=float)
    parser.add_argument(
        "--max_cosine_distance", help="Gating threshold for cosine distance "
        "metric (object appearance).", type=float, default=0.2)
    parser.add_argument(
        "--nn_budget", help="Maximum size of the appearance descriptors "
        "gallery. If None, no budget is enforced.", type=int, default=100)
    return parser.parse_args()


if __name__ == "__main__":
    args = parse_args()
    os.makedirs(args.output_dir, exist_ok=True)
    sequences = os.listdir(args.mot_dir)
    for sequence in sequences:
        print("Running sequence %s" % sequence)
        sequence_dir = os.path.join(args.mot_dir, sequence)
        detection_file = os.path.join(args.detection_dir, "%s.npy" % sequence)
        output_file = os.path.join(args.output_dir, "%s.txt" % sequence)
        deep_sort_app.run(
            sequence_dir, detection_file, output_file, args.min_confidence,
            args.nms_max_overlap, args.min_detection_height,
            args.max_cosine_distance, args.nn_budget, display=False)
"""Plots solution routes"""<import_from_stmt>typing List Iterable Optional<import_stmt>folium<import_stmt>numpy<as>np<import_stmt>polyline<import_stmt>requests<import_from_stmt>loggibud.v1.types CVRPSolution Point<import_from_stmt>loggibud.v1.distances OSRMConfig<line_sep># All available map colors MAP_COLORS=("black" "blue" "darkred" "purple" "red" "orange" "green" "pink" "darkblue" "beige" "gray" "lightgreen" "lightblue" "lightgray" "cadetblue" )<def_stmt>plot_cvrp_solution_routes solution:CVRPSolution route_indices_to_plot:Optional[List[int]]=<none> config:Optional[OSRMConfig]=<none> <arrow><none><block_start>"""Plot solution routes in a map along the streets Parameters ---------- solution A solution to any solver with the vehicles routes to plot route_indices_to_plot If specified, selects a smaller subset of routes to plot by their indices. This can be useful to reduce the clutter in case of a solution with too many vehicles config OSRM configuration """<line_sep>config=config<or>OSRMConfig()<line_sep># Initialize map centered at the mean of the origins origins_mean=np.mean([(vehicle.origin.lat vehicle.origin.lng)<for>vehicle solution.vehicles] axis=0 )<line_sep>m=folium.Map(location=origins_mean zoom_start=12 tiles="cartodbpositron" )<line_sep>num_vehicles=len(solution.vehicles)<line_sep>route_indices_to_plot=route_indices_to_plot<or>range(num_vehicles)<line_sep>vehicles_subset=[solution.vehicles[i]<for>i route_indices_to_plot]<for_stmt>i,vehicle enumerate(vehicles_subset)<block_start>vehicle_color=MAP_COLORS[i%len(MAP_COLORS)]<line_sep># Plot origin origin=(vehicle.origin.lat vehicle.origin.lng)<line_sep>folium.CircleMarker(origin color="red" radius=3 weight=5).add_to(m)<line_sep># Plot street outlines wiring=_route_wiring(vehicle.circuit config)<line_sep>folium.PolyLine(wiring color=vehicle_color weight=1.0 popup=f"Vehicle {i}").add_to(m)<line_sep># Plot the deliveries as regular points <for_stmt>delivery vehicle.deliveries<block_start>folium.Circle(location=(delivery.point.lat delivery.point.lng) radius=10 fill=<true> color=vehicle_color popup=(f"Vehicle {i} ({delivery.point.lat}, {delivery.point.lng})") ).add_to(m)<block_end><block_end><return>m<block_end><def_stmt>_route_wiring points:Iterable[Point] config<block_start>coords_uri=";".join(f"{point.lng},{point.lat}"<for>point points)<line_sep>response=requests.get(f"{config.host}/route/v1/driving/{coords_uri}?overview=simplified" timeout=config.timeout_s )<line_sep>data=response.json()<line_sep>line=data["routes"][0]["geometry"]<line_sep><return>[(lat lng)<for>lat,lng polyline.decode(line)]<block_end><def_stmt>plot_cvrp_solution solution:CVRPSolution route_indices_to_plot:Optional[List[int]]=<none><arrow><none><block_start>"""Plot solution deliveries in a map This is a simplified version showing only the edges between each delivery. It does not require an OSRM server configuration. Parameters ---------- solution A solution to any solver with the vehicles routes to plot route_indices_to_plot If specified, selects a smaller subset of routes to plot by their indices. 
This can be useful to reduce the clutter in case of a solution with too many vehicles """<line_sep># Initialize map centered at the mean of the origins origins_mean=np.mean([(vehicle.origin.lat vehicle.origin.lng)<for>vehicle solution.vehicles] axis=0 )<line_sep>m=folium.Map(location=origins_mean zoom_start=12 tiles="cartodbpositron" )<line_sep>num_vehicles=len(solution.vehicles)<line_sep>route_indices_to_plot=route_indices_to_plot<or>range(num_vehicles)<line_sep>vehicles_subset=[solution.vehicles[i]<for>i route_indices_to_plot]<for_stmt>i,vehicle enumerate(vehicles_subset)<block_start>origin=(vehicle.origin.lat vehicle.origin.lng)<line_sep>folium.CircleMarker(origin color="red" radius=3 weight=5).add_to(m)<line_sep>vehicle_color=MAP_COLORS[i%len(MAP_COLORS)]<line_sep>vehicle_coords=[(point.lat point.lng)<for>point vehicle.circuit]<line_sep>folium.Polygon(vehicle_coords popup=f"Vehicle {i}" color=vehicle_color weight=1 ).add_to(m)<block_end><return>m<block_end>
# from @有一种悲伤叫颓废 """ 注: 1. 主要用来求三角剖分和维诺图,算法的思路可以看我的这期视频:https://www.bilibili.com/video/BV1Ck4y1z7VT 2. 时间复杂度O(nlogn),一般情况应该够用,如发现bug请联系颓废 3. 只需导入两个函数:DelaunayTrianglation(求德劳内三角剖分), Voronoi(求维诺图) """<import_stmt>numpy<as>np<import_from_stmt>manimlib.mobject.types.vectorized_mobject VGroup<import_from_stmt>manimlib.constants PI<import_from_stmt>manimlib.utils.config_ops digest_config<import_from_stmt>manimlib.mobject.geometry Dot Line Polygon<import_from_stmt>manimlib.scene.scene Scene<import_from_stmt>manimlib.utils.space_ops normalize<line_sep>#import time #import math #from manimlib.imports import * #from manim_sandbox.utils.imports import * # 以下比例建议2不要乱改,精度大,或者小,都有可能出bug # 小于误差则等 ev=np.exp(1)<power>PI/1000000000<line_sep>ev_sq=ev<power>2<line_sep># 无穷大 Infinity=333<line_sep># 判断两个点是否相等,小于误差的平方,则相等,O(1) <def_stmt>point_is_equal p q<block_start>p,q=np.array(p) np.array(q)<line_sep># 两点距离的平方小于误差的平方,则相等 <if_stmt>np.dot(q-p q-p)<l>ev_sq<block_start><return><true><block_end><return><false><block_end># b在向量pq左为正,右为负,O(1) <def_stmt>cross2 p q b<block_start>''' 叉积公式 \begin{align} ToLeft(p, q, b)=\begin{vmatrix} x_p & y_p & 1\\ x_q & y_q & 1\\ x_b & y_b & 1\\ \end{vmatrix}\end{align} '''<line_sep><return>p[0]<times>q[1]-p[1]<times>q[0]+q[0]<times>b[1]-q[1]<times>b[0]+b[0]<times>p[1]-b[1]<times>p[0]<block_end># 忽略误差,b在向量pq左为正,右为负,O(1) <def_stmt>ToLeft p q b<block_start>a=cross2(p q b)<line_sep># 小于误差,认为在向量上 <if_stmt>abs(a)<l>ev<block_start><return>0<block_end># 隐含else abs(a) >= ev: <return>a<block_end># 点d在三角形pqb内,返回True,O(1) <def_stmt>InTriangle p q b d<block_start>tl1=ToLeft(p q d)<if_stmt>abs(tl1)<l>ev<block_start>tl2=ToLeft(q b d)<line_sep>tl3=ToLeft(b p d)<if_stmt>tl2<l>ev<and>tl3<l>ev<or>tl2<g>-ev<and>tl3<g>-ev<block_start><return><true><block_end><return><false><block_end><if_stmt>tl1<g>ev<block_start><if_stmt>ToLeft(q b d)<g>-ev<and>ToLeft(b p d)<g>-ev<block_start><return><true><block_end><return><false><block_end><if_stmt>tl1<l>-ev<block_start><if_stmt>ToLeft(q b d)<l>ev<and>ToLeft(b p d)<l>ev<block_start><return><true><block_end><return><false><block_end><block_end># 点d在三点p,q,b的外接圆内,返回True,O(1) <def_stmt>InCircle p q b d<block_start>''' 点与三点圆关系 \begin{align} InCircle(p, q, b, d)=\begin{vmatrix} x_p & y_p & x_p^2+y_p^2 & 1\\ x_q & y_q & x_q^2+y_q^2 & 1\\ x_b & y_b & x_b^2+y_b^2 & 1\\ x_d & y_d & x_d^2+y_d^2 & 1\\ \end{vmatrix}\end{align} '''<line_sep>a13=p[0]<power>2+p[1]<power>2<line_sep>a23=q[0]<power>2+q[1]<power>2<line_sep>a33=b[0]<power>2+b[1]<power>2<line_sep>a43=d[0]<power>2+d[1]<power>2<line_sep>det=np.linalg.det([[p[0] p[1] a13 1] [q[0] q[1] a23 1] [b[0] b[1] a33 1] [d[0] d[1] a43 1] ])<if_stmt>det<l>-ev<block_start><return><true><block_end><return><false><block_end># 三点外接圆圆心,O(1) <def_stmt>CircumcircleCenter p q b<block_start>''' \begin{align} &三点外接圆圆心公式\\ &x=\frac{1}{2}\begin{vmatrix} 1 & x_p^2+y_p^2 & y_p\\ 1 & x_q^2+y_q^2 & y_q\\ 1 & x_b^2+y_b^2 & y_b\\ \end{vmatrix}/\begin{vmatrix} 1 & x_p & y_p\\ 1 & x_q & y_q\\ 1 & x_b & y_b\\ \end{vmatrix}\\ &y=\frac{1}{2}\begin{vmatrix} 1 & x_p & x_p^2+y_p^2\\ 1 & x_q & x_q^2+y_q^2\\ 1 & x_b & x_b^2+y_b^2\\ \end{vmatrix}/\begin{vmatrix} 1 & x_p & y_p\\ 1 & x_q & y_q\\ 1 & x_b & y_b\\ \end{vmatrix} \end{align} '''<line_sep>a1=p[0]<power>2+p[1]<power>2<line_sep>a2=q[0]<power>2+q[1]<power>2<line_sep>a3=b[0]<power>2+b[1]<power>2<line_sep>det1=np.linalg.det([[1 p[0] p[1]] [1 q[0] q[1]] [1 b[0] b[1]] ])<if_stmt>det1<eq>0<block_start>print("三点共线")<line_sep><return><none><block_end>det2=np.linalg.det([[1 a1 p[1]] [1 a2 q[1]] [1 a3 b[1]] 
])<line_sep>det3=np.linalg.det([[1 p[0] a1] [1 q[0] a2] [1 b[0] a3] ])<line_sep><return>np.array([det2/det1 det3/det1 0])/2<block_end># 面 <class_stmt>Face()<block_start><def_stmt>__init__ self halfedge# 标记访问面 <block_start>self.Visit=<false><line_sep># 属于这个面的一个半边 self.HalfEdge=halfedge<line_sep># 面对应的桶 self.Bucket=<none><line_sep># 外接圆圆心,求维诺图的时候用到 self.Center=<none><block_end><block_end># 顶点 <class_stmt>Vertice()<block_start><def_stmt>__init__ self point# 顶点坐标 <block_start>self.Point=point<line_sep># 由顶点引出的一条半边 self.HalfEdge=<none><block_end><block_end># 半边 <class_stmt>HalfEdge()<block_start><def_stmt>__init__ self start end# 标记访问 <block_start>self.Visit=<false><line_sep># 边的起点 self.Start=start<line_sep># 边的终点 self.End=end<line_sep># 边的孪生兄弟 self.Twin=<none><line_sep># 半边所在的平面 self.Face=<none><line_sep># 边的前驱 self.Pre=<none><line_sep># 边的后继 self.Suc=<none><block_end><block_end># 桶 <class_stmt>Bucket()<block_start><def_stmt>__init__ self points# 桶装的点 <block_start>self.Points=points<line_sep># 桶对应的面 self.Face=<none><block_end><block_end># 初始化无穷大的网,O(1) <def_stmt>InitInfNet points=<none># 初始化无穷远点 # 逆时针 <block_start>infv1=Vertice(np.array([Infinity 0 0]))<line_sep>infv2=Vertice(np.array([0 Infinity 0]))<line_sep>infv3=Vertice(np.array([-Infinity -Infinity 0]))<line_sep># 初始化无穷远半边 halfedge1=HalfEdge(infv1 infv2)<line_sep>halfedge2=HalfEdge(infv2 infv3)<line_sep>halfedge3=HalfEdge(infv3 infv1)<line_sep># 初始化点引出的边 infv1.HalfEdge=halfedge1<line_sep>infv2.HalfEdge=halfedge2<line_sep>infv3.HalfEdge=halfedge3<line_sep># 初始化无穷大面 face1=Face(halfedge1)<line_sep># 初始化无穷半边的前驱,后继,和所在的面 halfedge1.Pre=halfedge3<line_sep>halfedge1.Suc=halfedge2<line_sep>halfedge1.Face=face1<line_sep>halfedge2.Pre=halfedge1<line_sep>halfedge2.Suc=halfedge3<line_sep>halfedge2.Face=face1<line_sep>halfedge3.Pre=halfedge2<line_sep>halfedge3.Suc=halfedge1<line_sep>halfedge3.Face=face1<line_sep># 初始化桶,此桶囊括了所有的点 bucket1=Bucket(points)<line_sep>bucket1.Face=face1<line_sep># 面对应的桶 face1.Bucket=bucket1<line_sep><return>face1<block_end># 得到多边形的带符号面积,对于不自交的多边形,正表示逆时针多边形,负表示顺时针多边形,特殊考虑0,O(n) <def_stmt>get_polygon_directed_area polygon<block_start>a=polygon.get_vertices()<line_sep>l=len(a)<line_sep><return>1/2<times>sum([a[i][0]<times>a[(i+1)%l][1]-a[(i+1)%l][0]<times>a[i][1]<for>i range(l)])<block_end># 边翻转,O(1) <def_stmt>EdgeFlipping halfedge# 记录面的旧visit值 <block_start>visitvalue=halfedge.Face.Visit<line_sep># 待翻转边所在的四边形的顶点 v1=halfedge.Start<line_sep>v2=halfedge.Twin.Suc.End<line_sep>v3=halfedge.End<line_sep>v4=halfedge.Suc.End<line_sep># 顶点的坐标 p1=v1.Point<line_sep>p2=v2.Point<line_sep>p3=v3.Point<line_sep>p4=v4.Point<line_sep># 待翻转边所在的四边形的边,ei由vi引出 e1=halfedge.Twin.Suc<line_sep>e2=halfedge.Twin.Pre<line_sep>e3=halfedge.Suc<line_sep>e4=halfedge.Pre<line_sep># 修改顶点引出的边为非翻转的边(待翻转边所在的四边形的边) v1.HalfEdge=e1<line_sep>v2.HalfEdge=e2<line_sep>v3.HalfEdge=e3<line_sep>v4.HalfEdge=e4<line_sep># 待翻转边所在的四边形的两个桶中的点 oldpoints=[*halfedge.Face.Bucket.Points *halfedge.Twin.Face.Bucket.Points]<line_sep># 重新分桶 newpoints1,newpoints2=[] []<for_stmt>oldpoint oldpoints<block_start><if_stmt>InTriangle(p1 p2 p4 oldpoint)<block_start>newpoints1.append(oldpoint)<block_end><else_stmt><block_start>newpoints2.append(oldpoint)<block_end><block_end># 重新构造的面,逆时针 newface1,newface2=Face(e1) Face(e2)<line_sep>newface1.Visit=visitvalue<line_sep>newface2.Visit=visitvalue<line_sep># 构造翻转后的边 e5,e6=HalfEdge(v2 v4) HalfEdge(v4 v2)<line_sep>e5.Twin=e6<line_sep>e6.Twin=e5<line_sep>e5.Visit=visitvalue<line_sep>e6.Visit=visitvalue<line_sep># 构造newface1的边 
e1.Suc=e5<line_sep>e5.Suc=e4<line_sep>e4.Suc=e1<line_sep>e1.Pre=e4<line_sep>e4.Pre=e5<line_sep>e5.Pre=e1<line_sep># 构造newface2的边 e2.Suc=e3<line_sep>e3.Suc=e6<line_sep>e6.Suc=e2<line_sep>e2.Pre=e6<line_sep>e6.Pre=e3<line_sep>e3.Pre=e2<line_sep># 边指向newface1 e1.Face=newface1<line_sep>e4.Face=newface1<line_sep>e5.Face=newface1<line_sep># 边指向newface2 e2.Face=newface2<line_sep>e3.Face=newface2<line_sep>e6.Face=newface2<line_sep># 构造两个新桶,并维持桶和面的联系 bucket1=Bucket(newpoints1)<line_sep>bucket2=Bucket(newpoints2)<line_sep>bucket1.Face=newface1<line_sep>bucket2.Face=newface2<line_sep>newface1.Bucket=bucket1<line_sep>newface2.Bucket=bucket2<block_end># 点vo撕裂面face,O(1) <def_stmt>ClipFace face vo remainedpoints<block_start>visitvalue=face.Visit<line_sep>hf1=face.HalfEdge<line_sep>hf2=hf1.Suc<line_sep>hf3=hf2.Suc<line_sep># 剪开面 clipface1=Face(hf1)<line_sep>clipface2=Face(hf2)<line_sep>clipface3=Face(hf3)<line_sep>clipface1.Visit=visitvalue<line_sep>clipface2.Visit=visitvalue<line_sep>clipface3.Visit=visitvalue<line_sep># face1 hf1_pre=HalfEdge(vo hf1.Start)<line_sep>hf1_suc=HalfEdge(hf1.End vo)<line_sep>hf1_pre.Visit=visitvalue<line_sep>hf1_suc.Visit=visitvalue<line_sep>hf1.Pre=hf1_pre<line_sep>hf1.Suc=hf1_suc<line_sep>hf1_pre.Pre=hf1_suc<line_sep>hf1_pre.Suc=hf1<line_sep>hf1_suc.Pre=hf1<line_sep>hf1_suc.Suc=hf1_pre<line_sep>hf1.Face=clipface1<line_sep>hf1_pre.Face=clipface1<line_sep>hf1_suc.Face=clipface1<line_sep># face2 hf2_pre=HalfEdge(vo hf2.Start)<line_sep>hf2_suc=HalfEdge(hf2.End vo)<line_sep>hf2_pre.Visit=visitvalue<line_sep>hf2_suc.Visit=visitvalue<line_sep>hf2.Pre=hf2_pre<line_sep>hf2.Suc=hf2_suc<line_sep>hf2_pre.Pre=hf2_suc<line_sep>hf2_pre.Suc=hf2<line_sep>hf2_suc.Pre=hf2<line_sep>hf2_suc.Suc=hf2_pre<line_sep>hf2.Face=clipface2<line_sep>hf2_pre.Face=clipface2<line_sep>hf2_suc.Face=clipface2<line_sep># face3 hf3_pre=HalfEdge(vo hf3.Start)<line_sep>hf3_suc=HalfEdge(hf3.End vo)<line_sep>hf3_pre.Visit=visitvalue<line_sep>hf3_suc.Visit=visitvalue<line_sep>hf3.Pre=hf3_pre<line_sep>hf3.Suc=hf3_suc<line_sep>hf3_pre.Pre=hf3_suc<line_sep>hf3_pre.Suc=hf3<line_sep>hf3_suc.Pre=hf3<line_sep>hf3_suc.Suc=hf3_pre<line_sep>hf3.Face=clipface3<line_sep>hf3_pre.Face=clipface3<line_sep>hf3_suc.Face=clipface3<line_sep>vo.HalfEdge=hf1_pre<line_sep># twin hf1_pre.Twin=hf3_suc<line_sep>hf3_suc.Twin=hf1_pre<line_sep>hf2_pre.Twin=hf1_suc<line_sep>hf1_suc.Twin=hf2_pre<line_sep>hf3_pre.Twin=hf2_suc<line_sep>hf2_suc.Twin=hf3_pre<line_sep>## 点放入桶 # 桶所在三角形的顶点 point=vo.Point<line_sep>p1=hf1.Start.Point<line_sep>p2=hf2.Start.Point<line_sep>p3=hf3.Start.Point<line_sep># 拆分桶 clipbucketps1,clipbucketps2,clipbucketps3=[] [] []<for_stmt>eachpoint remainedpoints<block_start><if_stmt>InTriangle(p1 p2 point eachpoint)<block_start>clipbucketps1.append(eachpoint)<block_end><elif_stmt>InTriangle(p2 p3 point eachpoint)<block_start>clipbucketps2.append(eachpoint)<block_end><else_stmt><block_start>clipbucketps3.append(eachpoint)<block_end><block_end># 撕裂的平面关联桶 clipbucket1=Bucket(clipbucketps1)<line_sep>clipbucket2=Bucket(clipbucketps2)<line_sep>clipbucket3=Bucket(clipbucketps3)<line_sep>clipface1.Bucket=clipbucket1<line_sep>clipface2.Bucket=clipbucket2<line_sep>clipface3.Bucket=clipbucket3<line_sep>clipbucket1.Face=clipface1<line_sep>clipbucket2.Face=clipface2<line_sep>clipbucket3.Face=clipface3<line_sep><return>clipface1 clipface2 clipface3<block_end># 访问网,O(n) <def_stmt>VisitNet face<block_start>visitvalue=face.Visit<line_sep>notvisitvalue=<not>visitvalue<line_sep>faces=[face]<line_sep># 访问过 
face.Visit=notvisitvalue<line_sep>delaunaynet=[]<while_stmt>faces<block_start>eachface=faces[-1]<line_sep>faces.pop(-1)<line_sep># 面所在的三条边 e1=eachface.HalfEdge<line_sep>e2=e1.Suc<line_sep>e3=e2.Suc<line_sep>## 将正在访问的面的三个相邻的面加入faces eis=[e1 e2 e3]<for_stmt>ei eis# ei的孪生兄弟 <block_start>eiTwin=ei.Twin<line_sep># ei未被访问过 <if_stmt>ei.Visit<eq>visitvalue<block_start>ls,le=ei.Start.Point ei.End.Point<if_stmt>abs(ls[0])<ne>Infinity<and>abs(ls[1])<ne>Infinity<and>abs(le[0])<ne>Infinity<and>abs(le[1])<ne>Infinity<block_start>delaunaynet.append([ls le])<block_end>ei.Visit=notvisitvalue<if_stmt>eiTwin<block_start>faces.append(eiTwin.Face)<line_sep># 访问过 eiTwin.Face.Visit=notvisitvalue<line_sep>eiTwin.Visit=notvisitvalue<block_end><block_end><block_end><block_end><return>delaunaynet<block_end># 访问三角形,O(n) <def_stmt>VisitTriangles face# 访问网 <block_start>visitvalue=face.Visit<line_sep>notvisitvalue=<not>visitvalue<line_sep>faces=[face]<line_sep># 访问过 face.Visit=notvisitvalue<line_sep>delaunaynet=VGroup()<while_stmt>faces<block_start>eachface=faces[-1]<line_sep>faces.pop(-1)<line_sep># 面所在的三条边 e1=eachface.HalfEdge<line_sep>e2=e1.Suc<line_sep>e3=e2.Suc<line_sep># 标记访问过 e1.Visit=notvisitvalue<line_sep>e2.Visit=notvisitvalue<line_sep>e3.Visit=notvisitvalue<line_sep># 面对三个点 p1=e1.Start.Point<line_sep>p2=e2.Start.Point<line_sep>p3=e3.Start.Point<line_sep>delaunaynet.add(Polygon(p1 p2 p3))<line_sep>ei=[e1 e2 e3]<for_stmt>each ei<block_start>et=each.Twin<if_stmt>et<block_start>etf=et.Face<line_sep># 未访问过 <if_stmt>etf.Visit<eq>visitvalue# 访问过 <block_start>etf.Visit=notvisitvalue<line_sep>faces.append(etf)<block_end><block_end><block_end><block_end><return>delaunaynet<block_end># 访问维诺图,O(n) <def_stmt>VisitVoronoi face<block_start>visitvalue=face.Visit<line_sep>notvisitvalue=<not>visitvalue<line_sep>faces=[face]<line_sep># 访问过 face.Visit=notvisitvalue<line_sep>voronoi=[]<while_stmt>faces<block_start>eachface=faces[-1]<line_sep>faces.pop(-1)<line_sep># 面所在的三条边 e1=eachface.HalfEdge<line_sep>e2=e1.Suc<line_sep>e3=e2.Suc<line_sep>## 将正在访问的面的三个相邻的面加入faces eis=[e1 e2 e3]<for_stmt>ei eis# ei的孪生兄弟 <block_start>eiTwin=ei.Twin<line_sep># ei未被访问过 <if_stmt>ei.Visit<eq>visitvalue<block_start>ei.Visit=notvisitvalue<if_stmt>eiTwin<block_start>ls,le=ei.Start.Point ei.End.Point<if_stmt>abs(ls[0])<ne>Infinity<and>abs(ls[1])<ne>Infinity<and>abs(le[0])<ne>Infinity<and>abs(le[1])<ne>Infinity<block_start>efc,etfc=ei.Face.Center eiTwin.Face.Center<line_sep>ese=eiTwin.Suc.End.Point<line_sep># 边的对点是无穷点 <if_stmt>abs(ese[0])<eq>Infinity<or>abs(ese[1])<eq>Infinity<block_start>eis,eie=np.array(ei.Start.Point) np.array(ei.End.Point)<line_sep>vertical=np.cross(eie-eis np.array([0 0 1]))<line_sep>vertical=normalize(vertical)<line_sep>vertical=Infinity<times>vertical<line_sep>newle=efc+vertical<line_sep>voronoi.append([efc newle])<block_end><else_stmt><block_start>voronoi.append([efc etfc])<block_end><block_end>faces.append(eiTwin.Face)<line_sep># 访问过 eiTwin.Face.Visit=notvisitvalue<line_sep>eiTwin.Visit=notvisitvalue<block_end><block_end><block_end><block_end><return>voronoi<block_end># 给网加圆心,O(n) <def_stmt>InitNetCircumcircleCenter face# 访问网 <block_start>visitvalue=face.Visit<line_sep>notvisitvalue=<not>visitvalue<line_sep>faces=[face]<line_sep># 访问过 face.Visit=notvisitvalue<line_sep>#delaunaynet = VGroup() <while_stmt>faces<block_start>eachface=faces[-1]<line_sep>faces.pop(-1)<line_sep># 面所在的三条边 e1=eachface.HalfEdge<line_sep>e2=e1.Suc<line_sep>e3=e2.Suc<line_sep># 标记访问过 
e1.Visit=notvisitvalue<line_sep>e2.Visit=notvisitvalue<line_sep>e3.Visit=notvisitvalue<line_sep># 面对三个点 p1=e1.Start.Point<line_sep>p2=e2.Start.Point<line_sep>p3=e3.Start.Point<line_sep># 赋值圆心 <if_stmt>eachface.Center<is><none><block_start>eachface.Center=CircumcircleCenter(p1 p2 p3)<block_end>#delaunaynet.add(Polygon(p1, p2, p3)) eis=[e1 e2 e3]<for_stmt>ei eis<block_start>eit=ei.Twin<if_stmt>eit<block_start>eitf=eit.Face<line_sep># 未访问过 <if_stmt>eitf.Visit<eq>visitvalue# 访问过 <block_start>eitf.Visit=notvisitvalue<line_sep>faces.append(eitf)<block_end><block_end><block_end><block_end><block_end># 构造网,O(nlogn) <def_stmt>ConstructNet points=<none><block_start>face1=InitInfNet(points)<line_sep>infedge=face1.HalfEdge<line_sep>buckets=[face1.Bucket]<while_stmt>buckets# 取桶 <block_start>bucket=buckets[-1]<line_sep>buckets.pop(-1)<line_sep># 取桶的点 point=bucket.Points[-1]<line_sep>bucket.Points.pop(-1)<line_sep>vo=Vertice(point)<line_sep># 桶所在三角形的边 crpface=bucket.Face<line_sep>hf1=crpface.HalfEdge<line_sep>hf2=hf1.Suc<line_sep>hf3=hf2.Suc<line_sep># 撕裂面 ClipFace(crpface vo bucket.Points)<line_sep># 看看是否要边翻转 edges=[hf1 hf2 hf3]<while_stmt>edges<block_start>eachedge=edges[-1]<line_sep>edges.pop(-1)<line_sep>eachedgetwin=eachedge.Twin<if_stmt>eachedgetwin<block_start>trip1=vo.Point<line_sep>trip2=eachedgetwin.Start.Point<line_sep>trip3=eachedgetwin.End.Point<line_sep>trip4=eachedgetwin.Suc.End.Point<if_stmt>InCircle(trip1 trip2 trip3 trip4)<block_start>etfb=eachedgetwin.Face.Bucket<if_stmt>len(etfb.Points)<g>0<block_start>buckets.remove(etfb)<block_end>edges.append(eachedgetwin.Pre)<line_sep>edges.append(eachedgetwin.Suc)<line_sep>EdgeFlipping(eachedge)<block_end><block_end><block_end># 遍历点周围的所有边,把桶加入 ringvisit=vo.HalfEdge<line_sep>currvisit=ringvisit.Twin.Suc<while_stmt>currvisit<ne>ringvisit<block_start>currbucket=currvisit.Face.Bucket<if_stmt>len(currbucket.Points)<g>0<block_start>buckets.append(currbucket)<block_end>currvisit=currvisit.Twin.Suc<block_end>currbucket=currvisit.Face.Bucket<if_stmt>len(currbucket.Points)<g>0<block_start>buckets.append(currbucket)<block_end><block_end><return>infedge.Face<block_end># 得到某点在网中的面 <def_stmt>get_point_posface point net# 访问网 <block_start>visitvalue=net.Visit<line_sep>notvisitvalue=<not>visitvalue<line_sep>faces=[net]<line_sep># 访问过 net.Visit=notvisitvalue<line_sep># 位置 #posface = None mark=<true><while_stmt>faces<block_start>eachface=faces[-1]<line_sep>faces.pop(-1)<line_sep># 面所在的三条边 e1=eachface.HalfEdge<line_sep>e2=e1.Suc<line_sep>e3=e2.Suc<line_sep># 标记访问过 e1.Visit=notvisitvalue<line_sep>e2.Visit=notvisitvalue<line_sep>e3.Visit=notvisitvalue<line_sep># 面对三个点 p1=e1.Start.Point<line_sep>p2=e2.Start.Point<line_sep>p3=e3.Start.Point<line_sep># 位置未找到 <if_stmt>mark<block_start><if_stmt>InTriangle(p1 p2 p3 point)<block_start>posface=eachface<block_end><block_end>ei=[e1 e2 e3]<for_stmt>each ei<block_start>et=each.Twin<if_stmt>et<block_start>etf=et.Face<line_sep># 未访问过 <if_stmt>etf.Visit<eq>visitvalue# 访问过 <block_start>etf.Visit=notvisitvalue<line_sep>faces.append(etf)<block_end><block_end><block_end><block_end><return>posface<block_end># 在网中插入点,O(n) <def_stmt>net_insert_point point net# 点所在的面 <block_start>posface=get_point_posface(point net)<line_sep>posface.Bucket.Points.append(point)<line_sep>infedge=posface.HalfEdge<line_sep>buckets=[posface.Bucket]<while_stmt>buckets# 取桶 <block_start>bucket=buckets[-1]<line_sep>buckets.pop(-1)<line_sep># 取桶的点 point=bucket.Points[-1]<line_sep>bucket.Points.pop(-1)<line_sep>vo=Vertice(point)<line_sep># 桶所在三角形的边 
crpface=bucket.Face<line_sep>hf1=crpface.HalfEdge<line_sep>hf2=hf1.Suc<line_sep>hf3=hf2.Suc<line_sep># 撕裂面 ClipFace(crpface vo bucket.Points)<line_sep># 看看是否要边翻转 edges=[hf1 hf2 hf3]<while_stmt>edges<block_start>eachedge=edges[-1]<line_sep>edges.pop(-1)<line_sep>eachedgetwin=eachedge.Twin<if_stmt>eachedgetwin<block_start>trip1=vo.Point<line_sep>trip2=eachedgetwin.Start.Point<line_sep>trip3=eachedgetwin.End.Point<line_sep>trip4=eachedgetwin.Suc.End.Point<if_stmt>InCircle(trip1 trip2 trip3 trip4)<block_start>etfb=eachedgetwin.Face.Bucket<if_stmt>len(etfb.Points)<g>0<block_start>buckets.remove(etfb)<block_end>edges.append(eachedgetwin.Pre)<line_sep>edges.append(eachedgetwin.Suc)<line_sep>EdgeFlipping(eachedge)<block_end><block_end><block_end># 遍历点周围的所有边,把桶加入 ringvisit=vo.HalfEdge<line_sep>currvisit=ringvisit.Twin.Suc<while_stmt>currvisit<ne>ringvisit<block_start>currbucket=currvisit.Face.Bucket<if_stmt>len(currbucket.Points)<g>0<block_start>buckets.append(currbucket)<block_end>currvisit=currvisit.Twin.Suc<block_end>currbucket=currvisit.Face.Bucket<if_stmt>len(currbucket.Points)<g>0<block_start>buckets.append(currbucket)<block_end><block_end><return>infedge.Face<block_end># 在网中插入点,并设置外心,O(n) <def_stmt>net_insert_point_and_set_circumcirclecenter point net# 点所在的面,O(n) <block_start>posface=get_point_posface(point net)<line_sep>vo=Vertice(point)<line_sep># 桶所在三角形的边 crpface=posface<line_sep>hf1=crpface.HalfEdge<line_sep>hf2=hf1.Suc<line_sep>hf3=hf2.Suc<line_sep># 撕裂面 ClipFace(crpface vo [])<line_sep># 设置外心 hf1.Face.Center=CircumcircleCenter(hf1.Start.Point hf1.End.Point point)<line_sep>hf2.Face.Center=CircumcircleCenter(hf2.Start.Point hf2.End.Point point)<line_sep>hf3.Face.Center=CircumcircleCenter(hf3.Start.Point hf3.End.Point point)<line_sep># 看看是否要边翻转,O(6) edges=[hf1 hf2 hf3]<while_stmt>edges<block_start>eachedge=edges[-1]<line_sep>edges.pop(-1)<line_sep>eachedgetwin=eachedge.Twin<if_stmt>eachedgetwin<block_start>trip1=vo.Point<line_sep>trip2=eachedgetwin.Start.Point<line_sep>trip3=eachedgetwin.End.Point<line_sep>trip4=eachedgetwin.Suc.End.Point<if_stmt>InCircle(trip1 trip2 trip3 trip4)<block_start>edges.append(eachedgetwin.Pre)<line_sep>edges.append(eachedgetwin.Suc)<line_sep>efv1=eachedge.Suc<line_sep>efv2=eachedgetwin.Suc<line_sep>EdgeFlipping(eachedge)<line_sep>efv1.Face.Center=CircumcircleCenter(trip1 trip2 trip4)<line_sep>efv2.Face.Center=CircumcircleCenter(trip1 trip3 trip4)<block_end><block_end><block_end><return>vo.HalfEdge.Face<block_end># 德劳内三角网,O(nlogn) <class_stmt>DelaunayTrianglation(VGroup)<block_start><def_stmt>__init__ self *points **kwargs<block_start>digest_config(self kwargs)<line_sep>self.net=ConstructNet(list(points))<line_sep>self.kwargs=kwargs<line_sep>VGroup.__init__(self *[Line(*each **kwargs)<for>each self.VisitNet()])<block_end># 获取网的顶点对,即用坐标表示的线 <def_stmt>VisitNet self<block_start><return>VisitNet(self.net)<block_end><def_stmt>VisitTriangles self<block_start><return>VGroup(*VisitTriangles(self.net) **self.kwargs)<block_end># 获取网 <def_stmt>GetNet self<block_start><return>self.net<block_end># 插入节点 <def_stmt>InsertPoint self point<block_start>net_insert_point(point self.net)<line_sep>self.become(VGroup(*[Line(*each **self.kwargs)<for>each self.VisitNet()]))<line_sep><return>self<block_end><block_end># 维诺图,O(n)+O(nlogn)=O(nlogn) <class_stmt>Voronoi(VGroup)<block_start><def_stmt>__init__ self *points **kwargs<block_start>digest_config(self 
kwargs)<line_sep>self.kwargs=kwargs<line_sep>self.net=DelaunayTrianglation(*points).GetNet()<line_sep>InitNetCircumcircleCenter(self.net)<line_sep>self.voronoi=self.VisitVoronoi()<line_sep>VGroup.__init__(self *[Line(*each **kwargs)<for>each self.voronoi])<block_end><def_stmt>VisitVoronoi self<block_start><return>VisitVoronoi(self.net)<block_end># 获取网 <def_stmt>GetNet self<block_start><return>self.net<block_end># 插入节点 <def_stmt>InsertPoint self point<block_start>net_insert_point_and_set_circumcirclecenter(point self.net)<line_sep>self.voronoi=self.VisitVoronoi()<line_sep>self.become(VGroup(*[Line(*each **self.kwargs)<for>each self.voronoi]))<line_sep><return>self<block_end><block_end># 测试类 <class_stmt>test(Scene)<block_start><def_stmt>construct self<block_start>np.random.seed(2007)<line_sep>points=[[np.random.randint(-70000 70000)/10500 np.random.randint(-38000 38000)/10500 0]<for>i range(800)]<line_sep>#points = [UL, UP, UR, LEFT, ORIGIN, RIGHT, DL, DOWN, DR] #points = [UL, DR, UR, DL] dots=[Dot(p).scale(0.5)<for>p points]<line_sep>self.add(*dots)<line_sep>start=time.perf_counter()<line_sep>net=Voronoi(*points)<line_sep>self.add(net)<line_sep>end=time.perf_counter()<line_sep>print(end-start)<line_sep>''' p1, p2, p3 = DL, UL, UR p4 = DR p5 = ORIGIN p6 = UL/2 p7 = UL p8 = UL*2 print(InTriangle(p1, p2, p3, p4)) print(InTriangle(p1, p2, p3, p5)) print(InTriangle(p1, p2, p3, p6)) print(InTriangle(p1, p2, p3, p7)) print(InTriangle(p1, p2, p3, p8)) print(InCircle(p1, p2, p3, p4)) print(InCircle(p1, p2, p3, p5)) print(InCircle(p1, p2, p3, p6)) print(InCircle(p1, p2, p3, p7)) print(InCircle(p1, p2, p3, p8)) '''<line_sep>''' infnet = InitInfNet() he1 = infnet.HalfEdge he2 = he1.Suc he3 = he2.Suc print(get_polygon_directed_area(Polygon(he1.Start.Point, he2.Start.Point, he3.Start.Point))) '''<line_sep>''' np.random.seed(2007) points = [ [np.random.randint(-70000, 70000)/10500, np.random.randint(-38000, 38000)/10500, 0] for i in range(1000) ] #points = [UL, UP, UR, LEFT, ORIGIN, RIGHT, DL, DOWN, DR] #points = [UL, DR, UR, DL] dots = [Dot(p) for p in points] #self.add(*dots) start = time.perf_counter() delaunay = ConstructNet(self, points) net = VisitNet(delaunay) end = time.perf_counter() print(end - start) self.add(net) '''<line_sep>''' np.random.seed(2000007) points = [ [np.random.randint(-70000, 70000)/10000, np.random.randint(-38000, 38000)/10000, 0] for i in range(7) ] dots = [Dot(p) for p in points] self.add(*dots) start = time.perf_counter() delaunay = InitInfNet(points) #print(points[0]) net1, net2, net3 = ClipFace(delaunay, Vertice(points[0]), points[1:]) net = VisitTriangles(net1) end = time.perf_counter() print(end - start) self.add(net) '''<line_sep>''' p1, p2, p3, p4 = UL, UR*2, DR, DL*2 v1, v2, v3, v4 = Vertice(p1), Vertice(p2), Vertice(p3), Vertice(p4) he1 = HalfEdge(v1, v2) he2 = HalfEdge(v2, v3) he3 = HalfEdge(v3, v4) he4 = HalfEdge(v4, v1) he5 = HalfEdge(v3, v1) he6 = HalfEdge(v1, v3) he1.Suc = he2 he2.Pre = he1 he2.Suc = he5 he5.Pre = he2 he5.Suc = he1 he1.Pre = he5 he3.Suc = he4 he4.Pre = he3 he4.Suc = he6 he6.Pre = he4 he6.Suc = he3 he3.Pre = he6 bucket1 = Bucket([UR+RIGHT/5, UR+LEFT/5]) bucket2 = Bucket([]) face1 = Face(he1) face1.Bucket = bucket1 bucket1.Face = face1 he1.Face = face1 he2.Face = face1 he5.Face = face1 face2 = Face(he3) face2.Bucket = bucket2 bucket2.Face = face2 he3.Face = face2 he4.Face = face2 he6.Face = face2 he5.Twin = he6 he6.Twin = he5 EdgeFlipping(he5) start = time.perf_counter() net = VisitInfNet(face1) end = time.perf_counter() print(end - start) 
print(get_polygon_directed_area(Polygon(face1.HalfEdge.Start.Point, face1.HalfEdge.Suc.Start.Point, face1.HalfEdge.Suc.Suc.Start.Point))) print(get_polygon_directed_area(Polygon(face2.HalfEdge.Start.Point, face2.HalfEdge.Suc.Start.Point, face2.HalfEdge.Suc.Suc.Start.Point))) self.add(net) '''<line_sep>#p1, p2, p3, p4 = UL, UR, DR, DL #print(InTriangle(p1, p2, p3, ORIGIN), InTriangle(p1, p2, p3, UR/2), InTriangle(p1, p2, p3, p4)) ''' start = time.perf_counter() print( InCircle(p1, p2, p3, p4), InCircle(p1, p2, p3, ORIGIN), InCircle(p1, p2, p3, p4+LEFT) ) end = time.perf_counter() print(end - start) start = time.perf_counter() print( InCircle2(p1, p2, p3, p4), InCircle2(p1, p2, p3, ORIGIN), InCircle2(p1, p2, p3, p4+LEFT) ) end = time.perf_counter() print(end - start) '''<line_sep>self.wait()<block_end><block_end>
# -*- coding: utf-8 -*-
# Generated by Django 1.9.2 on 2016-02-19 10:55
from __future__ import unicode_literals

import django.contrib.postgres.fields.jsonb
from django.db import migrations, models
import django.db.models.deletion


class Migration(migrations.Migration):

    initial = True

    dependencies = [
        ('inventory', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='Catalog',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, unique=True)),
                ('priority', models.PositiveIntegerField(default=0)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('archived_at', models.DateTimeField(blank=True, null=True)),
            ],
            options={
                'ordering': ('-priority', 'name'),
            },
        ),
        migrations.CreateModel(
            name='Manifest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('catalogs', models.ManyToManyField(to='monolith.Catalog')),
                ('meta_business_unit', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='inventory.MetaBusinessUnit')),
            ],
        ),
        migrations.CreateModel(
            name='PkgInfo',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('data', django.contrib.postgres.fields.jsonb.JSONField()),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('archived_at', models.DateTimeField(blank=True, null=True)),
                ('catalogs', models.ManyToManyField(to='monolith.Catalog')),
            ],
        ),
        migrations.CreateModel(
            name='PkgInfoCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='PkgInfoName',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256, unique=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
            ],
        ),
        migrations.CreateModel(
            name='SubManifest',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('name', models.CharField(max_length=256)),
                ('description', models.TextField(blank=True)),
                ('created_at', models.DateTimeField(auto_now_add=True)),
                ('updated_at', models.DateTimeField(auto_now=True)),
                ('managed_installs', models.ManyToManyField(related_name='_submanifest_managed_installs_+', to='monolith.PkgInfoName')),
                ('managed_uninstalls', models.ManyToManyField(related_name='_submanifest_managed_uninstalls_+', to='monolith.PkgInfoName')),
                ('managed_updates', models.ManyToManyField(related_name='_submanifest_managed_updates_+', to='monolith.PkgInfoName')),
                ('meta_business_unit', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='inventory.MetaBusinessUnit')),
                ('optional_installs', models.ManyToManyField(related_name='_submanifest_optional_installs_+', to='monolith.PkgInfoName')),
            ],
        ),
        migrations.AddField(
            model_name='pkginfo',
            name='category',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='monolith.PkgInfoCategory'),
        ),
        migrations.AddField(
            model_name='pkginfo',
            name='name',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='monolith.PkgInfoName'),
        ),
        migrations.AddField(
            model_name='manifest',
            name='sub_manifests',
            field=models.ManyToManyField(to='monolith.SubManifest'),
        ),
    ]
# Generated by Django 3.1.12 on 2021-07-16 13:04
from django.contrib.postgres.indexes import GinIndex
from django.contrib.postgres.operations import TrigramExtension
from django.db import migrations


class Migration(migrations.Migration):

    dependencies = [
        ("posthog", "0160_organization_domain_whitelist"),
    ]

    operations = [
        TrigramExtension(),
        migrations.AddIndex(
            model_name="eventdefinition",
            index=GinIndex(fields=["name"], name="index_event_definition_name", opclasses=["gin_trgm_ops"]),
        ),
        migrations.AddIndex(
            model_name="propertydefinition",
            index=GinIndex(fields=["name"], name="index_property_definition_name", opclasses=["gin_trgm_ops"]),
        ),
    ]
# Time: O(n^2) # Space: O(n) <class_stmt>Solution(object)<block_start><def_stmt>minSkips self dist speed hoursBefore<block_start>""" :type dist: List[int] :type speed: int :type hoursBefore: int :rtype: int """<def_stmt>ceil a b<block_start><return>(a+b-1)<floordiv>b<block_end>dp=[0]<times>((len(dist)-1)+1)# dp[i]: (min time by i skips) * speed <for_stmt>i,d enumerate(dist)<block_start><for_stmt>j reversed(xrange(len(dp)))<block_start>dp[j]=ceil(dp[j]+d speed)<times>speed<if>i<l>len(dist)-1<else>dp[j]+d<if_stmt>j-1<ge>0<block_start>dp[j]=min(dp[j] dp[j-1]+d)<block_end><block_end><block_end>target=hoursBefore<times>speed<for_stmt>i xrange(len(dist))<block_start><if_stmt>dp[i]<le>target<block_start><return>i<block_end><block_end><return>-1<block_end><block_end>
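# A hedged usage sketch for the Solution above (the original targets Python 2,
# hence xrange). dp[j] stores speed * (minimum travel time using j skips); a
# rest after every road except the last rounds the elapsed time up to a whole
# hour unless that rest is skipped. The sample input below is illustrative.
if __name__ == '__main__':
    # dist=[1, 3, 2], speed=4, hoursBefore=2: one skipped rest is enough.
    print(Solution().minSkips([1, 3, 2], 4, 2))  # expected output: 1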
<import_from_future_stmt> print_function absolute_import division<class_stmt>CudaDriverError(Exception)<block_start><pass><block_end><class_stmt>CudaSupportError(ImportError)<block_start><pass><block_end><class_stmt>NvvmError(Exception)<block_start><def_stmt>__str__ self<block_start><return>'\n'.join(map(str self.args))<block_end><block_end><class_stmt>NvvmSupportError(ImportError)<block_start><pass><block_end>
<import_from_stmt>django.contrib admin<import_from_stmt>raspberryio.userprofile models<as>userprofile<class_stmt>ProfileAdmin(admin.ModelAdmin)<block_start>model=userprofile.Profile<block_end>admin.site.register(userprofile.Profile ProfileAdmin)<line_sep>
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>hashlib<import_stmt>typing<import_from_stmt>typing Union<import_from_stmt>fate_arch.common Party profile<import_from_stmt>fate_arch.common.log getLogger<import_from_stmt>fate_arch.federation.transfer_variable._auth _check_variable_auth_conf<import_from_stmt>fate_arch.federation.transfer_variable._cleaner IterationGC<import_from_stmt>fate_arch.federation.transfer_variable._namespace FederationTagNamespace<import_from_stmt>fate_arch.session get_latest_opened<line_sep>__all__=["Variable" "BaseTransferVariables"]<line_sep>LOGGER=getLogger()<class_stmt>Variable(object)<block_start>""" variable to distinguish federation by name """<line_sep>__disable_auth_check=<false><line_sep>__instances:typing.MutableMapping[str 'Variable']={}<line_sep>@classmethod<def_stmt>_disable_auth_check cls<block_start>""" used in auth conf generation, don't call this in real application """<line_sep>cls.__disable_auth_check=<true><block_end>@classmethod<def_stmt>get_or_create cls name create_func:typing.Callable[[] 'Variable']<arrow>'Variable'<block_start><if_stmt>name<not><in>cls.__instances<block_start>value=create_func()<line_sep>cls.__instances[name]=value<block_end><return>cls.__instances[name]<block_end><def_stmt>__init__ self name:str src:typing.Tuple[str <ellipsis>] dst:typing.Tuple[str <ellipsis>]<block_start><if_stmt>name<in>self.__instances<block_start><raise>RuntimeError(f"{self.__instances[name]} with {name} already initialized, which expected to be an singleton object.")<block_end><if_stmt><not>self.__disable_auth_check<block_start>auth_src,auth_dst=_check_variable_auth_conf(name)<if_stmt>set(src)<ne>set(auth_src)<or>set(dst)<ne>set(auth_dst)<block_start><raise>RuntimeError(f"Variable {name} auth error, "<concat>f"acquired: src={src}, dst={dst}, allowed: src={auth_src}, dst={auth_dst}")<block_end><block_end><assert_stmt>len(name.split("."))<ge>3 "incorrect name format, should be `module_name.class_name.variable_name`"<line_sep>self._name=name<line_sep>self._src=src<line_sep>self._dst=dst<line_sep>self._get_gc=IterationGC()<line_sep>self._remote_gc=IterationGC()<line_sep>self._use_short_name=<true><line_sep>self._short_name=self._get_short_name(self._name)<block_end>@staticmethod<def_stmt>_get_short_name name<block_start>fix_sized=hashlib.blake2b(name.encode('utf-8') digest_size=10).hexdigest()<line_sep>_,right=name.rsplit('.' 
1)<line_sep><return>f"hash.{fix_sized}.{right}"<block_end># copy never create a new instance <def_stmt>__copy__ self<block_start><return>self<block_end># deepcopy never create a new instance <def_stmt>__deepcopy__ self memo<block_start><return>self<block_end><def_stmt>set_preserve_num self n<block_start>self._get_gc.set_capacity(n)<line_sep>self._remote_gc.set_capacity(n)<line_sep><return>self<block_end><def_stmt>disable_auto_clean self<block_start>self._get_gc.disable()<line_sep>self._remote_gc.disable()<line_sep><return>self<block_end><def_stmt>clean self<block_start>self._get_gc.clean()<line_sep>self._remote_gc.clean()<block_end><def_stmt>remote_parties self obj parties:Union[typing.List[Party] Party] suffix:Union[typing.Any typing.Tuple]=tuple()<block_start>""" remote object to specified parties Parameters ---------- obj: object or table object or table to remote parties: typing.List[Party] parties to remote object/table to suffix: str or tuple of str suffix used to distinguish federation with in variable Returns ------- None """<line_sep>session=get_latest_opened()<if_stmt>isinstance(parties Party)<block_start>parties=[parties]<block_end><if_stmt><not>isinstance(suffix tuple)<block_start>suffix=(suffix )<block_end>tag=FederationTagNamespace.generate_tag(*suffix)<for_stmt>party parties<block_start><if_stmt>party.role<not><in>self._dst<block_start><raise>RuntimeError(f"not allowed to remote object to {party} using {self._name}")<block_end><block_end>local=session.parties.local_party.role<if_stmt>local<not><in>self._src<block_start><raise>RuntimeError(f"not allowed to remote object from {local} using {self._name}")<block_end>name=self._short_name<if>self._use_short_name<else>self._name<line_sep>timer=profile.federation_remote_timer(name self._name tag local parties)<line_sep>session.federation.remote(v=obj name=name tag=tag parties=parties gc=self._remote_gc)<line_sep>timer.done(session.federation)<line_sep>self._remote_gc.gc()<block_end><def_stmt>get_parties self parties:Union[typing.List[Party] Party] suffix:Union[typing.Any typing.Tuple]=tuple()<block_start>""" get objects/tables from specified parties Parameters ---------- parties: typing.List[Party] parties to remote object/table to suffix: str or tuple of str suffix used to distinguish federation with in variable Returns ------- list a list of objects/tables get from parties with same order of ``parties`` """<line_sep>session=get_latest_opened()<if_stmt><not>isinstance(parties list)<block_start>parties=[parties]<block_end><if_stmt><not>isinstance(suffix tuple)<block_start>suffix=(suffix )<block_end>tag=FederationTagNamespace.generate_tag(*suffix)<for_stmt>party parties<block_start><if_stmt>party.role<not><in>self._src<block_start><raise>RuntimeError(f"not allowed to get object from {party} using {self._name}")<block_end><block_end>local=session.parties.local_party.role<if_stmt>local<not><in>self._dst<block_start><raise>RuntimeError(f"not allowed to get object to {local} using {self._name}")<block_end>name=self._short_name<if>self._use_short_name<else>self._name<line_sep>timer=profile.federation_get_timer(name self._name tag local parties)<line_sep>rtn=session.federation.get(name=name tag=tag parties=parties gc=self._get_gc)<line_sep>timer.done(session.federation)<line_sep>self._get_gc.gc()<line_sep><return>rtn<block_end><def_stmt>remote self obj role=<none> idx=-1 suffix=tuple()<block_start>""" send obj to other parties. Args: obj: object to be sent role: role of parties to sent to, use one of ['Host', 'Guest', 'Arbiter', None]. 
The default is None, means sent values to parties regardless their party role idx: id of party to sent to. The default is -1, which means sent values to parties regardless their party id suffix: additional tag suffix, the default is tuple() """<line_sep>party_info=get_latest_opened().parties<if_stmt>idx<ge>0<and>role<is><none><block_start><raise>ValueError("role cannot be None if idx specified")<block_end># get subset of dst roles in runtime conf <if_stmt>role<is><none><block_start>parties=party_info.roles_to_parties(self._dst strict=<false>)<block_end><else_stmt><block_start><if_stmt>isinstance(role str)<block_start>role=[role]<block_end>parties=party_info.roles_to_parties(role)<block_end><if_stmt>idx<ge>0<block_start>parties=parties[idx]<block_end><return>self.remote_parties(obj=obj parties=parties suffix=suffix)<block_end><def_stmt>get self idx=-1 suffix=tuple()<block_start>""" get obj from other parties. Args: idx: id of party to get from. The default is -1, which means get values from parties regardless their party id suffix: additional tag suffix, the default is tuple() Returns: object or list of object """<line_sep>src_parties=get_latest_opened().parties.roles_to_parties(roles=self._src strict=<false>)<if_stmt>isinstance(idx list)<block_start>rtn=self.get_parties(parties=[src_parties[i]<for>i idx] suffix=suffix)<block_end><elif_stmt>isinstance(idx int)<block_start>rtn=self.get_parties(parties=src_parties suffix=suffix)<if>idx<l>0<else>self.get_parties(parties=src_parties[idx] suffix=suffix)[0]<block_end><else_stmt><block_start><raise>ValueError(f"illegal idx type: {type(idx)}, supported types: int or list of int")<block_end><return>rtn<block_end><block_end><class_stmt>BaseTransferVariables(object)<block_start><def_stmt>__init__ self *args<block_start><pass><block_end><def_stmt>__copy__ self<block_start><return>self<block_end><def_stmt>__deepcopy__ self memo<block_start><return>self<block_end>@staticmethod<def_stmt>set_flowid flowid<block_start>""" set global namespace for federations. Parameters ---------- flowid: str namespace Returns ------- None """<line_sep>FederationTagNamespace.set_namespace(str(flowid))<block_end><def_stmt>_create_variable self name:str src:typing.Iterable[str] dst:typing.Iterable[str]<arrow>Variable<block_start>full_name=f"{self.__module__}.{self.__class__.__name__}.{name}"<line_sep><return>Variable.get_or_create(full_name <lambda>:Variable(name=full_name src=tuple(src) dst=tuple(dst)))<block_end>@staticmethod<def_stmt>all_parties <block_start>""" get all parties Returns ------- list list of parties """<line_sep><return>get_latest_opened().parties.all_parties<block_end>@staticmethod<def_stmt>local_party <block_start>""" indicate local party Returns ------- Party party this program running on """<line_sep><return>get_latest_opened().parties.local_party<block_end><block_end>
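# A minimal sketch, not part of the module above, of how these classes are
# meant to be combined: a BaseTransferVariables subclass declares named
# Variables with allowed source/destination roles, the sending role calls
# remote() and the receiving role calls get(). The class name, variable name
# and roles below are hypothetical; a real deployment also needs the variable
# listed in the auth conf checked by _check_variable_auth_conf, and an opened
# session so remote()/get() can resolve parties.
class DemoTransferVariable(BaseTransferVariables):
    def __init__(self, flowid=0):
        super(DemoTransferVariable, self).__init__(flowid)
        # the full variable name becomes "<module>.DemoTransferVariable.guest_share"
        self.guest_share = self._create_variable(
            name="guest_share", src=["guest"], dst=["host"])

# guest side (sketch): DemoTransferVariable().guest_share.remote(obj, role="host", suffix=(0,))
# host side (sketch):  shares = DemoTransferVariable().guest_share.get(idx=-1, suffix=(0,))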
<import_from_stmt>scrapy.spiders Spider<import_from_stmt>scrapy.http Request<import_from_stmt>scrapy.selector Selector<import_from_stmt>crawler.items ProxyIPItem<class_stmt>CnProxySpider(Spider)<block_start>name="cnproxy"<line_sep>allowed_domains=["cn-proxy.com"]<line_sep>start_urls=["http://cn-proxy.com/" "http://cn-proxy.com/archives/218"]<line_sep>referer="http://cn-proxy.com/"<def_stmt>start_requests self<block_start><for_stmt>item self.start_urls<block_start><yield>Request(url=item headers={'Referer':self.referer})<block_end><block_end><def_stmt>parse self response<block_start>ip_list=response.xpath('//table[@class="sortable"]/tbody/tr')<for_stmt>ip ip_list<block_start>item=ProxyIPItem()<line_sep>item['ip']=ip.xpath('td[1]/text()').extract()[0]<line_sep>item['port']=ip.xpath('td[2]/text()').extract()[0]<line_sep>item['type']='http'<line_sep><yield>item<block_end><block_end><block_end>
# flake8: noqa <import_from_stmt>nebullvm.installers.installers install_tvm install_tensor_rt install_openvino install_onnxruntime <line_sep>__all__=[k<for>k globals().keys()<if><not>k.startswith("_")]<line_sep>
<import_stmt>functools<import_from_stmt>collections namedtuple<import_from_stmt>datetime datetime<import_from_stmt>typing Any Callable Iterable Optional Tuple Union<import_stmt>grpc<import_from_stmt>about_time about_time<import_from_stmt>about_time.core HandleStats<line_sep># noinspection PyProtectedMember <import_from_stmt>celery.states FAILURE PROPAGATE_STATES REJECTED RETRY REVOKED SUCCESS<import_from_stmt>.code_highlighter traceback_highlighter_factory typed_code<import_from_stmt>.display_modes ModeTask ModeWorker find_mode<import_from_stmt>..protos.clearly_pb2 CaptureRequest FilterTasksRequest FilterWorkersRequest Null PatternFilter TaskMessage WorkerMessage<import_from_stmt>..protos.clearly_pb2_grpc ClearlyServerStub<import_from_stmt>..utils.colors Colors<import_from_stmt>..utils.env_params get_env_int_tuple<import_from_stmt>..utils.safe_compiler safe_compile_text<import_from_stmt>..utils.worker_states HEARTBEAT ONLINE<line_sep>HEADER_SIZE=8<line_sep>HEADER_PADDING,HEADER_ALIGN=' '<times>HEADER_SIZE '>{}'.format(HEADER_SIZE)<line_sep>EMPTY=Colors.DIM(':)')<line_sep>DIM_NONE=Colors.CYAN_DIM('None')<line_sep>TRACEBACK_HIGHLIGHTER=traceback_highlighter_factory()<line_sep>Modes=namedtuple('Modes' 'tasks workers')<def_stmt>set_user_friendly_errors fn:Callable[<ellipsis> <none>]<arrow>Callable[<ellipsis> <none>]<block_start>@functools.wraps(fn)<def_stmt>inner self:'ClearlyClient' *args **kwargs<block_start><try_stmt><block_start>fn(self *args **kwargs)<block_end><except_stmt>grpc.RpcError<as>e<block_start><if_stmt>self._debug<block_start><raise><block_end># noinspection PyUnresolvedReferences print('{}: {} ({})'.format(Colors.BOLD('Server communication error') Colors.RED(e.details()) Colors.DIM(e.code())))<block_end><except_stmt>UserWarning<as>e<block_start>print(Colors.RED(e))<block_end><block_end><return>inner<block_end><class_stmt>ClearlyClient<block_start>"""Main client object, which interfaces with the Clearly server backend, sends commands and displays captured events. Attributes: _debug: if True, let the ugly errors be seen, humanizes them otherwise _stub: the rpc communication stub instance _modes: the current tasks and workers display modes """<def_stmt>__init__ self host:str='localhost' port:int=12223 debug:bool=<false><block_start>"""Construct a Clearly Client instance. Args: host: the hostname of the server port: the port of the server """<line_sep>self._debug=debug<line_sep>channel=grpc.insecure_channel('{}:{}'.format(host port))<line_sep>self._stub=ClearlyServerStub(channel)<line_sep>self._modes=Modes(ModeTask.FAILURE ModeWorker.WORKER)<line_sep>self._modes=self._get_display_modes(get_env_int_tuple('CLI_DISPLAY_MODES' <none>))<block_end><def_stmt>capture_tasks self tasks:Optional[str]=<none> mode:Union[<none> int ModeTask]=<none><arrow><none><block_start>"""Start capturing task events in real time, so you can instantly see exactly what your publishers and workers are doing. Filter as much as you can to find what you need, and don't worry as the Clearly Server will still seamlessly handle all tasks updates. Currently, you can filter tasks by name, uuid, routing key or state. Insert an '!' in the first position to select those that do not match criteria. This runs in the foreground. Press CTRL+C at any time to stop it. 
Args: tasks: a simple pattern to filter tasks ex.: 'email' to find values containing that word anywhere 'failure|rejected|revoked' to find tasks with problem '^trigger|^email' to find values starting with any of those words 'trigger.*123456' to find values with those words in that sequence '!^trigger|^email' to filter values not starting with both those words mode: an optional display mode to present data See Also: ClearlyClient#display_modes() """<line_sep>self.capture(tasks=tasks modes=mode workers='!')<block_end><def_stmt>capture_workers self workers:Optional[str]=<none> mode:Union[<none> int ModeWorker]=<none><arrow><none><block_start>"""Start capturing worker events in real time, so you can instantly see exactly what your workers states are. Filter as much as you can to find what you need, and don't worry as the Clearly Server will still seamlessly handle all tasks and workers updates. Currently, you can filter workers by hostname. Insert an '!' in the first position to select those that do not match criteria. This runs in the foreground. Press CTRL+C at any time to stop it. Args: workers: a simple pattern to filter workers ex.: 'email' to find values containing that word anywhere 'service|priority' to find values containing any of those words '!service|priority' to find values not containing both those words mode: an optional display mode to present data See Also: ClearlyClient#display_modes() """<line_sep>self.capture(workers=workers modes=mode tasks='!')<block_end>@set_user_friendly_errors<def_stmt>capture self tasks:Optional[str]=<none> workers:Optional[str]=<none> modes:Union[<none> int ModeTask ModeWorker Tuple]=<none><arrow><none><block_start>"""Start capturing all events in real time, so you can instantly see exactly what your publishers and workers are doing. Filter as much as you can to find what you need, and don't worry as the Clearly Server will still seamlessly handle all tasks and workers updates. This runs in the foreground. Press CTRL+C at any time to stop it. Args: tasks: the pattern to filter tasks workers: the pattern to filter workers modes: optional display modes to present data send one or a tuple, as described in display_modes() See Also: ClearlyClient#capture_tasks() ClearlyClient#capture_workers() ClearlyClient#display_modes() """<line_sep>tasks_filter=ClearlyClient._parse_pattern(tasks)<line_sep>workers_filter=ClearlyClient._parse_pattern(workers)<if_stmt><not>tasks_filter<and><not>workers_filter<block_start><raise>UserWarning('Nothing would be selected.')<block_end>mode=self._get_display_modes(modes)<line_sep>request=CaptureRequest(tasks_capture=tasks_filter workers_capture=workers_filter )<try_stmt><block_start><for_stmt>realtime self._stub.capture_realtime(request)<block_start><if_stmt>realtime.HasField('task')<block_start>self._display_task(realtime.task mode.tasks)<block_end><elif_stmt>realtime.HasField('worker')<block_start>self._display_worker(realtime.worker mode.workers)<block_end><else_stmt><block_start>print('unknown event:' realtime)<line_sep><break><block_end><block_end><block_end><except_stmt>KeyboardInterrupt# pragma: no cover <block_start><pass><block_end><block_end>@set_user_friendly_errors<def_stmt>tasks self tasks:Optional[str]=<none> mode:Union[<none> int ModeTask]=<none> limit:Optional[int]=<none> reverse:bool=<true><arrow><none><block_start>"""Fetch current data from past tasks. Note that the `limit` field is just a hint, it may not be accurate. 
Also, the total number of tasks fetched may be slightly different from the server `max_tasks` setting. Args: tasks: the pattern to filter tasks mode: an optional display mode to present data limit: the maximum number of events to fetch, fetches all if None or 0 (default) reverse: if True (default), shows the most recent first See Also: ClearlyClient#capture_tasks() ClearlyClient#display_modes() """<line_sep>tasks_filter=ClearlyClient._parse_pattern(tasks)<if_stmt><not>tasks_filter<block_start><raise>UserWarning('Nothing would be selected.')<block_end>mode=self._get_display_modes(mode)<line_sep>request=FilterTasksRequest(tasks_filter=tasks_filter limit=limit reverse=reverse)<line_sep>at=about_time(self._stub.filter_tasks(request))<for_stmt>task at<block_start>self._display_task(task mode.tasks)<block_end>ClearlyClient._fetched_info(at)<block_end>@set_user_friendly_errors<def_stmt>workers self workers:Optional[str]=<none> mode:Union[<none> int ModeWorker]=<none><arrow><none><block_start>"""Fetch current data from known workers. Args: workers: the pattern to filter workers mode: an optional display mode to present data See Also: ClearlyClient#capture_workers() ClearlyClient#display_modes() """<line_sep>workers_filter=ClearlyClient._parse_pattern(workers)<if_stmt><not>workers_filter<block_start><raise>UserWarning('Nothing would be selected.')<block_end>mode=self._get_display_modes(mode)<line_sep>request=FilterWorkersRequest(workers_filter=workers_filter)<line_sep>at=about_time(self._stub.filter_workers(request))<for_stmt>worker at<block_start>self._display_worker(worker mode.workers)<block_end>ClearlyClient._fetched_info(at)<block_end>@set_user_friendly_errors<def_stmt>seen_tasks self<arrow><none><block_start>"""Fetch a list of seen task types."""<line_sep>task_types=self._stub.seen_tasks(Null()).task_types<for_stmt>i,task_type enumerate(task_types 1)<block_start>print(Colors.DIM(i) Colors.BLUE(task_type))<block_end><block_end>@set_user_friendly_errors<def_stmt>reset_tasks self<arrow><none><block_start>"""Reset stored tasks."""<line_sep>self._stub.reset_tasks(Null())<line_sep>print(Colors.BLUE('Ok'))<block_end>@set_user_friendly_errors<def_stmt>metrics self<arrow><none><block_start>"""List some metrics about the celery cluster and Clearly itself. 
Shows: Tasks processed: actual number of tasks processed, including retries Events processed: total number of events processed Tasks stored: number of currently stored tasks Workers stored: number of workers seen, including offline """<line_sep>stats=self._stub.get_metrics(Null())<line_sep>print(Colors.DIM('Processed:') '\ttasks' Colors.RED(stats.task_count) '\tevents' Colors.RED(stats.event_count))<line_sep>print(Colors.DIM('Stored:') '\ttasks' Colors.RED(stats.len_tasks) '\tworkers' Colors.RED(stats.len_workers))<block_end><def_stmt>_get_display_modes self modes:Union[<none> int ModeTask ModeWorker Tuple]=<none><arrow>Modes<block_start><if_stmt><not>isinstance(modes tuple)<block_start>modes=(modes )<block_end><elif_stmt>len(modes)<g>2<block_start><raise>UserWarning('At most two display modes, was sent {}'.format(len(modes)))<block_end>modes=sorted(x<for>x (find_mode(to)<for>to modes)<if>x)<if_stmt><not>modes<block_start><return>self._modes<block_end><if_stmt>len(modes)<eq>2<and>isinstance(modes[0] type(modes[1]))<block_start><raise>UserWarning('Two modes of the same type?')<block_end><if_stmt>isinstance(modes[0] ModeTask)<block_start><return>Modes(modes[0] modes[1]<if>len(modes)<eq>2<else>self._modes.workers)<block_end><return>Modes(self._modes.tasks modes[0])<block_end>@set_user_friendly_errors<def_stmt>display_modes self *modes:Union[<none> int ModeTask ModeWorker Tuple]<arrow><none><block_start>"""Show available display modes, including the currently selected ones, or change the current task/worker modes, sending one or two arguments of any type. See that constant number beside modes? You can rapidly set modes with them! Args: modes: a display mode to set, either task or worker, or its constant number send two to set both display modes in one call """<if_stmt>modes<block_start>self._modes=self._get_display_modes(modes)<block_end>modes=('tasks' ModeTask self._modes.tasks) ('workers' ModeWorker self._modes.workers)<for_stmt>title,klass,var_mode modes<block_start>print(Colors.BLUE(title))<for_stmt>d klass<block_start>print(' {} {:8} {}: {}'.format(d<eq>var_mode<and>'*'<or>' ' d.name Colors.ORANGE_BOLD(d.value '>2') Colors.YELLOW(d.description)))<block_end><block_end><block_end>@staticmethod<def_stmt>_fetched_info at:HandleStats<arrow><none># pragma: no cover <block_start>print('{} {} in {} ({})'.format(Colors.DIM('fetched:') Colors.BOLD(at.count) Colors.GREEN(at.duration_human) Colors.GREEN(at.throughput_human)))<block_end>@staticmethod<def_stmt>_parse_pattern pattern:str<arrow>PatternFilter<block_start><if_stmt><not>isinstance(pattern (type(<none>) str))<block_start><raise>UserWarning('Invalid pattern.')<block_end>pattern=(pattern<or>'').strip()<line_sep>negate=pattern.startswith('!')<line_sep>pattern=pattern[negate:].strip()<or>'.'<if_stmt>negate<and>pattern<eq>'.'<block_start><return><block_end><return>PatternFilter(pattern=pattern negate=negate)<block_end>@staticmethod<def_stmt>_display_task task:TaskMessage mode:ModeTask<arrow><none><block_start>params,success,error=mode.spec<line_sep>ts=datetime.fromtimestamp(task.timestamp)<line_sep>print(Colors.DIM(ts.strftime('%H:%M:%S.%f')[:-3]) end=' ')<if_stmt><not>task.state<block_start>routing_key=task.routing_key<or>EMPTY<line_sep>print(Colors.BLUE(task.name) Colors.MAGENTA(routing_key[len(task.name):]<or>'-'<if>routing_key.startswith(task.name)<else>routing_key) Colors.DIM(task.uuid))<block_end><else_stmt><block_start>print(ClearlyClient._task_state(task.state) Colors.BLUE_DIM(task.retries) end=' ')<line_sep>print(Colors.BLUE(task.name) 
Colors.DIM(task.uuid))<block_end>show_outcome=(task.state<in>PROPAGATE_STATES<and>error)<or>(task.state<eq>SUCCESS<and>success)<line_sep>first_seen=bool(params)<and><not>task.state<line_sep>params_outcome=params<is><not><false><and>show_outcome<if_stmt>first_seen<or>params_outcome<block_start>print(Colors.DIM('args:' HEADER_ALIGN) typed_code(safe_compile_text(task.args) wrap=<false>)<or>EMPTY)<line_sep>print(Colors.DIM('kwargs:' HEADER_ALIGN) typed_code(safe_compile_text(task.kwargs) wrap=<false>)<or>EMPTY)<block_end><if_stmt>show_outcome<block_start><if_stmt>task.state<eq>SUCCESS<block_start>result=safe_compile_text(task.result)<line_sep>outcome=' '.join((Colors.CYAN_DIM('<{}>'.format(task.result_meta)) EMPTY<if>result<is><none><else>typed_code(result)))<block_end><else_stmt><block_start>outcome=TRACEBACK_HIGHLIGHTER(task.traceback).replace('\n' '\n'+HEADER_PADDING).strip()<block_end>print(Colors.DIM('==>' HEADER_ALIGN) outcome)<block_end><block_end>@staticmethod<def_stmt>_display_worker worker:WorkerMessage mode:ModeWorker<arrow><none><block_start>stats,=mode.spec<if_stmt>worker.timestamp<block_start>ts=datetime.fromtimestamp(worker.timestamp)<line_sep>print(Colors.DIM(ts.strftime('%H:%M:%S.%f')[:-3]) end=' ')<block_end>print(ClearlyClient._worker_state(worker.state) Colors.CYAN_DIM(worker.hostname) Colors.YELLOW_DIM(str(worker.pid)))<if_stmt>stats<block_start>print(Colors.DIM('sw:' HEADER_ALIGN) Colors.CYAN_DIM(' '.join((worker.sw_sys worker.sw_ident))) Colors.ORANGE(worker.sw_ver))<line_sep>print(Colors.DIM('load:' HEADER_ALIGN) ClearlyClient._item_list(worker.loadavg) Colors.DIM('processed:') worker.processed<or>DIM_NONE)<line_sep>heartbeats=[datetime.fromtimestamp(x).strftime('%H:%M:%S.%f')[:-3]<for>x worker.heartbeats<or>[]]<line_sep>print(Colors.DIM('heartb:' HEADER_ALIGN) '{}{}'.format(Colors.ORANGE(worker.freq) Colors.DIM('s')) ClearlyClient._item_list(heartbeats))<block_end><block_end>@staticmethod<def_stmt>_item_list items:Iterable[Any] color:Callable[[str] str]=str<arrow>str<block_start><return>'{}{}{}'.format(Colors.MAGENTA('[') Colors.MAGENTA(', ').join(map(color items)) Colors.MAGENTA(']') )<block_end>@staticmethod<def_stmt>_task_state state:str<arrow><none><block_start><if_stmt>state<eq>SUCCESS# final state in BOLD <block_start><return>Colors.GREEN_BOLD(state HEADER_ALIGN)<block_end><if_stmt>state<in>(FAILURE REVOKED REJECTED)# final too <block_start><return>Colors.RED_BOLD(state HEADER_ALIGN)<block_end><if_stmt>state<eq>RETRY# transient state with a failure. <block_start><return>Colors.ORANGE(state HEADER_ALIGN)<block_end><return>Colors.YELLOW(state HEADER_ALIGN)<block_end># transient states @staticmethod<def_stmt>_worker_state state:str<arrow><none><block_start>result=state<if_stmt>state<eq>HEARTBEAT<block_start><return>Colors.GREEN(result)<block_end><if_stmt>state<eq>ONLINE<block_start><return>Colors.GREEN_BOLD(result)<block_end><return>Colors.RED_BOLD(result)<block_end><block_end>
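# A hedged usage sketch for the client above; host/port are the constructor
# defaults and the filter patterns are only examples of the syntax described
# in the docstrings. Every call prints its results to stdout.
if __name__ == '__main__':
    client = ClearlyClient(host='localhost', port=12223)
    client.display_modes()              # list available task/worker display modes
    client.tasks('!^celery', limit=50)  # past tasks whose names do not start with "celery"
    client.workers()                    # all known workers
    client.metrics()                    # server counters
    client.capture_tasks('email|sms')   # realtime capture; stop with CTRL+C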
<import_stmt>smtplib<import_stmt>datetime<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>email.mime.text MIMEText<import_from_stmt>yahoo_fin stock_info<as>si<import_from_stmt>pandas_datareader DataReader<import_from_stmt>email.mime.multipart MIMEMultipart<import_from_stmt>bs4 BeautifulSoup<import_from_stmt>urllib.request urlopen Request<import_from_stmt>nltk.sentiment.vader SentimentIntensityAnalyzer<import_from_stmt>time sleep<import_from_stmt>selenium webdriver<import_from_stmt>selenium.webdriver.chrome.options Options<import_stmt>talib<line_sep># Define list of stocks stock_list=['AAPL' 'MSFT' 'AMZN']<line_sep># for the tradingview recommendation # options are: '1m', '5m', '15m', '1h', '4h', '1D', '1W', '1M interval="1M"<line_sep># Chromedriver Path path='/Users/shashank/Documents/Code/Python/Finance/chromedriver.exe'<line_sep># Chromedriver Options options=Options()<line_sep>options.add_argument("--headless")<line_sep>webdriver=webdriver.Chrome(executable_path=path options=options)<line_sep># Define start and end dates start=datetime.datetime.now()-datetime.timedelta(days=365)<line_sep>end=datetime.datetime.now()<def_stmt>sendMessage text<block_start>message=text<line_sep>email=""<line_sep>pas=""<line_sep>sms_gateway=''<line_sep>smtp="smtp.gmail.com"<line_sep>port=587<line_sep>server=smtplib.SMTP(smtp port)<line_sep>server.starttls()<line_sep>server.login(email pas)<line_sep>msg=MIMEMultipart()<line_sep>msg['From']=email<line_sep>msg['To']=sms_gateway<line_sep>msg['Subject']="Stock Data\n"<line_sep>body="{}\n".format(message)<line_sep>msg.attach(MIMEText(body 'plain'))<line_sep>sms=msg.as_string()<line_sep>server.sendmail(email sms_gateway sms)<line_sep>server.quit()<line_sep>print('done')<block_end><def_stmt>getData list_of_stocks<block_start><for_stmt>stock list_of_stocks<block_start>df=DataReader(stock 'yahoo' start end)<line_sep>print(stock)<line_sep># Current Price price=si.get_live_price('{}'.format(stock))<line_sep>price=round(price 2)<line_sep># Sharpe Ratio x=5000<line_sep>y=(x)<line_sep>stock_df=df<line_sep>stock_df['Norm return']=stock_df['Adj Close']/stock_df.iloc[0]['Adj Close']<line_sep>allocation=float(x/y)<line_sep>stock_df['Allocation']=stock_df['Norm return']<times>allocation<line_sep>stock_df['Position']=stock_df['Allocation']<times>x<line_sep>pos=[df['Position']]<line_sep>val=pd.concat(pos axis=1)<line_sep>val.columns=['WMT Pos']<line_sep>val['Total Pos']=val.sum(axis=1)<line_sep>val.tail(1)<line_sep>val['Daily Return']=val['Total Pos'].pct_change(1)<line_sep>Sharpe_Ratio=val['Daily Return'].mean()/val['Daily Return'].std()<line_sep>A_Sharpe_Ratio=(252<power>0.5)<times>Sharpe_Ratio<line_sep>A_Sharpe_Ratio=round(A_Sharpe_Ratio 2)<line_sep># News Sentiment finwiz_url='https://finviz.com/quote.ashx?t='<line_sep>news_tables={}<line_sep>url=finwiz_url+stock<line_sep>req=Request(url=url headers={'user-agent':'my-app/0.0.1'})<line_sep>response=urlopen(req)<line_sep>html=BeautifulSoup(response features="lxml")<line_sep>news_table=html.find(id='news-table')<line_sep>news_tables[stock]=news_table<line_sep>parsed_news=[]<line_sep># Iterate through the news <for_stmt>file_name,news_table news_tables.items()<block_start><for_stmt>x news_table.findAll('tr')<block_start>text=x.a.get_text()<line_sep>date_scrape=x.td.text.split()<if_stmt>len(date_scrape)<eq>1<block_start>time=date_scrape[0]<block_end><else_stmt><block_start>date=date_scrape[0]<line_sep>time=date_scrape[1]<block_end>ticker=file_name.split('_')[0]<line_sep>parsed_news.append([ticker date time 
text])<block_end><block_end>vader=SentimentIntensityAnalyzer()<line_sep>columns=['ticker' 'date' 'time' 'headline']<line_sep>dataframe=pd.DataFrame(parsed_news columns=columns)<line_sep>scores=dataframe['headline'].apply(vader.polarity_scores).tolist()<line_sep>scores_df=pd.DataFrame(scores)<line_sep>dataframe=dataframe.join(scores_df rsuffix='_right')<line_sep>dataframe['date']=pd.to_datetime(dataframe.date).dt.date<line_sep>dataframe=dataframe.set_index('ticker')<line_sep>sentiment=round(dataframe['compound'].mean() 2)<line_sep># TradingView Recommendation <try_stmt>#Declare variable <block_start>analysis=[]<line_sep>#Open tradingview's site webdriver.get("https://s.tradingview.com/embed-widget/technical-analysis/?locale=en#%7B%22interval%22%3A%22{}%22%2C%22width%22%3A%22100%25%22%2C%22isTransparent%22%3Afalse%2C%22height%22%3A%22100%25%22%2C%22symbol%22%3A%22{}%22%2C%22showIntervalTabs%22%3Atrue%2C%22colorTheme%22%3A%22dark%22%2C%22utm_medium%22%3A%22widget_new%22%2C%22utm_campaign%22%3A%22technical-analysis%22%7D".format(interval ticker))<line_sep>webdriver.refresh()<line_sep>#Wait for site to load elements <while_stmt>len(webdriver.find_elements_by_class_name("speedometerSignal-pyzN--tL"))<eq>0<block_start>sleep(0.1)<block_end>#Recommendation recommendation_element=webdriver.find_element_by_class_name("speedometerSignal-pyzN--tL")<line_sep>analysis.append(recommendation_element.get_attribute('innerHTML'))<line_sep>#Counters counter_elements=webdriver.find_elements_by_class_name("counterNumber-3l14ys0C")<line_sep>#Sell analysis.append(int(counter_elements[0].get_attribute('innerHTML')))<line_sep>#Neutral analysis.append(int(counter_elements[1].get_attribute('innerHTML')))<line_sep>#Buy analysis.append(int(counter_elements[2].get_attribute('innerHTML')))<line_sep>signal=analysis[0]<block_end><except_stmt><block_start>signal='None'<block_end># Beta df=DataReader(stock 'yahoo' start end)<line_sep>dfb=DataReader('^GSPC' 'yahoo' start end)<line_sep>rts=df.resample('M').last()<line_sep>rbts=dfb.resample('M').last()<line_sep>dfsm=pd.DataFrame({'s_adjclose':rts['Adj Close'] 'b_adjclose':rbts['Adj Close']} index=rts.index)<line_sep>dfsm[['s_returns' 'b_returns']]=dfsm[['s_adjclose' 'b_adjclose']]/dfsm[['s_adjclose' 'b_adjclose']].shift(1)-1<line_sep>dfsm=dfsm.dropna()<line_sep>covmat=np.cov(dfsm["s_returns"] dfsm["b_returns"])<line_sep>beta=covmat[0 1]/covmat[1 1]<line_sep>beta=round(beta 2)<line_sep># Relative Strength Index df["rsi"]=talib.RSI(df["Close"])<line_sep>values=df["rsi"].tail(14)<line_sep>value=values.mean()<line_sep>rsi=round(value 2)<line_sep>output=("\nTicker: "+str(stock)+"\nCurrent Price : "+str(price)+"\nSharpe Ratio: "+str(A_Sharpe_Ratio)+"\nNews Sentiment: "+str(sentiment)+"\nTradingView Rec for {}: ".format(interval)+str(signal)+"\nRelative Strength Index: "+str(rsi)+"\nBeta Value for 1 Year: "+str(beta))<line_sep>sendMessage(output)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>getData(stock_list)<block_end>
# -*- coding: utf-8 -*- # snapshottest: v1 - https://goo.gl/zC4yUc <import_from_future_stmt> unicode_literals<import_from_stmt>snapshottest Snapshot<line_sep>snapshots=Snapshot()<line_sep>snapshots['test_repr[eager] 1']="<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=<lambda> numpy=0.0>"<line_sep>snapshots['test_repr[eager] 2']="<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=Quantizer numpy=0.0>"<line_sep>snapshots['test_repr[eager] 3']="<QuantizedVariable 'x:0' shape=() dtype=float32 precision=1 numpy=0.0>"<line_sep>snapshots['test_repr[graph] 1']="<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=<lambda>>"<line_sep>snapshots['test_repr[graph] 2']="<QuantizedVariable 'x:0' shape=() dtype=float32 quantizer=Quantizer>"<line_sep>snapshots['test_repr[graph] 3']="<QuantizedVariable 'x:0' shape=() dtype=float32 precision=1>"<line_sep>
# -*- coding: utf-8 -*- ### # (C) Copyright (2012-2017) Hewlett Packard Enterprise Development LP # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. ### <import_from_stmt>pprint pprint<import_from_stmt>hpOneView.oneview_client OneViewClient<import_from_stmt>hpOneView.exceptions HPOneViewException<import_from_stmt>examples.config_loader try_load_from_file<line_sep>config={"ip":"<oneview_ip>" "credentials":{"userName":"<username>" "password":"<password>"}}<line_sep># To run this example, a logical interconnect name is required logical_interconnect_name=""<line_sep># To install the firmware driver, a firmware driver name is required firmware_driver_name=""<line_sep># An Enclosure name must be set to create/delete an interconnect at a given location enclosure_name=""<line_sep># Define the scope name to add the logical interconnect to it scope_name=""<line_sep># Try load config from a file (if there is a config file) config=try_load_from_file(config)<line_sep>oneview_client=OneViewClient(config)<line_sep># Get by name print("\nGet a logical interconnect by name")<line_sep>logical_interconnect=oneview_client.logical_interconnects.get_by_name(logical_interconnect_name)<line_sep>pprint(logical_interconnect)<line_sep># Get installed firmware print("\nGet the installed firmware for a logical interconnect that matches the specified name.")<line_sep>firmwares=oneview_client.firmware_drivers.get_by('name' firmware_driver_name)<line_sep>firmware=<none><if_stmt>firmwares<block_start>firmware=firmwares[0]<block_end># Get scope to be added print("\nGet the scope that matches the specified name.")<line_sep>scope=oneview_client.scopes.get_by_name(scope_name)<line_sep>print("\nGet the enclosure that matches the specified name.")<line_sep>enclosures=oneview_client.enclosures.get_by('name' enclosure_name)<line_sep>enclosure=<none><if_stmt>enclosures<block_start>enclosure=enclosures[0]<block_end># Install the firmware to a logical interconnect <if_stmt>firmware<block_start>print("\nInstall the firmware to a logical interconnect that matches the specified ID.")<line_sep>firmware_to_install=dict(command="Update" sppUri=firmware['uri'])<line_sep>installed_firmware=oneview_client.logical_interconnects.install_firmware(firmware_to_install logical_interconnect['uri'])<line_sep>pprint(installed_firmware)<block_end># Performs a patch operation # Endpoint supported only in api-versions 500 and below. 
<if_stmt>scope<and>(oneview_client.api_version<le>500)<block_start>print("\nPatches the logical interconnect adding one scope to it")<line_sep>updated_logical_interconnect=oneview_client.logical_interconnects.patch(logical_interconnect['uri'] 'replace' '/scopeUris' [scope['uri']])<line_sep>pprint(updated_logical_interconnect)<block_end># Get all logical interconnects
print("\nGet all logical interconnects")<line_sep>logical_interconnects=oneview_client.logical_interconnects.get_all()<for_stmt>logical_interconnect logical_interconnects<block_start>print(' Name: {name}'.format(**logical_interconnect))<block_end>logical_interconnect=logical_interconnects[0]<line_sep># Get a logical interconnect by name
logical_interconnect=oneview_client.logical_interconnects.get_by_name(logical_interconnect['name'])<line_sep>print("\nFound logical interconnect by name {name}.\n URI: {uri}".format(**logical_interconnect))<line_sep>print("\nGet the Ethernet interconnect settings for the logical interconnect")<line_sep>ethernet_settings=oneview_client.logical_interconnects.get_ethernet_settings(logical_interconnect['uri'])<line_sep>pprint(ethernet_settings)<line_sep># Update the Ethernet interconnect settings for the logical interconnect
ethernet_settings=logical_interconnect['ethernetSettings'].copy()<line_sep>ethernet_settings['macRefreshInterval']=10<line_sep>logical_interconnect=oneview_client.logical_interconnects.update_ethernet_settings(logical_interconnect['uri'] ethernet_settings force=<true>)<line_sep>print("\nUpdated the ethernet settings")<line_sep>print(" with attribute 'macRefreshInterval' = {macRefreshInterval}".format(**logical_interconnect['ethernetSettings']))<line_sep># Update the internal networks on the logical interconnect
ethernet_network_options={"name":"OneViewSDK Test Ethernet Network on Logical Interconnect" "vlanId":200 "ethernetNetworkType":"Tagged" "purpose":"General" "smartLink":<false> "privateNetwork":<false> "connectionTemplateUri":<none> }<line_sep>ethernet_networks=oneview_client.ethernet_networks.get_by('name' ethernet_network_options['name'])<if_stmt>len(ethernet_networks)<g>0<block_start>ethernet_network=ethernet_networks[0]<block_end><else_stmt><block_start>ethernet_network=oneview_client.ethernet_networks.create(ethernet_network_options)<block_end>logical_interconnect=oneview_client.logical_interconnects.update_internal_networks(logical_interconnect['uri'] [ethernet_network['uri']])<line_sep>print("\nUpdated internal networks on the logical interconnect")<line_sep>print(" with attribute 'internalNetworkUris' = {internalNetworkUris}".format(**logical_interconnect))<line_sep># Get the internal VLAN IDs
print("\nGet the internal VLAN IDs for the provisioned networks on the logical interconnect")<line_sep>internal_vlans=oneview_client.logical_interconnects.get_internal_vlans(logical_interconnect['uri'])<line_sep>pprint(internal_vlans)<line_sep># Update the interconnect settings
# End-point supported only in api-versions 500 and below. 
<if_stmt>oneview_client.api_version<le>500<block_start>print("\nUpdates the interconnect settings on the logical interconnect")<line_sep>interconnect_settings={'ethernetSettings':logical_interconnect['ethernetSettings'].copy() 'fcoeSettings':{}}<line_sep>interconnect_settings['ethernetSettings']['macRefreshInterval']=7<line_sep>logical_interconnect=oneview_client.logical_interconnects.update_settings(logical_interconnect['uri'] interconnect_settings)<line_sep>print("Updated interconnect settings on the logical interconnect")<line_sep>print(" with attribute 'macRefreshInterval' = {macRefreshInterval}".format(**logical_interconnect['ethernetSettings']))<line_sep>pprint(logical_interconnect)<block_end># Get the SNMP configuration for the logical interconnect print("\nGet the SNMP configuration for the logical interconnect")<line_sep>snmp_configuration=oneview_client.logical_interconnects.get_snmp_configuration(logical_interconnect['uri'])<line_sep>pprint(snmp_configuration)<line_sep># Update the SNMP configuration for the logical interconnect <try_stmt><block_start>print("\nUpdate the SNMP configuration for the logical interconnect")<line_sep>snmp_configuration['enabled']=<true><line_sep>logical_interconnect=oneview_client.logical_interconnects.update_snmp_configuration(logical_interconnect['uri'] snmp_configuration)<line_sep>interconnect_snmp=logical_interconnect['snmpConfiguration']<line_sep>print(" Updated SNMP configuration at uri: {uri}\n with 'enabled': '{enabled}'".format(**interconnect_snmp))<block_end><except_stmt>HPOneViewException<as>e<block_start>print(e.msg)<block_end># Get a collection of uplink ports from the member interconnects which are eligible for assignment to an analyzer port print("\nGet a collection of uplink ports from the member interconnects which are eligible for assignment to "<concat>"an analyzer port on the logical interconnect")<line_sep>unassigned_uplink_ports=oneview_client.logical_interconnects.get_unassigned_uplink_ports(logical_interconnect['uri'])<line_sep>pprint(unassigned_uplink_ports)<line_sep># Get the port monitor configuration of a logical interconnect print("\nGet the port monitor configuration of a logical interconnect")<line_sep>monitor_configuration=oneview_client.logical_interconnects.get_port_monitor(logical_interconnect['uri'])<line_sep>pprint(monitor_configuration)<line_sep># Update port monitor configuration of a logical interconnect <try_stmt><block_start>print("\nUpdate the port monitor configuration of a logical interconnect")<line_sep>monitor_configuration['enablePortMonitor']=<true><line_sep>logical_interconnect=oneview_client.logical_interconnects.update_port_monitor(logical_interconnect['uri'] monitor_configuration)<line_sep>print(" Updated port monitor at uri: {uri}\n with 'enablePortMonitor': '{enablePortMonitor}'".format(**logical_interconnect['portMonitor']))<block_end><except_stmt>HPOneViewException<as>e<block_start>print(e.msg)<block_end># Get the telemetry configuration of the logical interconnect print("\nGet the telemetry configuration of the logical interconnect")<line_sep>telemetry_configuration_uri=logical_interconnect['telemetryConfiguration']['uri']<line_sep>telemetry_configuration=oneview_client.logical_interconnects.get_telemetry_configuration(telemetry_configuration_uri)<line_sep>pprint(telemetry_configuration)<line_sep>print("\nUpdate the telemetry configuration")<line_sep>telemetry_config={"sampleCount":12 "enableTelemetry":<true> 
"sampleInterval":300}<line_sep>logical_interconnect_updated=oneview_client.logical_interconnects.update_telemetry_configurations(configuration=telemetry_config tc_id_or_uri=telemetry_configuration_uri)<line_sep>pprint(logical_interconnect_updated)<line_sep># Update the configuration on the logical interconnect print("\nUpdate the configuration on the logical interconnect")<line_sep>logical_interconnect=oneview_client.logical_interconnects.update_configuration(logical_interconnect['uri'])<line_sep>print(" Done.")<line_sep># Return the logical interconnect to a consistent state print("\nReturn the logical interconnect to a consistent state")<line_sep>logical_interconnect=oneview_client.logical_interconnects.update_compliance(logical_interconnect['uri'])<line_sep>print(" Done. The current consistency state is {consistencyStatus}.".format(**logical_interconnect))<line_sep># Create an interconnect at a specified location <if_stmt>enclosure['uri']<block_start>print("\nCreate an interconnect at the specified location")<line_sep>bay=1<line_sep>location={"locationEntries":[{"type":"Enclosure" "value":enclosure['uri']} {"type":"Bay" "value":bay}]}<line_sep>interconnect=oneview_client.logical_interconnects.create_interconnect(location)<line_sep>pprint(interconnect)<line_sep>oneview_client.logical_interconnects.delete_interconnect(enclosure['uri'] bay)<line_sep>print("\nThe interconnect was successfully deleted.")<block_end># Generate the forwarding information base dump file for the logical interconnect print("\nGenerate the forwarding information base dump file for the logical interconnect")<line_sep>fwd_info_datainfo=oneview_client.logical_interconnects.create_forwarding_information_base(logical_interconnect['uri'])<line_sep>pprint(fwd_info_datainfo)<line_sep># Get the forwarding information base data for the logical interconnect print("\nGet the forwarding information base data for the logical interconnect")<line_sep>fwd_information=oneview_client.logical_interconnects.get_forwarding_information_base(logical_interconnect['uri'])<line_sep>pprint(fwd_information)<line_sep># Get the QoS aggregated configuration for the logical interconnect. print("\nGets the QoS aggregated configuration for the logical interconnect.")<line_sep>qos=oneview_client.logical_interconnects.get_qos_aggregated_configuration(logical_interconnect['uri'])<line_sep>pprint(qos)<line_sep># Update the QOS aggregated configuration <try_stmt><block_start>print("\nUpdate QoS aggregated settings on the logical interconnect")<line_sep>qos['activeQosConfig']['configType']='Passthrough'<line_sep>li=oneview_client.logical_interconnects.update_qos_aggregated_configuration(logical_interconnect['uri'] qos)<line_sep>pprint(li['qosConfiguration'])<block_end><except_stmt>HPOneViewException<as>e<block_start>print(e.msg)<block_end>
# SPDX-FileCopyrightText: 2021 iteratec GmbH # # SPDX-License-Identifier: Apache-2.0 """ context A Python package containing secureCodeBox specific ZAPv2 Client extensions to configure ZAP API contexts. """<line_sep>__all__=['zap_context' 'zap_context_authentication']<import_from_stmt>.zap_context ZapConfigureContext<import_from_stmt>.zap_context_authentication ZapConfigureContextAuthentication<line_sep>
# built-in <import_stmt>json<line_sep># external <import_stmt>pytest<line_sep># project <import_from_stmt>dephell.commands PackageShowCommand<import_from_stmt>dephell.config Config<line_sep>@pytest.mark.allow_hosts()<def_stmt>test_package_show_command capsys<block_start>config=Config()<line_sep>config.attach({'level':'WARNING' 'silent':<true> 'nocolors':<true> })<line_sep>command=PackageShowCommand(argv=['textdistance'] config=config)<line_sep>result=command()<assert_stmt>result<is><true><line_sep>captured=capsys.readouterr()<line_sep>output=json.loads(captured.out)<assert_stmt>output['name']<eq>'textdistance'<assert_stmt>output['license']<eq>'MIT'<block_end>
<import_from_future_stmt> print_function<line_sep>print(float(1)/2)<line_sep>
<import_from_stmt>stp_core.common.log getlogger<import_from_stmt>stp_core.loop.looper Prodable<import_from_stmt>stp_core.loop.startable Status<line_sep>logger=getlogger()<line_sep># TODO: move it to plenum-util repo <class_stmt>Motor(Prodable)<block_start>""" Base class for Prodable that includes status management. Subclasses are responsible for changing status from starting to started. """<def_stmt>__init__ self<block_start>""" Motor is initialized with a status of Stopped. """<line_sep>self._status=Status.stopped<block_end><def_stmt>get_status self<arrow>Status<block_start>""" Return the current status """<line_sep><return>self._status<block_end><def_stmt>set_status self value<block_start>""" Set the status of the motor to the specified value if not already set. """<if_stmt><not>self._status<eq>value<block_start>old=self._status<line_sep>self._status=value<line_sep>logger.info("{} changing status from {} to {}".format(self old.name value.name))<line_sep>self._statusChanged(old value)<block_end><block_end>status=property(fget=get_status fset=set_status)<def_stmt>isReady self<block_start>""" Is the status in Status.ready()? """<line_sep><return>self.status<in>Status.ready()<block_end><def_stmt>isGoing self<block_start>""" Is the status in Status.going()? """<line_sep><return>self.status<in>Status.going()<block_end><def_stmt>start self loop<block_start>""" Set the status to Status.starting """<line_sep>self.status=Status.starting<block_end><def_stmt>stop self *args **kwargs<block_start>""" Set the status to Status.stopping and also call `onStopping` with the provided args and kwargs. """<if_stmt>self.status<in>(Status.stopping Status.stopped)<block_start>logger.debug("{} is already {}".format(self self.status.name))<block_end><else_stmt><block_start>self.status=Status.stopping<line_sep>self.onStopping(*args **kwargs)<line_sep>self.status=Status.stopped<block_end><block_end><def_stmt>_statusChanged self old new<block_start>""" Perform some actions based on whether this node is ready or not. :param old: the previous status :param new: the current status """<line_sep><raise>NotImplementedError("{} must implement this method".format(self))<block_end><def_stmt>onStopping self *args **kwargs<block_start>""" A series of actions to be performed when stopping the motor. """<line_sep><raise>NotImplementedError("{} must implement this method".format(self))<block_end><async_keyword><def_stmt>prod self limit<arrow>int<block_start><raise>NotImplementedError("{} must implement this method".format(self))<block_end><block_end>
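# A minimal sketch of a concrete Motor (hypothetical subclass, not part of the
# module above) showing the three methods every subclass must provide.
class NoopMotor(Motor):
    def _statusChanged(self, old, new):
        # react to status transitions; here we only log them
        logger.debug("status moved from {} to {}".format(old.name, new.name))

    def onStopping(self, *args, **kwargs):
        # release any resources held by the motor
        pass

    async def prod(self, limit) -> int:
        # perform one batch of work; report zero events processed
        return 0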
default_app_config='ralph.data_center.apps.DataCenterConfig'<line_sep>
<import_stmt>itertools<import_stmt>decimal<import_stmt>datetime<import_from_stmt>datapackage Package Resource<import_from_stmt>tableschema.storage Storage<import_from_stmt>.. DataStreamProcessor<class_stmt>iterable_storage(Storage)<block_start>SAMPLE_SIZE=100<def_stmt>__init__ self iterable<block_start>super(iterable_storage self).__init__()<line_sep>self.iterable=iterable<line_sep>self.schema=<none><block_end><def_stmt>connect self name<block_start><pass><block_end><def_stmt>buckets self<block_start><pass><block_end><def_stmt>create self<block_start><pass><block_end><def_stmt>delete self<block_start><pass><block_end><def_stmt>read self<block_start><pass><block_end><def_stmt>write self<block_start><pass><block_end><def_stmt>field_type self values<block_start>types=set()<for_stmt>value values<block_start><if_stmt>isinstance(value str)<block_start>types.add('string')<block_end><elif_stmt>isinstance(value bool)<block_start>types.add('boolean')<block_end><elif_stmt>isinstance(value int)<block_start>types.add('integer')<block_end><elif_stmt>isinstance(value (float decimal.Decimal))<block_start>types.add('number')<block_end><elif_stmt>isinstance(value list)<block_start>types.add('array')<block_end><elif_stmt>isinstance(value dict)<block_start>types.add('object')<block_end><elif_stmt>isinstance(value datetime.datetime)<block_start>types.add('datetime')<block_end><elif_stmt>isinstance(value datetime.date)<block_start>types.add('date')<block_end><elif_stmt>value<is><none><block_start><pass><block_end><else_stmt><block_start><assert_stmt>'Unknown Python type: %r'%value<block_end><block_end><if_stmt>len(types)<ne>1<block_start><return>'any'<block_end><else_stmt><block_start><return>types.pop()<block_end><block_end><def_stmt>describe self _ descriptor=<none><block_start><if_stmt>descriptor<is><not><none><block_start><return>descriptor<block_end><if_stmt>self.schema<is><none><block_start><try_stmt><block_start>sample=list(itertools.islice(self.iterable self.SAMPLE_SIZE))<line_sep>rec=sample[0]<line_sep>self.iterable=itertools.chain(sample self.iterable)<line_sep>self.schema=dict(fields=[dict(name=name type=self.field_type([s.get(name)<for>s sample]))<for>name rec.keys()])<block_end><except_stmt>Exception<block_start>self.schema=dict(fields=[])<block_end><block_end><return>self.schema<block_end><def_stmt>iter self _<block_start><return>self.iterable<block_end><block_end><class_stmt>iterable_loader(DataStreamProcessor)<block_start><def_stmt>__init__ self iterable name=<none><block_start>super(iterable_loader self).__init__()<line_sep>self.iterable=iterable<line_sep>self.name=name<line_sep>self.exc=<none><block_end><def_stmt>handle_iterable self<block_start>mode=<none><try_stmt><block_start><for_stmt>x self.iterable<block_start><if_stmt>mode<is><none><block_start><assert_stmt>isinstance(x (dict list tuple)) 'Bad item %r'%x<line_sep>mode=dict<if>isinstance(x dict)<else>list<block_end><assert_stmt>isinstance(x mode)<if_stmt>mode<eq>dict<block_start><yield>x<block_end><else_stmt><block_start><yield>dict(zip(('col{}'.format(i)<for>i range(len(x))) x))<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>self.exc=e<line_sep><raise><block_end><block_end><def_stmt>process_datapackage self dp:Package<block_start>name=self.name<if_stmt>name<is><none><block_start>name='res_{}'.format(len(dp.resources)+1)<block_end>self.res=Resource(dict(name=name path='{}.csv'.format(name)) 
storage=iterable_storage(self.handle_iterable()))<line_sep>self.res.infer()<if_stmt>self.exc<is><not><none><block_start><raise>self.exc<block_end>dp.descriptor.setdefault('resources' []).append(self.res.descriptor)<line_sep><return>dp<block_end><def_stmt>process_resources self resources<block_start><yield><from>super(iterable_loader self).process_resources(resources)<line_sep><yield>self.res.iter(keyed=<true>)<block_end><block_end>
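# A hedged usage sketch: iterable_loader infers the resource schema from the
# first rows (iterable_storage.SAMPLE_SIZE) and appends a new resource to the
# datapackage it processes; wiring it into a full pipeline depends on the
# surrounding framework, so only construction is shown. The row values are
# illustrative.
rows = [
    {'city': 'Paris', 'population': 2148000},
    {'city': 'Berlin', 'population': 3769000},
]
dict_loader = iterable_loader(rows, name='cities')
# lists/tuples are also accepted; columns are then auto-named col0, col1, ...
positional_loader = iterable_loader([(1, 'a'), (2, 'b')])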
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve. # #Licensed under the Apache License, Version 2.0 (the "License"); #you may not use this file except in compliance with the License. #You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # #Unless required by applicable law or agreed to in writing, software #distributed under the License is distributed on an "AS IS" BASIS, #WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. #See the License for the specific language governing permissions and #limitations under the License. <import_stmt>math<import_stmt>paddle<import_stmt>paddle.nn<as>nn<import_from_stmt>.builder GENERATORS<def_stmt>default_conv in_channels out_channels kernel_size bias=<true><block_start><return>nn.Conv2D(in_channels out_channels kernel_size padding=(kernel_size<floordiv>2) bias_attr=bias)<block_end><class_stmt>MeanShift(nn.Conv2D)<block_start><def_stmt>__init__ self rgb_range rgb_mean rgb_std sign=-1<block_start>super(MeanShift self).__init__(3 3 kernel_size=1)<line_sep>std=paddle.to_tensor(rgb_std)<line_sep>self.weight.set_value(paddle.eye(3).reshape([3 3 1 1]))<line_sep>self.weight.set_value(self.weight/(std.reshape([3 1 1 1])))<line_sep>mean=paddle.to_tensor(rgb_mean)<line_sep>self.bias.set_value(sign<times>rgb_range<times>mean/std)<line_sep>self.weight.trainable=<false><line_sep>self.bias.trainable=<false><block_end><block_end><class_stmt>DownBlock(nn.Layer)<block_start><def_stmt>__init__ self negval n_feats n_colors scale nFeat=<none> in_channels=<none> out_channels=<none><block_start>super(DownBlock self).__init__()<if_stmt>nFeat<is><none><block_start>nFeat=n_feats<block_end><if_stmt>in_channels<is><none><block_start>in_channels=n_colors<block_end><if_stmt>out_channels<is><none><block_start>out_channels=n_colors<block_end>dual_block=[nn.Sequential(nn.Conv2D(in_channels nFeat kernel_size=3 stride=2 padding=1 bias_attr=<false>) nn.LeakyReLU(negative_slope=negval))]<for_stmt>_ range(1 int(math.log2(scale)))<block_start>dual_block.append(nn.Sequential(nn.Conv2D(nFeat nFeat kernel_size=3 stride=2 padding=1 bias_attr=<false>) nn.LeakyReLU(negative_slope=negval)))<block_end>dual_block.append(nn.Conv2D(nFeat out_channels kernel_size=3 stride=1 padding=1 bias_attr=<false>))<line_sep>self.dual_module=nn.Sequential(*dual_block)<block_end><def_stmt>forward self x<block_start>x=self.dual_module(x)<line_sep><return>x<block_end><block_end>## Channel Attention (CA) Layer <class_stmt>CALayer(nn.Layer)<block_start><def_stmt>__init__ self channel reduction=16<block_start>super(CALayer self).__init__()<line_sep># global average pooling: feature --> point self.avg_pool=nn.AdaptiveAvgPool2D(1)<line_sep># feature channel downscale and upscale --> channel weight self.conv_du=nn.Sequential(nn.Conv2D(channel channel<floordiv>reduction 1 padding=0 bias_attr=<true>) nn.ReLU() nn.Conv2D(channel<floordiv>reduction channel 1 padding=0 bias_attr=<true>) nn.Sigmoid())<block_end><def_stmt>forward self x<block_start>y=self.avg_pool(x)<line_sep>y=self.conv_du(y)<line_sep><return>x<times>y<block_end><block_end><class_stmt>RCAB(nn.Layer)<block_start><def_stmt>__init__ self conv n_feat kernel_size reduction=16 bias=<true> bn=<false> act=nn.ReLU() res_scale=1<block_start>super(RCAB self).__init__()<line_sep>modules_body=[]<for_stmt>i range(2)<block_start>modules_body.append(conv(n_feat n_feat kernel_size 
bias=bias))<if_stmt>bn<block_start>modules_body.append(nn.BatchNorm2D(n_feat))<block_end><if_stmt>i<eq>0<block_start>modules_body.append(act)<block_end><block_end>modules_body.append(CALayer(n_feat reduction))<line_sep>self.body=nn.Sequential(*modules_body)<line_sep>self.res_scale=res_scale<block_end><def_stmt>forward self x<block_start>res=self.body(x)<line_sep>res<augadd>x<line_sep><return>res<block_end><block_end><class_stmt>Upsampler(nn.Sequential)<block_start><def_stmt>__init__ self conv scale n_feats bn=<false> act=<false> bias=<true><block_start>m=[]<if_stmt>(scale&(scale-1))<eq>0# Is scale = 2^n? <block_start><for_stmt>_ range(int(math.log(scale 2)))<block_start>m.append(conv(n_feats 4<times>n_feats 3 bias))<line_sep>m.append(nn.PixelShuffle(2))<if_stmt>bn<block_start>m.append(nn.BatchNorm2D(n_feats))<block_end><if_stmt>act<eq>'relu'<block_start>m.append(nn.ReLU())<block_end><elif_stmt>act<eq>'prelu'<block_start>m.append(nn.PReLU(n_feats))<block_end><block_end><block_end><elif_stmt>scale<eq>3<block_start>m.append(conv(n_feats 9<times>n_feats 3 bias))<line_sep>m.append(nn.PixelShuffle(3))<if_stmt>bn<block_start>m.append(nn.BatchNorm2D(n_feats))<block_end><if_stmt>act<eq>'relu'<block_start>m.append(nn.ReLU())<block_end><elif_stmt>act<eq>'prelu'<block_start>m.append(nn.PReLU(n_feats))<block_end><block_end><else_stmt><block_start><raise>NotImplementedError<block_end>super(Upsampler self).__init__(*m)<block_end><block_end>@GENERATORS.register()<class_stmt>DRNGenerator(nn.Layer)<block_start>"""DRNGenerator"""<def_stmt>__init__ self scale n_blocks=30 n_feats=16 n_colors=3 rgb_range=255 negval=0.2 kernel_size=3 conv=default_conv <block_start>super(DRNGenerator self).__init__()<line_sep>self.scale=scale<line_sep>self.phase=len(scale)<line_sep>act=nn.ReLU()<line_sep>self.upsample=nn.Upsample(scale_factor=max(scale) mode='bicubic' align_corners=<false>)<line_sep>rgb_mean=(0.4488 0.4371 0.4040)<line_sep>rgb_std=(1.0 1.0 1.0)<line_sep>self.sub_mean=MeanShift(rgb_range rgb_mean rgb_std)<line_sep>self.head=conv(n_colors n_feats kernel_size)<line_sep>self.down=[DownBlock(negval n_feats n_colors 2 n_feats<times>pow(2 p) n_feats<times>pow(2 p) n_feats<times>pow(2 p+1))<for>p range(self.phase)]<line_sep>self.down=nn.LayerList(self.down)<line_sep>up_body_blocks=[[RCAB(conv n_feats<times>pow(2 p) kernel_size act=act)<for>_ range(n_blocks)]<for>p range(self.phase 1 -1)]<line_sep>up_body_blocks.insert(0 [RCAB(conv n_feats<times>pow(2 self.phase) kernel_size act=act)<for>_ range(n_blocks)])<line_sep># The fisrt upsample block up=[[Upsampler(conv 2 n_feats<times>pow(2 self.phase) act=<false>) conv(n_feats<times>pow(2 self.phase) n_feats<times>pow(2 self.phase-1) kernel_size=1)]]<line_sep># The rest upsample blocks <for_stmt>p range(self.phase-1 0 -1)<block_start>up.append([Upsampler(conv 2 2<times>n_feats<times>pow(2 p) act=<false>) conv(2<times>n_feats<times>pow(2 p) n_feats<times>pow(2 p-1) kernel_size=1)])<block_end>self.up_blocks=nn.LayerList()<for_stmt>idx range(self.phase)<block_start>self.up_blocks.append(nn.Sequential(*up_body_blocks[idx] *up[idx]))<block_end># tail conv that output sr imgs tail=[conv(n_feats<times>pow(2 self.phase) n_colors kernel_size)]<for_stmt>p range(self.phase 0 -1)<block_start>tail.append(conv(n_feats<times>pow(2 p) n_colors kernel_size))<block_end>self.tail=nn.LayerList(tail)<line_sep>self.add_mean=MeanShift(rgb_range rgb_mean rgb_std 1)<block_end><def_stmt>forward self x<block_start>"""Forward function. Args: x (Tensor): Input tensor with shape (n, c, h, w). 
Returns: Tensor: Forward results. """<line_sep># upsample x to target sr size x=self.upsample(x)<line_sep># preprocess x=self.sub_mean(x)<line_sep>x=self.head(x)<line_sep># down phases, copies=[]<for_stmt>idx range(self.phase)<block_start>copies.append(x)<line_sep>x=self.down[idx](x)<block_end># up phases sr=self.tail[0](x)<line_sep>sr=self.add_mean(sr)<line_sep>results=[sr]<for_stmt>idx range(self.phase)# upsample to SR features <block_start>x=self.up_blocks[idx](x)<line_sep># concat down features and upsample features x=paddle.concat((x copies[self.phase-idx-1]) 1)<line_sep># output sr imgs sr=self.tail[idx+1](x)<line_sep>sr=self.add_mean(sr)<line_sep>results.append(sr)<block_end><return>results<block_end><block_end>
# Copyright 2013 Viewfinder Inc. All Rights Reserved. """Run analysis over all merged user analytics logs. Computes speed percentiles for full asset scans (only those lasting more than 1s for more accurate numbers). Automatically finds the list of merged logs in S3. If --start_date=YYYY-MM-DD is specified, only analyze logs starting from a week before that date (we give user logs that much time to get uploaded). Usage: # Analyze all logs. python -m viewfinder.backend.logs.analyze_analytics_logs # Analyze logs from a specific start date. python -m viewfinder.backend.logs.analyze_analytics_logs --start_date=2012-12-15 Other options: -require_lock: default=True: hold the job:analyze_analytics lock during processing. -smart_scan: default=False: determine the start date from previous run summaries. -hours_between_runs: default=0: don't run if last successful run started less than this many hours ago. """<line_sep>__author__='<EMAIL> (<NAME>)'<import_stmt>cStringIO<import_stmt>json<import_stmt>logging<import_stmt>numpy<import_stmt>os<import_stmt>sys<import_stmt>time<import_stmt>traceback<import_from_stmt>collections defaultdict Counter<import_from_stmt>tornado gen options<import_from_stmt>viewfinder.backend.base constants main statistics util<import_from_stmt>viewfinder.backend.base.dotdict DotDict<import_from_stmt>viewfinder.backend.db db_client<import_from_stmt>viewfinder.backend.db.job Job<import_from_stmt>viewfinder.backend.logs logs_util<import_from_stmt>viewfinder.backend.storage.object_store ObjectStore<import_from_stmt>viewfinder.backend.storage store_utils<line_sep># TODO(marc): automatic date detection (eg: find latest metric entry and process from 30 days before). options.define('start_date' default=<none> help='Start date (filename start key). May be overridden by smart_scan.')<line_sep>options.define('dry_run' default=<true> help='Do not update dynamodb metrics table')<line_sep>options.define('compute_today' default=<false> help='Do not compute statistics for today, logs will be partial')<line_sep>options.define('require_lock' type=bool default=<true> help='attempt to grab the job:analyze_analytics lock before running. Exit if acquire fails.')<line_sep>options.define('smart_scan' type=bool default=<false> help='determine start_date from previous successful runs.')<line_sep>options.define('hours_between_runs' type=int default=0 help='minimum time since start of last successful run (with dry_run=False)')<class_stmt>DayStats(object)<block_start><def_stmt>__init__ self day<block_start>self.day=day<line_sep>self._scan_durations=[]<line_sep>self._long_scan_speeds=[]<line_sep>self._photos_scanned=[]<line_sep># Number of unique users recording an event on this day. self.event_users=Counter()<line_sep># Number of occurrences of an event aggregated across all users. 
self.total_events=Counter()<block_end><def_stmt>AddScan self version photos duration<block_start>self._scan_durations.append(duration)<line_sep>self._photos_scanned.append(photos)<if_stmt>duration<g>1.0<block_start>self._long_scan_speeds.append(photos/duration)<block_end><block_end><def_stmt>AddEvents self counters<block_start><for_stmt>name,count counters.iteritems()<block_start>self.total_events[name]<augadd>count<line_sep>self.event_users[name]<augadd>1<block_end><block_end><def_stmt>PrintSummary self<block_start>logging.info('Day: %s\n %s'%(self.day statistics.FormatStats(self._long_scan_speeds percentiles=[90 95 99])))<block_end><def_stmt>ScanDurationPercentile self percentile<block_start><return>numpy.percentile(self._scan_durations percentile)<block_end><def_stmt>LongScanSpeedPercentile self percentile<block_start><return>numpy.percentile(self._long_scan_speeds percentile)<block_end><def_stmt>PhotosScannedPercentile self percentile<block_start><return>numpy.percentile(self._photos_scanned percentile)<block_end><block_end>@gen.engine<def_stmt>ProcessFiles merged_store filenames callback<block_start>"""Fetch and process each file contained in 'filenames'."""<line_sep>@gen.engine<def_stmt>_ProcessOneFile contents day_stats<block_start>"""Iterate over the contents of a processed file: one entry per line. Increment stats for specific entries."""<line_sep>buf=cStringIO.StringIO(contents)<line_sep>buf.seek(0)<line_sep>ui_events=Counter()<while_stmt><true><block_start>line=buf.readline()<if_stmt><not>line<block_start><break><block_end>parsed=json.loads(line)<if_stmt><not>parsed<block_start><continue><block_end><if_stmt>'version'<not><in>parsed<block_start><continue><block_end># TODO(marc): lookup the user's device ID in dynamodb and get device model. payload=parsed['payload']<if_stmt>'name'<in>payload<block_start><if_stmt>payload['name']<eq>'/assets/scan'<and>payload['type']<eq>'full'<block_start>day_stats.AddScan(parsed['version'] payload['num_scanned'] payload['elapsed'])<block_end><elif_stmt>payload['name'].startswith('/ui/')<block_start>ui_events[payload['name']]<augadd>1<block_end><block_end><block_end><if_stmt>ui_events<block_start>ui_events['/ui/anything']<augadd>1<block_end>day_stats.AddEvents(ui_events)<line_sep>buf.close()<block_end>today=util.NowUTCToISO8601()<line_sep># Group filenames by day. files_by_day=defaultdict(list)<for_stmt>filename filenames<block_start>_,day,user=filename.split('/')<if_stmt>options.options.compute_today<or>today<ne>day<block_start>files_by_day[day].append(filename)<block_end><block_end># Compute per-day totals. Toss them into a list, we'll want it sorted. stats_by_day={}<for_stmt>day sorted(files_by_day.keys())# We don't really need to process days in-order, but it's nicer. 
<block_start>files=files_by_day[day]<line_sep>day_stats=DayStats(day)<for_stmt>f files<block_start>contents=''<try_stmt><block_start>contents=<yield>gen.Task(merged_store.Get f)<block_end><except_stmt>Exception<as>e<block_start>logging.error('Error fetching file %s: %r'%(f e))<line_sep><continue><block_end>_ProcessOneFile(contents day_stats)<block_end><if_stmt>len(day_stats._long_scan_speeds)<eq>0<block_start><continue><block_end>dd=DotDict()<for_stmt>p [1 5 10 25 50 75 90 95 99]<block_start>dd['user_analytics.scans_gt1s_speed_percentile.%.2d'%p]=day_stats.LongScanSpeedPercentile(p)<line_sep>dd['user_analytics.scans_duration_percentile.%.2d'%p]=day_stats.ScanDurationPercentile(p)<line_sep>dd['user_analytics.scans_num_photos_percentile.%.2d'%p]=day_stats.PhotosScannedPercentile(p)<block_end>dd['user_analytics.ui.event_users']=day_stats.event_users<line_sep>dd['user_analytics.ui.total_events']=day_stats.total_events<line_sep>stats_by_day[day]=dd<block_end>callback(stats_by_day)<block_end>@gen.engine<def_stmt>GetMergedLogsFileList merged_store marker callback<block_start>"""Fetch the list of file names from S3."""<line_sep>registry_dir=os.path.join(logs_util.UserAnalyticsLogsPaths.kMergedLogsPrefix logs_util.UserAnalyticsLogsPaths.kRegistryDir)<def_stmt>_WantFile filename<block_start><return><not>filename.startswith(registry_dir)<block_end>base_path=logs_util.UserAnalyticsLogsPaths.kMergedLogsPrefix+'/'<line_sep>marker=os.path.join(base_path marker)<if>marker<is><not><none><else><none><line_sep>file_list=<yield>gen.Task(store_utils.ListAllKeys merged_store prefix=base_path marker=marker)<line_sep>files=[f<for>f file_list<if>_WantFile(f)]<line_sep>files.sort()<line_sep>logging.info('found %d merged log files, analyzing %d'%(len(file_list) len(files)))<line_sep>callback(files)<block_end>@gen.engine<def_stmt>RunOnce client job callback<block_start>"""Get list of files and call processing function."""<line_sep>merged_store=ObjectStore.GetInstance(logs_util.UserAnalyticsLogsPaths.MERGED_LOGS_BUCKET)<line_sep>start_date=options.options.start_date<if_stmt>options.options.smart_scan# Search for successful full-scan run in the last week. <block_start>last_run=<yield>gen.Task(job.FindLastSuccess with_payload_key='stats.last_day')<if_stmt>last_run<is><none><block_start>logging.info('No previous successful scan found, rerun with --start_date')<line_sep>callback(<none>)<line_sep><return><block_end>last_run_start=last_run['start_time']<if_stmt>util.HoursSince(last_run_start)<l>options.options.hours_between_runs<block_start>logging.info('Last successful run started at %s, less than %d hours ago; skipping.'%(time.asctime(time.localtime(last_run_start)) options.options.hours_between_runs))<line_sep>callback(<none>)<line_sep><return><block_end>last_day=last_run['stats.last_day']<line_sep># Set scan_start to start of previous run - 30d (we need 30 days' worth of data to properly compute # 30-day active users. Add an extra 3 days just in case we had some missing logs during the last run. start_time=util.ISO8601ToUTCTimestamp(last_day hour=12)-constants.SECONDS_PER_WEEK<line_sep>start_date=util.TimestampUTCToISO8601(start_time)<line_sep>logging.info('Last successful analyze_analytics run (%s) scanned up to %s, setting analysis start date to %s'%(time.asctime(time.localtime(last_run_start)) last_day start_date))<block_end># Fetch list of merged logs. 
files=<yield>gen.Task(GetMergedLogsFileList merged_store start_date)<line_sep>day_stats=<yield>gen.Task(ProcessFiles merged_store files)<line_sep># Write per-day stats to dynamodb. <if_stmt>len(day_stats)<g>0<block_start>hms=logs_util.kDailyMetricsTimeByLogType['analytics_logs']<line_sep><yield>gen.Task(logs_util.UpdateMetrics client day_stats dry_run=options.options.dry_run hms_tuple=hms)<line_sep>last_day=sorted(day_stats.keys())[-1]<line_sep>callback(last_day)<block_end><else_stmt><block_start>callback(<none>)<block_end><block_end>@gen.engine<def_stmt>_Start callback<block_start>"""Grab a lock on job:analyze_analytics and call RunOnce. If we get a return value, write it to the job summary."""<line_sep>client=db_client.DBClient.Instance()<line_sep>job=Job(client 'analyze_analytics')<if_stmt>options.options.require_lock<block_start>got_lock=<yield>gen.Task(job.AcquireLock)<if_stmt>got_lock<eq><false><block_start>logging.warning('Failed to acquire job lock: exiting.')<line_sep>callback()<line_sep><return><block_end><block_end>result=<none><line_sep>job.Start()<try_stmt><block_start>result=<yield>gen.Task(RunOnce client job)<block_end><except_stmt># Failure: log run summary with trace. <block_start>typ,val,tb=sys.exc_info()<line_sep>msg=''.join(traceback.format_exception(typ val tb))<line_sep>logging.info('Registering failed run with message: %s'%msg)<line_sep><yield>gen.Task(job.RegisterRun Job.STATUS_FAILURE failure_msg=msg)<block_end><else_stmt><block_start><if_stmt>result<is><not><none><and><not>options.options.dry_run# Successful run with data processed and not in dry-run mode: write run summary. <block_start>stats=DotDict()<line_sep>stats['last_day']=result<line_sep>logging.info('Registering successful run with stats: %r'%stats)<line_sep><yield>gen.Task(job.RegisterRun Job.STATUS_SUCCESS stats=stats)<block_end><block_end><finally_stmt><block_start><yield>gen.Task(job.ReleaseLock)<block_end>callback()<block_end><if_stmt>__name__<eq>'__main__'<block_start>sys.exit(main.InitAndRun(_Start))<block_end>
<import_stmt>time<import_stmt>tkinter<as>tk<import_from_stmt>picktrue.gui.toolkit ProgressBar StatusBar NamedInput FileBrowse info FilePathBrowse PasswordInput<import_from_stmt>picktrue.pinry.importer PinryImporter<import_from_stmt>picktrue.utils run_as_thread<class_stmt>PinryImporterGUI(tk.Frame)<block_start>title="导入到Pinry"<def_stmt>__init__ self *args **kwargs<block_start>super(PinryImporterGUI self).__init__(*args **kwargs)<line_sep>self._url=NamedInput(self name="Pinry部署地址")<line_sep>self._min_size=NamedInput(self name="最小上传大小(KB)(低于此值的文件不上传,不限制请留空)")<line_sep>self._username=NamedInput(self name="用户名")<line_sep>self._password=PasswordInput(self name="密码")<line_sep>self._csv_file=FilePathBrowse(self store_name="import_csv" text_label="CSV文件文件路径")<line_sep>self.btn_group=self.build_buttons()<line_sep>self._importer=<none><line_sep>self.progress=ProgressBar(self)<line_sep>self.status=StatusBar(self)<line_sep>self.start_update()<block_end><def_stmt>_get_importer self<block_start>min_size=self._min_size.get_input()<if_stmt>min_size<block_start><try_stmt><block_start>min_size=int(min_size)<block_end><except_stmt>Exception<block_start>info("最小文件上传大小应该是整数")<block_end><block_end><else_stmt><block_start>min_size=<none><block_end><return>PinryImporter(base_url=self._url.get_input() username=self._username.get_input() password=self._password.get_input() min_upload_size_kb=min_size )<block_end><def_stmt>build_buttons self<block_start>btn_args=dict(height=1 )<line_sep>btn_group=tk.Frame(self)<line_sep>buttons=[tk.Button(btn_group text=text command=command **btn_args)<for>text,command (("测试登录" self._test_login) ("开始导入" self._start_import) )]<for_stmt>index,btn enumerate(buttons)<block_start>btn.grid(column=index row=0 sticky=tk.N)<block_end>btn_group.pack(fill=tk.BOTH expand=1)<line_sep><return>btn_group<block_end><def_stmt>_test_login self<block_start>importer=self._get_importer()<if_stmt>importer.test_login()<is><true><block_start>info("登录成功")<block_end><else_stmt><block_start>info("情检查用户名密码以及部署路径是否可访问")<block_end><block_end><def_stmt>_start_import self<block_start>self._importer=self._get_importer()<line_sep>run_as_thread(self._importer.do_import self._csv_file.get_path() name="import2pinry")<block_end><def_stmt>start_update self<block_start>run_as_thread(self._update_loop)<block_end><def_stmt>_update_loop self<block_start><while_stmt><true><block_start>time.sleep(0.1)<line_sep>self.update_progress()<block_end><block_end><def_stmt>update_progress self<block_start><if_stmt>self._importer<is><not><none><block_start>self.progress.update_progress(self._importer.done_pins self._importer.total_pins )<line_sep>self.status.set(self._importer.status_text())<block_end><else_stmt><block_start>self.progress.update_progress(0 0)<line_sep>self.status.set("待机...")<block_end><block_end><block_end>
<import_from_stmt>datapackage_pipelines.wrapper ingest spew<import_stmt>os<import_from_stmt>datapackage_pipelines.utilities.resources PATH_PLACEHOLDER PROP_STREAMED_FROM<line_sep>parameters,datapackage,res_iter=ingest()<if_stmt>datapackage<is><none><block_start>datapackage={}<block_end>datapackage.setdefault('resources' [])<for_stmt>param ['url' 'name']<block_start><assert_stmt>param<in>parameters "You must define {} in your parameters".format(param)<block_end>url=parameters.pop('url')<if_stmt>url.startswith('env://')<block_start>env_var=url[6:]<line_sep>env_url=os.environ.get(env_var)<assert_stmt>env_url<is><not><none> "Missing Value - "<concat>"Please set your '%s' environment variable"%env_var<line_sep>url=env_url<block_end><if_stmt>'path'<not><in>parameters<block_start>parameters['path']=PATH_PLACEHOLDER<block_end>parameters[PROP_STREAMED_FROM]=url<line_sep>datapackage['resources'].append(parameters)<line_sep>spew(datapackage res_iter)<line_sep>
#------------------------------------------------------------------------------ # Copyright (c) 2013-2017, Nucleic Development Team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file LICENSE, distributed with this software. #------------------------------------------------------------------------------ <import_from_stmt>atom.api Atom Bool Int Float Typed<import_from_stmt>enaml.qt.QtCore Qt QPoint QRect QTimer QPropertyAnimation<import_from_stmt>enaml.qt.QtGui QPainter<import_from_stmt>enaml.qt.QtWidgets QWidget QStyle QStyleOption<import_from_stmt>.q_guide_rose QGuideRose<import_from_stmt>.q_dock_bar QDockBar<import_from_stmt>.q_dock_container QDockContainer<import_from_stmt>.q_dock_splitter QDockSplitterHandle<import_from_stmt>.q_dock_tab_widget QDockTabWidget<class_stmt>QDockRubberBand(QWidget)<block_start>""" A custom rubber band widget for use with the dock overlay. This class is stylable from Qt style sheets. """<def_stmt>__init__ self parent=<none><block_start>""" Initialize a QDockRubberBand. Parameters ---------- parent : QWidget, optional The parent of the dock rubber band. """<line_sep>super(QDockRubberBand self).__init__(parent)<line_sep>self.setWindowFlags(Qt.ToolTip|Qt.FramelessWindowHint)<line_sep>self.setAttribute(Qt.WA_TranslucentBackground)<block_end><def_stmt>paintEvent self event<block_start>""" Handle the paint event for the dock rubber band. """<line_sep>painter=QPainter(self)<line_sep>opt=QStyleOption()<line_sep>opt.initFrom(self)<line_sep>self.style().drawPrimitive(QStyle.PE_Widget opt painter self)<block_end><block_end><class_stmt>DockOverlay(Atom)<block_start>""" An object which manages the overlays for dock widgets. This manager handles the state transitions for the overlays. The transitions are performed on a slightly-delayed timer to provide a more fluid user interaction experience. """<line_sep># PySide requires weakrefs for using bound methods as slots. # PyQt doesn't, but executes unsafe code if not using weakrefs. __slots__='__weakref__'<line_sep>#: The size of the rubber band when docking on the border, in px. border_size=Int(60)<line_sep>#: The delay to use when triggering the rose timer, in ms. rose_delay=Int(30)<line_sep>#: The delay to use when triggering the band timer, in ms. band_delay=Int(50)<line_sep>#: The target opacity to use when making the band visible. band_target_opacity=Float(1.0)<line_sep>#: The duration of the band visibilty animation, in ms. band_vis_duration=Int(100)<line_sep>#: the duration of the band geometry animation, in ms. band_geo_duration=Int(100)<line_sep>#: The overlayed guide rose. _rose=Typed(QGuideRose ())<line_sep>#: The overlayed rubber band. _band=Typed(QDockRubberBand ())<line_sep>#: The property animator for the rubber band geometry. _geo_animator=Typed(QPropertyAnimation)<line_sep>#: The property animator for the rubber band visibility. _vis_animator=Typed(QPropertyAnimation)<line_sep>#: The target mode to apply to the rose on timeout. _target_rose_mode=Int(QGuideRose.Mode.NoMode)<line_sep>#: The target geometry to apply to rubber band on timeout. _target_band_geo=Typed(QRect factory=<lambda>:QRect())<line_sep>#: The value of the last guide which was hit in the rose. _last_guide=Int(-1)<line_sep>#: A flag indicating whether it is safe to show the band. _show_band=Bool(<false>)<line_sep>#: The hover position of the mouse to use for state changes. _hover_pos=Typed(QPoint factory=<lambda>:QPoint())<line_sep>#: The timer for changing the state of the rose. 
_rose_timer=Typed(QTimer)<line_sep>#: The timer for changing the state of the band. _band_timer=Typed(QTimer)<def_stmt>__init__ self parent=<none><block_start>""" Initialize a DockOverlay. Parameters ---------- parent : QWidget, optional The parent of the overlay. This will be used as the parent widget for the dock rubber band. The overlay guides do not have a parent. """<line_sep>self._band=QDockRubberBand(parent)<block_end>#-------------------------------------------------------------------------- # Default Value Methods #-------------------------------------------------------------------------- <def_stmt>_default__rose_timer self<block_start>""" Create the default timer for the rose state changes. """<line_sep>timer=QTimer()<line_sep>timer.setSingleShot(<true>)<line_sep>timer.timeout.connect(self._on_rose_timer)<line_sep><return>timer<block_end><def_stmt>_default__band_timer self<block_start>""" Create the default timer for the band state changes. """<line_sep>timer=QTimer()<line_sep>timer.setSingleShot(<true>)<line_sep>timer.timeout.connect(self._on_band_timer)<line_sep><return>timer<block_end><def_stmt>_default__geo_animator self<block_start>""" Create the default property animator for the rubber band. """<line_sep>p=QPropertyAnimation(self._band b'geometry')<line_sep>p.setDuration(self.band_geo_duration)<line_sep><return>p<block_end><def_stmt>_default__vis_animator self<block_start>""" Create the default property animator for the rubber band. """<line_sep>p=QPropertyAnimation(self._band b'windowOpacity')<line_sep>p.setDuration(self.band_vis_duration)<line_sep>p.finished.connect(self._on_vis_finished)<line_sep><return>p<block_end>#-------------------------------------------------------------------------- # Timer Handlers #-------------------------------------------------------------------------- <def_stmt>_on_rose_timer self<block_start>""" Handle the timeout event for the internal rose timer. This handler transitions the rose to its new state and updates the position of the rubber band. """<line_sep>rose=self._rose<line_sep>rose.setMode(self._target_rose_mode)<line_sep>rose.mouseOver(self._hover_pos)<line_sep>self._show_band=<true><line_sep>self._update_band_state()<block_end><def_stmt>_on_band_timer self<block_start>""" Handle the timeout event for the internal band timer. This handler updates the position of the rubber band. """<line_sep>self._update_band_state()<block_end>#-------------------------------------------------------------------------- # Animation Handlers #-------------------------------------------------------------------------- <def_stmt>_on_vis_finished self<block_start>""" Handle the 'finished' signal from the visibility animator. This handle will hide the rubber band when its opacity is 0. """<line_sep>band=self._band<if_stmt>band.windowOpacity()<eq>0.0<block_start>band.hide()<block_end><block_end>#-------------------------------------------------------------------------- # Private API #-------------------------------------------------------------------------- <def_stmt>_update_band_state self<block_start>""" Refresh the geometry and visible state of the rubber band. The state will be updated using animated properties to provide a nice fluid user experience. """<line_sep># A valid geometry indicates that the rubber should be shown on # the screen. An invalid geometry means it should be hidden. If # the validity is changed during animation, the animators are # restarted using the current state as their starting point. 
band=self._band<line_sep>geo=self._target_band_geo<if_stmt>geo.isValid()<and>self._show_band# If the band is already hidden, the geometry animation can # be bypassed since the band can be located anywhere. <block_start><if_stmt>band.isHidden()<block_start>band.setGeometry(geo)<line_sep>self._start_vis_animator(self.band_target_opacity)<line_sep>self._rose.raise_()<block_end><else_stmt><block_start>self._start_vis_animator(self.band_target_opacity)<line_sep>self._start_geo_animator(geo)<block_end><block_end><else_stmt><block_start>self._start_vis_animator(0.0)<block_end><block_end><def_stmt>_start_vis_animator self opacity<block_start>""" (Re)start the visibility animator. Parameters ---------- opacity : float The target opacity of the target object. """<line_sep>animator=self._vis_animator<if_stmt>animator.state()<eq>animator.Running<block_start>animator.stop()<block_end>target=animator.targetObject()<if_stmt>target.isHidden()<and>opacity<ne>0.0<block_start>target.setWindowOpacity(0.0)<line_sep>target.show()<block_end>animator.setStartValue(target.windowOpacity())<line_sep>animator.setEndValue(opacity)<line_sep>animator.start()<block_end><def_stmt>_start_geo_animator self geo<block_start>""" (Re)start the visibility animator. Parameters ---------- geo : QRect The target geometry for the target object. """<line_sep>animator=self._geo_animator<if_stmt>animator.state()<eq>animator.Running<block_start>animator.stop()<block_end>target=animator.targetObject()<line_sep>animator.setStartValue(target.geometry())<line_sep>animator.setEndValue(geo)<line_sep>animator.start()<block_end><def_stmt>_band_geometry self widget guide<block_start>""" Compute the geometry for an overlay rubber band. Parameters ---------- widget : QWidget The widget to which the band geometry should be fit. guide : Guide The rose guide under the mouse. This determines how the geometry of the band will be fit to the widget. 
"""<line_sep>Guide=QGuideRose.Guide<if_stmt>guide<eq>Guide.NoGuide<block_start><return>QRect()<block_end># border hits border_size=self.border_size<line_sep>rect=widget.contentsRect()<if_stmt>guide<eq>Guide.BorderNorth<block_start>rect.setHeight(border_size)<block_end><elif_stmt>guide<eq>Guide.BorderEast<block_start>rect.setLeft(rect.right()+1-border_size)<block_end><elif_stmt>guide<eq>Guide.BorderSouth<block_start>rect.setTop(rect.bottom()+1-border_size)<block_end><elif_stmt>guide<eq>Guide.BorderWest<block_start>rect.setWidth(border_size)<block_end># For the next 4 conditions `widget` will be a QDockArea <elif_stmt>guide<eq>Guide.BorderExNorth<block_start>bar_rect=widget.dockBarGeometry(QDockBar.North)<if_stmt>bar_rect.isValid()<block_start>rect=bar_rect<block_end><else_stmt><block_start>rect.setHeight(border_size/2)<block_end><block_end><elif_stmt>guide<eq>Guide.BorderExEast<block_start>bar_rect=widget.dockBarGeometry(QDockBar.East)<if_stmt>bar_rect.isValid()<block_start>rect=bar_rect<block_end><else_stmt><block_start>rect.setLeft(rect.right()+1-border_size/2)<block_end><block_end><elif_stmt>guide<eq>Guide.BorderExSouth<block_start>bar_rect=widget.dockBarGeometry(QDockBar.South)<if_stmt>bar_rect.isValid()<block_start>rect=bar_rect<block_end><else_stmt><block_start>rect.setTop(rect.bottom()+1-border_size/2)<block_end><block_end><elif_stmt>guide<eq>Guide.BorderExWest<block_start>bar_rect=widget.dockBarGeometry(QDockBar.West)<if_stmt>bar_rect.isValid()<block_start>rect=bar_rect<block_end><else_stmt><block_start>rect.setWidth(border_size/2)<block_end><block_end># compass hits <elif_stmt>guide<eq>Guide.CompassNorth<block_start>rect.setHeight(rect.height()/3)<block_end><elif_stmt>guide<eq>Guide.CompassEast<block_start>rect.setLeft(2<times>rect.width()/3)<block_end><elif_stmt>guide<eq>Guide.CompassSouth<block_start>rect.setTop(2<times>rect.height()/3)<block_end><elif_stmt>guide<eq>Guide.CompassWest<block_start>rect.setWidth(rect.width()/3)<block_end><elif_stmt>guide<eq>Guide.CompassCenter<block_start><pass># nothing to do <block_end><elif_stmt>guide<eq>Guide.CompassExNorth<block_start><pass># nothing to do <block_end><elif_stmt>guide<eq>Guide.CompassExEast<block_start><pass># nothing to do <block_end><elif_stmt>guide<eq>Guide.CompassExSouth<block_start><pass># nothing to do <block_end><elif_stmt>guide<eq>Guide.CompassExWest<block_start><pass><block_end># nothing to do # splitter handle hits <elif_stmt>guide<eq>Guide.SplitHorizontal<block_start>wo,r=divmod(border_size-rect.width() 2)<line_sep>rect.setWidth(2<times>(wo+r)+rect.width())<line_sep>rect.moveLeft(rect.x()-(wo+r))<block_end><elif_stmt>guide<eq>Guide.SplitVertical<block_start>ho,r=divmod(border_size-widget.height() 2)<line_sep>rect.setHeight(2<times>(ho+r)+rect.height())<line_sep>rect.moveTop(rect.y()-(ho+r))<block_end># single center <elif_stmt>guide<eq>Guide.AreaCenter<block_start><pass><block_end># nothing to do # default no-op <else_stmt><block_start><return>QRect()<block_end>pt=widget.mapToGlobal(rect.topLeft())<line_sep><return>QRect(pt rect.size())<block_end>#-------------------------------------------------------------------------- # Public API #-------------------------------------------------------------------------- <def_stmt>guide_at self pos<block_start>""" Get the dock guide for a given position. Parameters ---------- pos : QPoint The position of interest, expressed in global coordinates. Returns ------- result : Guide The guide enum which lies under the given point. 
"""<line_sep>rose=self._rose<line_sep>pos=rose.mapFromGlobal(pos)<line_sep><return>rose.guideAt(pos)<block_end><def_stmt>hide self<block_start>""" Hide the overlay. This method will stop the timers and set the visibility of the guide rose and the rubber band to False. """<line_sep>self._rose_timer.stop()<line_sep>self._band_timer.stop()<line_sep>self._rose.hide()<line_sep>self._band.hide()<block_end><def_stmt>mouse_over_widget self widget pos empty=<false><block_start>""" Update the overlays based on the mouse position. This handler should be invoked when the mouse hovers over a single widget (such as a floating dock container) as opposed to an area of docked widgets. The guide rose will be displayed in the center of the widget with no border guides. Parameters ---------- widget : QWidget The widget under the mouse. pos : QPoint The hover position, expressed in the local coordinates of the widget. empty : bool, optional Whether the widget represents an empty widget. If this is True, a single center guide will be shown instead of the guide rose. """<line_sep>Mode=QGuideRose.Mode<line_sep>rose=self._rose<line_sep>target_mode=Mode.AreaCenter<if>empty<else>Mode.CompassEx<line_sep>self._target_rose_mode=target_mode<if_stmt>rose.mode()<ne>target_mode<block_start>rose.setMode(Mode.NoMode)<line_sep>self._rose_timer.start(self.rose_delay)<line_sep>self._band_timer.start(self.band_delay)<block_end>origin=widget.mapToGlobal(QPoint(0 0))<line_sep>geo=QRect(origin widget.size())<line_sep>dirty=rose.geometry()<ne>geo<if_stmt>dirty<block_start>rose.hide()<line_sep>rose.setMode(Mode.NoMode)<line_sep>rose.setGeometry(geo)<block_end>guide=rose.guideAt(pos target_mode)<if_stmt>dirty<or>guide<ne>self._last_guide<block_start>self._last_guide=guide<line_sep>self._target_band_geo=self._band_geometry(widget guide)<line_sep>self._band_timer.start(self.band_delay)<block_end>rose.setCenterPoint(QPoint(geo.width()/2 geo.height()/2))<line_sep>rose.mouseOver(pos)<line_sep>rose.show()<block_end><def_stmt>mouse_over_area self area widget pos<block_start>""" Update the overlays based on the mouse position. Parameters ---------- area : QDockArea The dock area which contains the dock items onto which the overlay will be displayed. widget : QWidget The dock widget in the area which is under the mouse, or None if there is no relevant widget. pos : QPoint The hover position, expressed in the local coordinates of the overlayed dock area. """<line_sep>Mode=QGuideRose.Mode<line_sep>Guide=QGuideRose.Guide<line_sep>pane=area.centralPane()<line_sep>pos=pane.mapFrom(area pos)<if_stmt>widget<is><none><block_start><if_stmt>area.centralWidget()<is><none><block_start>self.mouse_over_widget(pane pos empty=<true>)<block_end><return><block_end># Compute the target mode for the guide rose based on the dock # widget which lies under the mouse position. target_mode=Mode.Border<if_stmt>isinstance(widget QDockContainer)<block_start>target_mode<augor>Mode.CompassEx<block_end><elif_stmt>isinstance(widget QDockTabWidget)<block_start>target_mode<augor>Mode.Compass<block_end><elif_stmt>isinstance(widget QDockSplitterHandle)<block_start><if_stmt>widget.orientation()<eq>Qt.Horizontal<block_start>target_mode<augor>Mode.SplitHorizontal<block_end><else_stmt><block_start>target_mode<augor>Mode.SplitVertical<block_end><block_end># Get the local area coordinates for the center of the widget. center=widget.mapTo(pane QPoint(0 0))<line_sep>center<augadd>QPoint(widget.width()/2 widget.height()/2)<line_sep># Update the state of the rose. 
If it is to be hidden, it is # done so immediately. If the target mode is different from # the current mode, the rose is hidden and the state changes # are collapsed on a timer. rose=self._rose<line_sep>self._hover_pos=pos<line_sep>self._show_band=<true><line_sep>self._target_rose_mode=target_mode<if_stmt>target_mode<ne>rose.mode()<block_start>rose.setMode(Mode.Border)<line_sep>self._rose_timer.start(self.rose_delay)<line_sep>self._show_band=<false><block_end># Update the geometry of the rose if needed. This ensures that # the rose does not change geometry while visible. origin=pane.mapToGlobal(QPoint(0 0))<line_sep>geo=QRect(origin pane.size())<line_sep>dirty=rose.geometry()<ne>geo<if_stmt>dirty<block_start>rose.hide()<line_sep>rose.setMode(Mode.NoMode)<line_sep>rose.setGeometry(geo)<block_end># Hit test the rose and update the target geometry for the # rubber band if the target guide has changed. rose.setCenterPoint(center)<line_sep>guide=rose.guideAt(pos target_mode)<if_stmt>dirty<or>guide<ne>self._last_guide<block_start>self._last_guide=guide<if_stmt>guide<ge>Guide.BorderNorth<and>guide<le>Guide.BorderWest<block_start>band_geo=self._band_geometry(pane guide)<block_end><elif_stmt>guide<ge>Guide.BorderExNorth<and>guide<le>Guide.BorderExWest<block_start>band_geo=self._band_geometry(area guide)<block_end><else_stmt><block_start>band_geo=self._band_geometry(widget guide)<block_end>self._target_band_geo=band_geo<line_sep>self._band_timer.start(self.band_delay)<block_end># Finally, make the rose visible and issue a mouseover command # so that the guides are highlighted. rose.mouseOver(pos)<line_sep>rose.show()<block_end><block_end>
<import_from_stmt>.main TA_Handler TradingView Analysis Interval Exchange get_multiple_analysis __version__<import_from_stmt>.technicals Recommendation Compute<line_sep>
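# Illustrative usage sketch (not part of the original package file): fetching a single
# analysis with the names re-exported above. The symbol/exchange/screener values are
# assumptions for demonstration only, and a network connection to TradingView is required.
from tradingview_ta import TA_Handler, Interval

handler = TA_Handler(
    symbol="AAPL",                      # hypothetical symbol
    screener="america",
    exchange="NASDAQ",
    interval=Interval.INTERVAL_1_DAY,
)
analysis = handler.get_analysis()
print(analysis.summary)                 # e.g. {"RECOMMENDATION": ..., "BUY": ..., "SELL": ..., "NEUTRAL": ...}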
#coding:utf-8 <import_from_stmt>datetime date<import_from_stmt>flask request Response redirect url_for current_app<import_from_stmt>flask_admin.model typefmt<import_from_stmt>flask_admin.contrib.sqla ModelView<import_from_stmt>flask_babelex lazy_gettext<as>_<import_from_stmt>flask_login current_user<import_stmt>os<import_from_stmt>werkzeug.utils secure_filename<import_from_stmt>. main<import_from_stmt>.. admin db<import_from_stmt>..models Permission User Role Report Department<import_from_stmt>..utils permission_required is_allowed_file clean_html<import_from_stmt>sqlalchemy.exc OperationalError<line_sep>@main.route('/' methods=['GET' 'POST'])<def_stmt>index # check if the database is initialized. <block_start><try_stmt><block_start>User.query.all()<block_end><except_stmt>OperationalError<block_start>db.create_all()<line_sep>Role.insert_roles()<line_sep>Department.insert_departments()<block_end><if_stmt><not>current_user.is_authenticated<block_start><return>redirect(url_for('auth.login'))<block_end><return>redirect(url_for('report.read'))<block_end>@main.route("/upload/" methods=["POST"])@permission_required(Permission.WRITE_REPORT)<def_stmt>upload <block_start>img=request.files.get('image')<if_stmt>img<and>is_allowed_file(img.filename)<block_start>filename=secure_filename(img.filename)<line_sep>img.save(os.path.join(current_app.config['UPLOAD_FOLDER'] filename))<line_sep>img_url=request.url_root+current_app.config['IMAGE_UPLOAD_DIR']+filename<line_sep>res=Response(img_url)<line_sep>current_app.logger.info('{} uploaded image'.format(current_user.email))<block_end><else_stmt><block_start>res=Response(_("Failed Uploading"))<block_end>res.headers["ContentType"]="text/html"<line_sep>res.headers["Charset"]="utf-8"<line_sep>current_app.logger.error('{} failed uploading image'.format(current_user.email))<line_sep><return>res<block_end><class_stmt>WeeklyReportModelView(ModelView)<block_start>base_template='/base.html'<def_stmt>is_accessible self<block_start><return>current_user.is_admin<block_end><def_stmt>inaccessible_callback self name **kwargs<block_start><return>redirect(url_for('main.index'))<block_end><block_end><class_stmt>UserAdminView(WeeklyReportModelView)<block_start>column_labels=dict(email='邮箱' username='姓名' is_ignored='不参与统计' role='角色' department='部门')<line_sep>form_columns=column_list=['email' 'username' 'is_ignored' 'role' 'department']<line_sep>can_delete=<true><line_sep>can_create=<false><line_sep>form_widget_args={'email':{'readonly':<true>} }<def_stmt>on_model_delete self model<block_start>current_app.logger.info('{} deleted user:{}'.format(current_user.email model))<for_stmt>report Report.query.filter_by(author_id=model.id)<block_start>db.session.delete(report)<block_end>db.session.commit()<block_end><block_end><class_stmt>RoleAdminView(WeeklyReportModelView)<block_start>column_labels=dict(name='名称' users='成员')<line_sep>form_columns=['name' 'users']<line_sep>column_list=['name']<line_sep>can_create=<false><line_sep>can_edit=<true><line_sep>can_delete=<false><line_sep>form_widget_args={'name':{'readonly':<true>} }<block_end><class_stmt>DepartmentAdminView(WeeklyReportModelView)<block_start>column_labels=dict(name='名称' users='成员')<line_sep>form_columns=['name' 'users']<line_sep>can_edit=<true><line_sep>can_delete=<false><block_end><class_stmt>ReportAdminView(WeeklyReportModelView)<block_start>column_labels=dict(year=u'年份' week_count=u'周次' created_at=u'创建时间' last_content=u'上周计划' content=u'内容' author=u'员工' department=u'部门')<line_sep>column_list=('author' 'department' 'year' 
'week_count' 'last_content' 'content' 'created_at')<line_sep>column_default_sort=('created_at' <true>)<line_sep>column_searchable_list=('week_count' )<line_sep>form_columns=['created_at' 'week_count' 'year' 'content']<line_sep>list_template='/admin/model/report_list_template.html'<line_sep>can_edit=<true><line_sep>can_export=<true><line_sep>export_types=['xls']<line_sep>form_widget_args={'year':{'readonly':<true>} 'last_content':{'readonly':<true>} 'created_at':{'readonly':<true>} }<def_stmt>date_format view value<block_start><return>value.strftime('%Y-%m-%d')<block_end><def_stmt>author_format v c m p<block_start><return>str(m.author)<block_end><def_stmt>department_format v c m p<block_start><return>str(m.department)<block_end><def_stmt>format_last_content v c m p<block_start><if_stmt>m.last_content<block_start><return>clean_html(m.last_content)<block_end><return>''<block_end><def_stmt>format_content v c m p<block_start><if_stmt>m.content<block_start><return>clean_html(m.content)<block_end><return>''<block_end><def_stmt>format_created_at v c m p<block_start><return>m.created_at.strftime('%Y-%m-%d')<block_end>REPORT_FORMATTERS=dict(typefmt.BASE_FORMATTERS)<line_sep>REPORT_FORMATTERS.update({date:date_format })<line_sep>column_type_formatters=REPORT_FORMATTERS<line_sep>EXPORT_REPORT_FORMATTERS=dict(typefmt.BASE_FORMATTERS)<line_sep>EXPORT_REPORT_FORMATTERS.update({"author":author_format "department":department_format "last_content":format_last_content "content":format_content "created_at":format_created_at })<line_sep>column_formatters_export=EXPORT_REPORT_FORMATTERS<block_end>admin.add_view(UserAdminView(User db.session name='用户'))<line_sep>admin.add_view(RoleAdminView(Role db.session name='角色'))<line_sep>admin.add_view(ReportAdminView(Report db.session name='周报' endpoint="reports"))<line_sep>admin.add_view(DepartmentAdminView(Department db.session name='部门'))<line_sep>
# Copyright 2020 - 2021 MONAI Consortium # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # http://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>hashlib<import_stmt>logging<import_stmt>mimetypes<import_stmt>os<import_stmt>pathlib<import_stmt>shutil<import_stmt>subprocess<import_stmt>torch.cuda<line_sep>logger=logging.getLogger(__name__)<def_stmt>file_ext name<arrow>str<block_start><return>"".join(pathlib.Path(name).suffixes)<block_end><def_stmt>remove_file path:str<arrow><none><block_start><if_stmt>path<and>os.path.exists(path)<block_start><if_stmt>os.path.isdir(path)<block_start>shutil.rmtree(path)<block_end><else_stmt><block_start>os.unlink(path)<block_end><block_end><block_end><def_stmt>get_basename path<block_start>"""Gets the basename of a file. Ref: https://stackoverflow.com/questions/8384737/extract-file-name-from-path-no-matter-what-the-os-path-format """<line_sep>head,tail=os.path.split(path)<line_sep><return>tail<or>os.path.basename(head)<block_end><def_stmt>run_command command args=<none> plogger=<none><block_start>plogger=plogger<if>plogger<else>logger<line_sep>cmd=[command]<if_stmt>args<block_start>args=[str(a)<for>a args]<line_sep>cmd.extend(args)<block_end>plogger.info("Running Command:: {}".format(" ".join(cmd)))<line_sep>process=subprocess.Popen(cmd # stderr=subprocess.PIPE, stdout=subprocess.PIPE universal_newlines=<true> env=os.environ.copy() )<while_stmt>process.poll()<is><none><block_start>line=process.stdout.readline()<line_sep>line=line.rstrip()<if_stmt>line<block_start>plogger.info(line.rstrip())<if>plogger<else>print(line)<block_end><block_end>plogger.info("Return code: {}".format(process.returncode))<line_sep>process.stdout.close()<line_sep><return>process.returncode<block_end><def_stmt>init_log_config log_config app_dir log_file<block_start><if_stmt><not>log_config<or><not>os.path.exists(log_config)<block_start>default_log_dir=os.path.dirname(os.path.dirname(os.path.dirname(__file__)))<line_sep>default_config=os.path.realpath(os.path.join(default_log_dir "logging.json"))<line_sep>log_dir=os.path.join(app_dir "logs")<line_sep>log_config=os.path.join(log_dir "logging.json")<line_sep>os.makedirs(log_dir exist_ok=<true>)<line_sep># if not os.path.exists(log_config): shutil.copy(default_config log_config)<with_stmt>open(log_config "r")<as>f<block_start>c=f.read()<block_end>c=c.replace("${LOGDIR}" log_dir.replace("\\" r"\\"))<line_sep>c=c.replace("${LOGFILE}" os.path.join(log_dir log_file).replace("\\" r"\\"))<with_stmt>open(log_config "w")<as>f<block_start>f.write(c)<block_end><block_end><return>log_config<block_end><def_stmt>get_mime_type file<block_start>m_type=mimetypes.guess_type(file strict=<false>)<line_sep>logger.debug(f"Guessed Mime Type for Image: {m_type}")<if_stmt>m_type<is><none><or>m_type[0]<is><none><block_start>m_type="application/octet-stream"<block_end><else_stmt><block_start>m_type=m_type[0]<block_end>logger.debug(f"Final Mime Type: {m_type}")<line_sep><return>m_type<block_end><def_stmt>file_checksum file algo="SHA256"<block_start><if_stmt>algo<not><in>["SHA256" "SHA512" 
"MD5"]<block_start><raise>ValueError("unsupported hashing algorithm %s"%algo)<block_end><with_stmt>open(file "rb")<as>content<block_start>hash=hashlib.new(algo)<while_stmt><true><block_start>chunk=content.read(8192)<if_stmt><not>chunk<block_start><break><block_end>hash.update(chunk)<block_end><return>f"{algo}:{hash.hexdigest()}"<block_end><block_end><def_stmt>gpu_memory_map <block_start>"""Get the current gpu usage. Returns ------- usage: dict Keys are device ids as integers. Values are memory usage as integers in MB. """<line_sep>logger.info("Using nvidia-smi command")<if_stmt>shutil.which("nvidia-smi")<is><none><block_start>logger.info("nvidia-smi command didn't work! - Using default image size [128, 128, 64]")<line_sep><return>{0:4300}<block_end>result=subprocess.check_output(["nvidia-smi" "--query-gpu=memory.free" "--format=csv,nounits,noheader"] encoding="utf-8")<line_sep># Convert lines into a dictionary gpu_memory=[int(x)<for>x result.strip().split("\n")]<line_sep>gpu_memory_map=dict(zip(range(len(gpu_memory)) gpu_memory))<line_sep><return>gpu_memory_map<block_end><def_stmt>gpu_count <block_start><return>torch.cuda.device_count()<block_end>
# vim: expandtab:ts=4:sw=4 <import_from_future_stmt> absolute_import<import_stmt>numpy<as>np<import_from_stmt>linear_assignment min_marg_matching<import_stmt>pdb<def_stmt>get_unmatched all_idx matches i marginalization=<none><block_start>assigned=[match[i]<for>match matches]<line_sep>unmatched=set(all_idx)-set(assigned)<if_stmt>marginalization<is><not><none># from 1 for dummy node <block_start>in_gate_dets=np.nonzero(np.sum(marginalization[: 1:] axis=0))[0].tolist()<line_sep>unmatched=[d<for>d unmatched<if>d<not><in>in_gate_dets]<block_end><return>list(unmatched)<block_end><class_stmt>Matcher<block_start><def_stmt>__init__ self detections marginalizations confirmed_tracks matching_strategy assignment_threshold=<none><block_start>self.detections=detections<line_sep>self.marginalizations=marginalizations<line_sep>self.confirmed_tracks=confirmed_tracks<line_sep>self.assignment_threshold=assignment_threshold<line_sep>self.detection_indices=np.arange(len(detections))<line_sep>self.matching_strategy=matching_strategy<block_end><def_stmt>match self<block_start>self.get_matches()<line_sep>self.get_unmatched_tracks()<line_sep>self.get_unmatched_detections()<line_sep><return>self.matches self.unmatched_tracks self.unmatched_detections<block_end><def_stmt>get_matches self<block_start><if_stmt>self.matching_strategy<eq>"max_and_threshold"<block_start>self.max_and_threshold_matching()<block_end><elif_stmt>self.matching_strategy<eq>"hungarian"<block_start>self.hungarian()<block_end><elif_stmt>self.matching_strategy<eq>"max_match"<block_start>self.max_match()<block_end><elif_stmt>self.matching_strategy<eq>"none"<block_start>self.matches=[]<block_end><else_stmt><block_start><raise>Exception('Unrecognized matching strategy: {}'.format(self.matching_strategy))<block_end><block_end><def_stmt>get_unmatched_tracks self<block_start>self.unmatched_tracks=get_unmatched(self.confirmed_tracks self.matches 0)<block_end><def_stmt>get_unmatched_detections self<block_start>self.unmatched_detections=get_unmatched(self.detection_indices self.matches 1 self.marginalizations)<block_end><def_stmt>max_match self<block_start>self.matches=[]<if_stmt>self.marginalizations.shape[0]<eq>0<block_start><return><block_end>detection_map={}<for_stmt>i,track_idx enumerate(self.confirmed_tracks)<block_start>marginalization=self.marginalizations[i :]<line_sep>detection_id=np.argmax(marginalization)-1# subtract one for dummy <if_stmt>detection_id<l>0<block_start><continue><block_end><if_stmt>detection_id<not><in>detection_map.keys()<block_start>detection_map[detection_id]=track_idx<block_end><else_stmt><block_start>cur_track=detection_map[detection_id]<line_sep>track_update=track_idx<if>self.marginalizations[track_idx detection_id]<g>self.marginalizations[cur_track detection_id]<else>cur_track<line_sep>detection_map[detection_id]=track_update<block_end>threshold_p=marginalization[detection_id+1]<if_stmt>threshold_p<l>self.assignment_threshold<block_start><continue><block_end><block_end><for_stmt>detection detection_map.keys()<block_start>self.matches.append((detection_map[detection] detection))<block_end><block_end><def_stmt>max_and_threshold_matching self<block_start>self.matches=[]<if_stmt>self.marginalizations.shape[0]<eq>0<block_start><return><block_end><for_stmt>i,track_idx enumerate(self.confirmed_tracks)<block_start>marginalization=self.marginalizations[i :]<line_sep>detection_id=np.argmax(marginalization)-1# subtract one for dummy 
<if_stmt>detection_id<l>0<block_start><continue><block_end>threshold_p=marginalization[detection_id+1]<if_stmt>threshold_p<l>self.assignment_threshold<block_start><continue><block_end>self.matches.append((track_idx detection_id))<block_end><block_end><def_stmt>hungarian self<block_start>self.matches,_,_=min_marg_matching(self.marginalizations self.confirmed_tracks self.assignment_threshold)<block_end><block_end>
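# Illustrative sketch (not part of the original module): wiring up Matcher with dummy
# inputs. Shapes follow the code above: `marginalizations` has one row per confirmed
# track and a leading column for the dummy (missed-detection) node.
import numpy as np

detections = ["det_a", "det_b"]                    # placeholder detection objects
marginalizations = np.array([[0.1, 0.2, 0.7],      # track 0: mass mostly on det_b
                             [0.8, 0.1, 0.1]])     # track 1: mass mostly on the dummy node
confirmed_tracks = [0, 1]

matcher = Matcher(detections, marginalizations, confirmed_tracks,
                  matching_strategy="max_and_threshold",
                  assignment_threshold=0.5)
matches, unmatched_tracks, unmatched_detections = matcher.match()
# matches -> [(0, 1)]; track 1 stays unmatched because its best hypothesis is the dummy node.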
database="sqlite_file:hcalPulse.db"<line_sep>tag="test"<line_sep>outputfile="hcalPulse_dbread.bbin"<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process('HcalInterpolatedPulseDBRead')<line_sep>process.source=cms.Source('EmptySource')<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(1))<line_sep>process.load("CondCore.CondDB.CondDB_cfi")<line_sep>process.CondDB.connect=database<line_sep>process.PoolDBESSource=cms.ESSource("PoolDBESSource" process.CondDB toGet=cms.VPSet(cms.PSet(record=cms.string("HcalInterpolatedPulseCollRcd") tag=cms.string(tag))))<line_sep>process.dumper=cms.EDAnalyzer('HcalInterpolatedPulseDBReader' outputFile=cms.string(outputfile))<line_sep>process.p=cms.Path(process.dumper)<line_sep>
<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>wagtail.documents get_document_model<import_from_stmt>._sync_storage_base SyncStorageCommandMixin<class_stmt>Command(SyncStorageCommandMixin BaseCommand)<block_start><def_stmt>get_storage_directories self<block_start><return>["documents"]<block_end><def_stmt>get_queryset self<block_start><return>get_document_model().objects.all()<block_end><block_end>
# This sample tests the use of `Self` when used within a property # or class property. <import_from_stmt>typing_extensions Self<class_stmt>A<block_start>@property<def_stmt>one self<arrow>Self<block_start><ellipsis><block_end>@classmethod@property<def_stmt>two cls<arrow>type[Self]<block_start><ellipsis><block_end><block_end><class_stmt>B(A)<block_start><ellipsis><block_end>reveal_type(A().one expected_text="A")<line_sep>reveal_type(A.two expected_text="Type[A]")<line_sep>reveal_type(B().one expected_text="B")<line_sep>reveal_type(B.two expected_text="Type[B]")<line_sep>
# Licensed under a 3-clause BSD style license - see LICENSE.rst
<import_from_stmt>gammapy __version__<import_from_stmt>gammapy.scripts.main cli<import_from_stmt>gammapy.utils.testing run_cli<def_stmt>test_cli_no_args # No arguments should print help
<block_start>result=run_cli(cli [])<assert_stmt>"Usage"<in>result.output<block_end><def_stmt>test_cli_help <block_start>result=run_cli(cli ["--help"])<assert_stmt>"Usage"<in>result.output<block_end><def_stmt>test_cli_version <block_start>result=run_cli(cli ["--version"])<assert_stmt>f"gammapy version {__version__}"<in>result.output<block_end><def_stmt>test_check_logging <block_start>result=run_cli(cli ["check" "logging"])<assert_stmt>"output"<in>result.output<block_end>
# Copyright 2020 DeepMind Technologies Limited. # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # https://www.apache.org/licenses/LICENSE-2.0 # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. r"""A basic graphnet example. This example just explains the bare mechanics of the library. """<import_stmt>logging<import_from_stmt>absl app<import_stmt>jax<import_stmt>jraph<import_stmt>numpy<as>np<line_sep>MASK_BROKEN_MSG=("Support for jax.mask is currently broken. This is not a "<concat>"jraph error.")<def_stmt>run <block_start>"""Runs basic example."""<line_sep># Creating graph tuples. # Creates a GraphsTuple from scratch containing a single graph. # The graph has 3 nodes and 2 edges. # Each node has a 4-dimensional feature vector. # Each edge has a 5-dimensional feature vector. # The graph itself has a 6-dimensional feature vector. single_graph=jraph.GraphsTuple(n_node=np.asarray([3]) n_edge=np.asarray([2]) nodes=np.ones((3 4)) edges=np.ones((2 5)) globals=np.ones((1 6)) senders=np.array([0 1]) receivers=np.array([2 2]))<line_sep>logging.info("Single graph %r" single_graph)<line_sep># Creates a GraphsTuple from scratch containing a single graph with nested # feature vectors. # The graph has 3 nodes and 2 edges. # The feature vector can be arbitrary nested types of dict, list and tuple, # or any other type you registered with jax.tree_util.register_pytree_node. nested_graph=jraph.GraphsTuple(n_node=np.asarray([3]) n_edge=np.asarray([2]) nodes={"a":np.ones((3 4))} edges={"b":np.ones((2 5))} globals={"c":np.ones((1 6))} senders=np.array([0 1]) receivers=np.array([2 2]))<line_sep>logging.info("Nested graph %r" nested_graph)<line_sep># Creates a GraphsTuple from scratch containing a 2 graphs using an implicit # batch dimension. # The first graph has 3 nodes and 2 edges. # The second graph has 1 nodes and 1 edges. # Each node has a 4-dimensional feature vector. # Each edge has a 5-dimensional feature vector. # The graph itself has a 6-dimensional feature vector. implicitly_batched_graph=jraph.GraphsTuple(n_node=np.asarray([3 1]) n_edge=np.asarray([2 1]) nodes=np.ones((4 4)) edges=np.ones((3 5)) globals=np.ones((2 6)) senders=np.array([0 1 3]) receivers=np.array([2 2 3]))<line_sep>logging.info("Implicitly batched graph %r" implicitly_batched_graph)<line_sep># Batching graphs can be challenging. There are in general two approaches: # 1. Implicit batching: Independent graphs are combined into the same # GraphsTuple first, and the padding is added to the combined graph. # 2. Explicit batching: Pad all graphs to a maximum size, stack them together # using an explicit batch dimension followed by jax.vmap. # Both approaches are shown below. # Creates a GraphsTuple from two existing GraphsTuple using an implicit # batch dimension. # The GraphsTuple will contain three graphs. implicitly_batched_graph=jraph.batch([single_graph implicitly_batched_graph])<line_sep>logging.info("Implicitly batched graph %r" implicitly_batched_graph)<line_sep># Creates multiple GraphsTuples from an existing GraphsTuple with an implicit # batch dimension. 
graph_1,graph_2,graph_3=jraph.unbatch(implicitly_batched_graph)<line_sep>logging.info("Unbatched graphs %r %r %r" graph_1 graph_2 graph_3)<line_sep># Creates a padded GraphsTuple from an existing GraphsTuple. # The padded GraphsTuple will contain 10 nodes, 5 edges, and 4 graphs. # Three graphs are added for the padding. # First an dummy graph which contains the padding nodes and edges and secondly # two empty graphs without nodes or edges to pad out the graphs. padded_graph=jraph.pad_with_graphs(single_graph n_node=10 n_edge=5 n_graph=4)<line_sep>logging.info("Padded graph %r" padded_graph)<line_sep># Creates a GraphsTuple from an existing padded GraphsTuple. # The previously added padding is removed. single_graph=jraph.unpad_with_graphs(padded_graph)<line_sep>logging.info("Unpadded graph %r" single_graph)<line_sep># Creates a GraphsTuple containing a 2 graphs using an explicit batch # dimension. # An explicit batch dimension requires more memory, but can simplify # the definition of functions operating on the graph. # Explicitly batched graphs require the GraphNetwork to be transformed # by jax.vmap. # Using an explicit batch requires padding all feature vectors to # the maximum size of nodes and edges. # The first graph has 3 nodes and 2 edges. # The second graph has 1 nodes and 1 edges. # Each node has a 4-dimensional feature vector. # Each edge has a 5-dimensional feature vector. # The graph itself has a 6-dimensional feature vector. explicitly_batched_graph=jraph.GraphsTuple(n_node=np.asarray([[3] [1]]) n_edge=np.asarray([[2] [1]]) nodes=np.ones((2 3 4)) edges=np.ones((2 2 5)) globals=np.ones((2 1 6)) senders=np.array([[0 1] [0 -1]]) receivers=np.array([[2 2] [0 -1]]))<line_sep>logging.info("Explicitly batched graph %r" explicitly_batched_graph)<line_sep># Running a graph propagation steps. # First define the update functions for the edges, nodes and globals. # In this example we use the identity everywhere. # For Graph neural networks, each update function is typically a neural # network. <def_stmt>update_edge_fn edge_features sender_node_features receiver_node_features globals_<block_start>"""Returns the update edge features."""<del_stmt>sender_node_features<del_stmt>receiver_node_features<del_stmt>globals_<line_sep><return>edge_features<block_end><def_stmt>update_node_fn node_features aggregated_sender_edge_features aggregated_receiver_edge_features globals_<block_start>"""Returns the update node features."""<del_stmt>aggregated_sender_edge_features<del_stmt>aggregated_receiver_edge_features<del_stmt>globals_<line_sep><return>node_features<block_end><def_stmt>update_globals_fn aggregated_node_features aggregated_edge_features globals_<block_start><del_stmt>aggregated_node_features<del_stmt>aggregated_edge_features<line_sep><return>globals_<block_end># Optionally define custom aggregation functions. # In this example we use the defaults (so no need to define them explicitly). aggregate_edges_for_nodes_fn=jraph.segment_sum<line_sep>aggregate_nodes_for_globals_fn=jraph.segment_sum<line_sep>aggregate_edges_for_globals_fn=jraph.segment_sum<line_sep># Optionally define attention logit function and attention reduce function. # This can be used for graph attention. # The attention function calculates attention weights, and the apply # attention function calculates the new edge feature given the weights. # We don't use graph attention here, and just pass the defaults. 
attention_logit_fn=<none><line_sep>attention_reduce_fn=<none><line_sep># Creates a new GraphNetwork in its most general form. # Most of the arguments have defaults and can be omitted if a feature # is not used. # There are also predefined GraphNetworks available (see models.py). network=jraph.GraphNetwork(update_edge_fn=update_edge_fn update_node_fn=update_node_fn update_global_fn=update_globals_fn attention_logit_fn=attention_logit_fn aggregate_edges_for_nodes_fn=aggregate_edges_for_nodes_fn aggregate_nodes_for_globals_fn=aggregate_nodes_for_globals_fn aggregate_edges_for_globals_fn=aggregate_edges_for_globals_fn attention_reduce_fn=attention_reduce_fn)<line_sep># Runs graph propagation on (implicitly batched) graphs. updated_graph=network(single_graph)<line_sep>logging.info("Updated graph from single graph %r" updated_graph)<line_sep>updated_graph=network(nested_graph)<line_sep>logging.info("Updated graph from nested graph %r" updated_graph)<line_sep>updated_graph=network(implicitly_batched_graph)<line_sep>logging.info("Updated graph from implicitly batched graph %r" updated_graph)<line_sep>updated_graph=network(padded_graph)<line_sep>logging.info("Updated graph from padded graph %r" updated_graph)<line_sep># JIT-compile graph propagation. # Use padded graphs to avoid re-compilation at every step! jitted_network=jax.jit(network)<line_sep>updated_graph=jitted_network(padded_graph)<line_sep>logging.info("(JIT) updated graph from padded graph %r" updated_graph)<line_sep>logging.info("basic.py complete!")<block_end><def_stmt>main argv<block_start><if_stmt>len(argv)<g>1<block_start><raise>app.UsageError("Too many command-line arguments.")<block_end>run()<block_end><if_stmt>__name__<eq>"__main__"<block_start>app.run(main)<block_end>
<import_from_stmt>.StringEnum StringEnum<line_sep>
# Copyright 2016-present CERN – European Organization for Nuclear Research # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>qf_lib.backtesting.contract.contract Contract<import_from_stmt>qf_lib.backtesting.contract.contract_to_ticker_conversion.base ContractTickerMapper<import_from_stmt>qf_lib.common.tickers.tickers BloombergTicker<class_stmt>IB_Bloomberg_ContractTickerMapper(ContractTickerMapper)<block_start>""" BloombergTicker - IB Contract mapper that can be used for live trading. It uses the "SMART" exchange for all products. Parameters ----------- bbg_suffix: str suffix added after the first part of the BBG ticker. For example: "US Equity", "PW Equity", etc. security_type: str corresponds to the security type that is used to create Contract. For example: use "STK" for stocks, ETFs and ETNs, use "CMDTY" for commodities, use "BOND" for bonds, use "OPT" for options, use "FUT" for futures. """<def_stmt>__init__ self bbg_suffix:str security_type:str<block_start>self.bbg_suffix=bbg_suffix<line_sep>self.security_type=security_type<block_end><def_stmt>contract_to_ticker self contract:Contract strictly_to_specific_ticker=<true><arrow>BloombergTicker<block_start><return>BloombergTicker(ticker=contract.symbol)<block_end><def_stmt>ticker_to_contract self ticker:BloombergTicker<arrow>Contract<block_start>split_ticker=ticker.ticker.split()<line_sep><return>Contract(symbol=split_ticker[0] security_type=self.security_type exchange="SMART")<block_end><block_end>
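# A minimal usage sketch for the mapper above (added for illustration, not part of the original module); the suffix, security type and ticker string are assumed example values. The round trip below should yield Contract(symbol="SPY", security_type="STK", exchange="SMART") and then BloombergTicker("SPY").
mapper=IB_Bloomberg_ContractTickerMapper(bbg_suffix="US Equity" security_type="STK")<line_sep>example_contract=mapper.ticker_to_contract(BloombergTicker(ticker="SPY US Equity"))<line_sep>example_ticker=mapper.contract_to_ticker(example_contract)<line_sep>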
# Generated by Django 3.0.10 on 2020-10-17 00:14 <import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('reporting' '0015_auto_20201016_1756') ]<line_sep>operations=[migrations.RemoveField(model_name='report' name='template' ) migrations.AddField(model_name='report' name='docx_template' field=models.ForeignKey(help_text='Select the Word template to use for this report' limit_choices_to={'doc_type__iexact':'docx'} null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='reporttemplate_docx_set' to='reporting.ReportTemplate') ) migrations.AddField(model_name='report' name='pptx_template' field=models.ForeignKey(help_text='Select the PowerPoint template to use for this report' limit_choices_to={'doc_type__iexact':'pptx'} null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='reporttemplate_pptx_set' to='reporting.ReportTemplate') ) migrations.AlterField(model_name='finding' name='finding_guidance' field=models.TextField(blank=<true> help_text='Provide notes for your team that describes how the finding is intended to be used or edited during editing' null=<true> verbose_name='Finding Guidance') ) ]<block_end>
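# A hypothetical usage sketch (added for illustration, not part of the migration): after this migration a Report row can reference separate Word and PowerPoint templates. The model import path and the lookup values below are assumptions based on the app label and the limit_choices_to filters above.
<import_from_stmt>reporting.models Report ReportTemplate<line_sep>report=Report.objects.first()<line_sep>report.docx_template=ReportTemplate.objects.filter(doc_type__iexact="docx").first()<line_sep>report.pptx_template=ReportTemplate.objects.filter(doc_type__iexact="pptx").first()<line_sep>report.save()<line_sep>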
<import_from_stmt>typing Dict<import_from_stmt>typing List<import_from_stmt>botocore.paginate Paginator<class_stmt>ListAWSServiceAccessForOrganization(Paginator)<block_start><def_stmt>paginate self PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_aws_service_access_for_organization`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAWSServiceAccessForOrganization>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'EnabledServicePrincipals': [ { 'ServicePrincipal': 'string', 'DateEnabled': datetime(2015, 1, 1) }, ], } **Response Structure** - *(dict) --* - **EnabledServicePrincipals** *(list) --* A list of the service principals for the services that are enabled to integrate with your organization. Each principal is a structure that includes the name and the date that it was enabled for integration with AWS Organizations. - *(dict) --* A structure that contains details of a service principal that is enabled to integrate with AWS Organizations. - **ServicePrincipal** *(string) --* The name of the service principal. This is typically in the form of a URL, such as: `` *servicename* .amazonaws.com`` . - **DateEnabled** *(datetime) --* The date that the service principal was enabled for integration with AWS Organizations. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListAccounts(Paginator)<block_start><def_stmt>paginate self PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_accounts`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAccounts>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Accounts': [ { 'Id': 'string', 'Arn': 'string', 'Email': 'string', 'Name': 'string', 'Status': 'ACTIVE'|'SUSPENDED', 'JoinedMethod': 'INVITED'|'CREATED', 'JoinedTimestamp': datetime(2015, 1, 1) }, ], } **Response Structure** - *(dict) --* - **Accounts** *(list) --* A list of objects in the organization. - *(dict) --* Contains information about an AWS account that is a member of an organization. - **Id** *(string) --* The unique identifier (ID) of the account. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an account ID string requires exactly 12 digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of the account. For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . 
- **Email** *(string) --* The email address associated with the AWS account. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for this parameter is a string of characters that represents a standard Internet email address. - **Name** *(string) --* The friendly name of the account. The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range. - **Status** *(string) --* The status of the account in the organization. - **JoinedMethod** *(string) --* The method by which the account joined the organization. - **JoinedTimestamp** *(datetime) --* The date the account became a part of the organization. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListAccountsForParent(Paginator)<block_start><def_stmt>paginate self ParentId:str PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_accounts_for_parent`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListAccountsForParent>`_ **Request Syntax** :: response_iterator = paginator.paginate( ParentId='string', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Accounts': [ { 'Id': 'string', 'Arn': 'string', 'Email': 'string', 'Name': 'string', 'Status': 'ACTIVE'|'SUSPENDED', 'JoinedMethod': 'INVITED'|'CREATED', 'JoinedTimestamp': datetime(2015, 1, 1) }, ], } **Response Structure** - *(dict) --* - **Accounts** *(list) --* A list of the accounts in the specified root or OU. - *(dict) --* Contains information about an AWS account that is a member of an organization. - **Id** *(string) --* The unique identifier (ID) of the account. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an account ID string requires exactly 12 digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of the account. For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . - **Email** *(string) --* The email address associated with the AWS account. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for this parameter is a string of characters that represents a standard Internet email address. - **Name** *(string) --* The friendly name of the account. The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range. - **Status** *(string) --* The status of the account in the organization. - **JoinedMethod** *(string) --* The method by which the account joined the organization. - **JoinedTimestamp** *(datetime) --* The date the account became a part of the organization. 
:type ParentId: string :param ParentId: **[REQUIRED]** The unique identifier (ID) for the parent root or organization unit (OU) whose accounts you want to list. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListChildren(Paginator)<block_start><def_stmt>paginate self ParentId:str ChildType:str PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_children`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListChildren>`_ **Request Syntax** :: response_iterator = paginator.paginate( ParentId='string', ChildType='ACCOUNT'|'ORGANIZATIONAL_UNIT', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Children': [ { 'Id': 'string', 'Type': 'ACCOUNT'|'ORGANIZATIONAL_UNIT' }, ], } **Response Structure** - *(dict) --* - **Children** *(list) --* The list of children of the specified parent container. - *(dict) --* Contains a list of child entities, either OUs or accounts. - **Id** *(string) --* The unique identifier (ID) of this child entity. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a child ID string requires one of the following: * Account: a string that consists of exactly 12 digits. * Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits. - **Type** *(string) --* The type of this child entity. :type ParentId: string :param ParentId: **[REQUIRED]** The unique identifier (ID) for the parent root or OU whose children you want to list. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a parent ID string requires one of the following: * Root: a string that begins with \"r-\" followed by from 4 to 32 lower-case letters or digits. * Organizational unit (OU): a string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits. :type ChildType: string :param ChildType: **[REQUIRED]** Filters the output to include only the specified child type. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. 
:rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListCreateAccountStatus(Paginator)<block_start><def_stmt>paginate self States:List=<none> PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_create_account_status`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListCreateAccountStatus>`_ **Request Syntax** :: response_iterator = paginator.paginate( States=[ 'IN_PROGRESS'|'SUCCEEDED'|'FAILED', ], PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'CreateAccountStatuses': [ { 'Id': 'string', 'AccountName': 'string', 'State': 'IN_PROGRESS'|'SUCCEEDED'|'FAILED', 'RequestedTimestamp': datetime(2015, 1, 1), 'CompletedTimestamp': datetime(2015, 1, 1), 'AccountId': 'string', 'GovCloudAccountId': 'string', 'FailureReason': 'ACCOUNT_LIMIT_EXCEEDED'|'EMAIL_ALREADY_EXISTS'|'INVALID_ADDRESS'|'INVALID_EMAIL'|'CONCURRENT_ACCOUNT_MODIFICATION'|'INTERNAL_FAILURE' }, ], } **Response Structure** - *(dict) --* - **CreateAccountStatuses** *(list) --* A list of objects with details about the requests. Certain elements, such as the accountId number, are present in the output only after the account has been successfully created. - *(dict) --* Contains the status about a CreateAccount or CreateGovCloudAccount request to create an AWS account or an AWS GovCloud (US) account in an organization. - **Id** *(string) --* The unique identifier (ID) that references this request. You get this value from the response of the initial CreateAccount request to create the account. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an create account request ID string requires "car-" followed by from 8 to 32 lower-case letters or digits. - **AccountName** *(string) --* The account name given to the account when it was created. - **State** *(string) --* The status of the request. - **RequestedTimestamp** *(datetime) --* The date and time that the request was made for the account creation. - **CompletedTimestamp** *(datetime) --* The date and time that the account was created and the request completed. - **AccountId** *(string) --* If the account was created successfully, the unique identifier (ID) of the new account. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an account ID string requires exactly 12 digits. - **GovCloudAccountId** *(string) --* - **FailureReason** *(string) --* If the request failed, a description of the reason for the failure. * ACCOUNT_LIMIT_EXCEEDED: The account could not be created because you have reached the limit on the number of accounts in your organization. * EMAIL_ALREADY_EXISTS: The account could not be created because another AWS account with that email address already exists. * INVALID_ADDRESS: The account could not be created because the address you provided is not valid. * INVALID_EMAIL: The account could not be created because the email address you provided is not valid. * INTERNAL_FAILURE: The account could not be created because of an internal failure. Try again later. If the problem persists, contact Customer Support. :type States: list :param States: A list of one or more states that you want included in the response. If this parameter is not present, then all requests are included in the response. - *(string) --* :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. 
- **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListHandshakesForAccount(Paginator)<block_start><def_stmt>paginate self Filter:Dict=<none> PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_handshakes_for_account`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForAccount>`_ **Request Syntax** :: response_iterator = paginator.paginate( Filter={ 'ActionType': 'INVITE'|'ENABLE_ALL_FEATURES'|'APPROVE_ALL_FEATURES'|'ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE', 'ParentHandshakeId': 'string' }, PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Handshakes': [ { 'Id': 'string', 'Arn': 'string', 'Parties': [ { 'Id': 'string', 'Type': 'ACCOUNT'|'ORGANIZATION'|'EMAIL' }, ], 'State': 'REQUESTED'|'OPEN'|'CANCELED'|'ACCEPTED'|'DECLINED'|'EXPIRED', 'RequestedTimestamp': datetime(2015, 1, 1), 'ExpirationTimestamp': datetime(2015, 1, 1), 'Action': 'INVITE'|'ENABLE_ALL_FEATURES'|'APPROVE_ALL_FEATURES'|'ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE', 'Resources': [ { 'Value': 'string', 'Type': 'ACCOUNT'|'ORGANIZATION'|'ORGANIZATION_FEATURE_SET'|'EMAIL'|'MASTER_EMAIL'|'MASTER_NAME'|'NOTES'|'PARENT_HANDSHAKE', 'Resources': {'... recursive ...'} }, ] }, ], } **Response Structure** - *(dict) --* - **Handshakes** *(list) --* A list of Handshake objects with details about each of the handshakes that is associated with the specified account. - *(dict) --* Contains information that must be exchanged to securely establish a relationship between two accounts (an *originator* and a *recipient* ). For example, when a master account (the originator) invites another account (the recipient) to join its organization, the two accounts exchange information as a series of handshake requests and responses. **Note:** Handshakes that are CANCELED, ACCEPTED, or DECLINED show up in lists for only 30 days after entering that state After that they are deleted. - **Id** *(string) --* The unique identifier (ID) of a handshake. The originating account creates the ID when it initiates the handshake. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires "h-" followed by from 8 to 32 lower-case letters or digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of a handshake. For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . - **Parties** *(list) --* Information about the two accounts that are participating in the handshake. - *(dict) --* Identifies a participant in a handshake. - **Id** *(string) --* The unique identifier (ID) for the party. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires "h-" followed by from 8 to 32 lower-case letters or digits. - **Type** *(string) --* The type of party. 
- **State** *(string) --* The current state of the handshake. Use the state to trace the flow of the handshake through the process from its creation to its acceptance. The meaning of each of the valid values is as follows: * **REQUESTED** : This handshake was sent to multiple recipients (applicable to only some handshake types) and not all recipients have responded yet. The request stays in this state until all recipients respond. * **OPEN** : This handshake was sent to multiple recipients (applicable to only some policy types) and all recipients have responded, allowing the originator to complete the handshake action. * **CANCELED** : This handshake is no longer active because it was canceled by the originating account. * **ACCEPTED** : This handshake is complete because it has been accepted by the recipient. * **DECLINED** : This handshake is no longer active because it was declined by the recipient account. * **EXPIRED** : This handshake is no longer active because the originator did not receive a response of any kind from the recipient before the expiration time (15 days). - **RequestedTimestamp** *(datetime) --* The date and time that the handshake request was made. - **ExpirationTimestamp** *(datetime) --* The date and time that the handshake expires. If the recipient of the handshake request fails to respond before the specified date and time, the handshake becomes inactive and is no longer valid. - **Action** *(string) --* The type of handshake, indicating what action occurs when the recipient accepts the handshake. The following handshake types are supported: * **INVITE** : This type of handshake represents a request to join an organization. It is always sent from the master account to only non-member accounts. * **ENABLE_ALL_FEATURES** : This type of handshake represents a request to enable all features in an organization. It is always sent from the master account to only *invited* member accounts. Created accounts do not receive this because those accounts were created by the organization's master account and approval is inferred. * **APPROVE_ALL_FEATURES** : This type of handshake is sent from the Organizations service when all member accounts have approved the ``ENABLE_ALL_FEATURES`` invitation. It is sent only to the master account and signals the master that it can finalize the process to enable all features. - **Resources** *(list) --* Additional information that is needed to process the handshake. - *(dict) --* Contains additional data that is needed to process a handshake. - **Value** *(string) --* The information that is passed to the other party in the handshake. The format of the value string must match the requirements of the specified type. - **Type** *(string) --* The type of information being passed, specifying how the value is to be interpreted by the other party: * ``ACCOUNT`` - Specifies an AWS account ID number. * ``ORGANIZATION`` - Specifies an organization ID number. * ``EMAIL`` - Specifies the email address that is associated with the account that receives the handshake. * ``OWNER_EMAIL`` - Specifies the email address associated with the master account. Included as information about an organization. * ``OWNER_NAME`` - Specifies the name associated with the master account. Included as information about an organization. * ``NOTES`` - Additional text provided by the handshake initiator and intended for the recipient to read. - **Resources** *(list) --* When needed, contains an additional array of ``HandshakeResource`` objects. 
:type Filter: dict :param Filter: Filters the handshakes that you want included in the response. The default is all types. Use the ``ActionType`` element to limit the output to only a specified type, such as ``INVITE`` , ``ENABLE_ALL_FEATURES`` , or ``APPROVE_ALL_FEATURES`` . Alternatively, for the ``ENABLE_ALL_FEATURES`` handshake that generates a separate child handshake for each member account, you can specify ``ParentHandshakeId`` to see only the handshakes that were generated by that parent request. - **ActionType** *(string) --* Specifies the type of handshake action. If you specify ``ActionType`` , you cannot also specify ``ParentHandshakeId`` . - **ParentHandshakeId** *(string) --* Specifies the parent handshake. Only used for handshake types that are a child of another type. If you specify ``ParentHandshakeId`` , you cannot also specify ``ActionType`` . The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires \"h-\" followed by from 8 to 32 lower-case letters or digits. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListHandshakesForOrganization(Paginator)<block_start><def_stmt>paginate self Filter:Dict=<none> PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_handshakes_for_organization`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListHandshakesForOrganization>`_ **Request Syntax** :: response_iterator = paginator.paginate( Filter={ 'ActionType': 'INVITE'|'ENABLE_ALL_FEATURES'|'APPROVE_ALL_FEATURES'|'ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE', 'ParentHandshakeId': 'string' }, PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Handshakes': [ { 'Id': 'string', 'Arn': 'string', 'Parties': [ { 'Id': 'string', 'Type': 'ACCOUNT'|'ORGANIZATION'|'EMAIL' }, ], 'State': 'REQUESTED'|'OPEN'|'CANCELED'|'ACCEPTED'|'DECLINED'|'EXPIRED', 'RequestedTimestamp': datetime(2015, 1, 1), 'ExpirationTimestamp': datetime(2015, 1, 1), 'Action': 'INVITE'|'ENABLE_ALL_FEATURES'|'APPROVE_ALL_FEATURES'|'ADD_ORGANIZATIONS_SERVICE_LINKED_ROLE', 'Resources': [ { 'Value': 'string', 'Type': 'ACCOUNT'|'ORGANIZATION'|'ORGANIZATION_FEATURE_SET'|'EMAIL'|'MASTER_EMAIL'|'MASTER_NAME'|'NOTES'|'PARENT_HANDSHAKE', 'Resources': {'... recursive ...'} }, ] }, ], } **Response Structure** - *(dict) --* - **Handshakes** *(list) --* A list of Handshake objects with details about each of the handshakes that are associated with an organization. - *(dict) --* Contains information that must be exchanged to securely establish a relationship between two accounts (an *originator* and a *recipient* ). For example, when a master account (the originator) invites another account (the recipient) to join its organization, the two accounts exchange information as a series of handshake requests and responses. 
**Note:** Handshakes that are CANCELED, ACCEPTED, or DECLINED show up in lists for only 30 days after entering that state After that they are deleted. - **Id** *(string) --* The unique identifier (ID) of a handshake. The originating account creates the ID when it initiates the handshake. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires "h-" followed by from 8 to 32 lower-case letters or digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of a handshake. For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . - **Parties** *(list) --* Information about the two accounts that are participating in the handshake. - *(dict) --* Identifies a participant in a handshake. - **Id** *(string) --* The unique identifier (ID) for the party. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires "h-" followed by from 8 to 32 lower-case letters or digits. - **Type** *(string) --* The type of party. - **State** *(string) --* The current state of the handshake. Use the state to trace the flow of the handshake through the process from its creation to its acceptance. The meaning of each of the valid values is as follows: * **REQUESTED** : This handshake was sent to multiple recipients (applicable to only some handshake types) and not all recipients have responded yet. The request stays in this state until all recipients respond. * **OPEN** : This handshake was sent to multiple recipients (applicable to only some policy types) and all recipients have responded, allowing the originator to complete the handshake action. * **CANCELED** : This handshake is no longer active because it was canceled by the originating account. * **ACCEPTED** : This handshake is complete because it has been accepted by the recipient. * **DECLINED** : This handshake is no longer active because it was declined by the recipient account. * **EXPIRED** : This handshake is no longer active because the originator did not receive a response of any kind from the recipient before the expiration time (15 days). - **RequestedTimestamp** *(datetime) --* The date and time that the handshake request was made. - **ExpirationTimestamp** *(datetime) --* The date and time that the handshake expires. If the recipient of the handshake request fails to respond before the specified date and time, the handshake becomes inactive and is no longer valid. - **Action** *(string) --* The type of handshake, indicating what action occurs when the recipient accepts the handshake. The following handshake types are supported: * **INVITE** : This type of handshake represents a request to join an organization. It is always sent from the master account to only non-member accounts. * **ENABLE_ALL_FEATURES** : This type of handshake represents a request to enable all features in an organization. It is always sent from the master account to only *invited* member accounts. Created accounts do not receive this because those accounts were created by the organization's master account and approval is inferred. * **APPROVE_ALL_FEATURES** : This type of handshake is sent from the Organizations service when all member accounts have approved the ``ENABLE_ALL_FEATURES`` invitation. It is sent only to the master account and signals the master that it can finalize the process to enable all features. 
- **Resources** *(list) --* Additional information that is needed to process the handshake. - *(dict) --* Contains additional data that is needed to process a handshake. - **Value** *(string) --* The information that is passed to the other party in the handshake. The format of the value string must match the requirements of the specified type. - **Type** *(string) --* The type of information being passed, specifying how the value is to be interpreted by the other party: * ``ACCOUNT`` - Specifies an AWS account ID number. * ``ORGANIZATION`` - Specifies an organization ID number. * ``EMAIL`` - Specifies the email address that is associated with the account that receives the handshake. * ``OWNER_EMAIL`` - Specifies the email address associated with the master account. Included as information about an organization. * ``OWNER_NAME`` - Specifies the name associated with the master account. Included as information about an organization. * ``NOTES`` - Additional text provided by the handshake initiator and intended for the recipient to read. - **Resources** *(list) --* When needed, contains an additional array of ``HandshakeResource`` objects. :type Filter: dict :param Filter: A filter of the handshakes that you want included in the response. The default is all types. Use the ``ActionType`` element to limit the output to only a specified type, such as ``INVITE`` , ``ENABLE-ALL-FEATURES`` , or ``APPROVE-ALL-FEATURES`` . Alternatively, for the ``ENABLE-ALL-FEATURES`` handshake that generates a separate child handshake for each member account, you can specify the ``ParentHandshakeId`` to see only the handshakes that were generated by that parent request. - **ActionType** *(string) --* Specifies the type of handshake action. If you specify ``ActionType`` , you cannot also specify ``ParentHandshakeId`` . - **ParentHandshakeId** *(string) --* Specifies the parent handshake. Only used for handshake types that are a child of another type. If you specify ``ParentHandshakeId`` , you cannot also specify ``ActionType`` . The `regex pattern <http://wikipedia.org/wiki/regex>`__ for handshake ID string requires \"h-\" followed by from 8 to 32 lower-case letters or digits. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListOrganizationalUnitsForParent(Paginator)<block_start><def_stmt>paginate self ParentId:str PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_organizational_units_for_parent`. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListOrganizationalUnitsForParent>`_ **Request Syntax** :: response_iterator = paginator.paginate( ParentId='string', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'OrganizationalUnits': [ { 'Id': 'string', 'Arn': 'string', 'Name': 'string' }, ], } **Response Structure** - *(dict) --* - **OrganizationalUnits** *(list) --* A list of the OUs in the specified root or parent OU. - *(dict) --* Contains details about an organizational unit (OU). An OU is a container of AWS accounts within a root of an organization. Policies that are attached to an OU apply to all accounts contained in that OU and in any child OUs. - **Id** *(string) --* The unique identifier (ID) associated with this OU. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for an organizational unit ID string requires "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of this OU. For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . - **Name** *(string) --* The friendly name of this OU. The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range. :type ParentId: string :param ParentId: **[REQUIRED]** The unique identifier (ID) of the root or OU whose child OUs you want to list. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a parent ID string requires one of the following: * Root: a string that begins with \"r-\" followed by from 4 to 32 lower-case letters or digits. * Organizational unit (OU): a string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListParents(Paginator)<block_start><def_stmt>paginate self ChildId:str PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_parents`. 
See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListParents>`_ **Request Syntax** :: response_iterator = paginator.paginate( ChildId='string', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Parents': [ { 'Id': 'string', 'Type': 'ROOT'|'ORGANIZATIONAL_UNIT' }, ], } **Response Structure** - *(dict) --* - **Parents** *(list) --* A list of parents for the specified child account or OU. - *(dict) --* Contains information about either a root or an organizational unit (OU) that can contain OUs or accounts in an organization. - **Id** *(string) --* The unique identifier (ID) of the parent entity. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a parent ID string requires one of the following: * Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits. * Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits. - **Type** *(string) --* The type of the parent entity. :type ChildId: string :param ChildId: **[REQUIRED]** The unique identifier (ID) of the OU or account whose parent containers you want to list. Do not specify a root. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a child ID string requires one of the following: * Account: a string that consists of exactly 12 digits. * Organizational unit (OU): a string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that contains the OU) followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListPolicies(Paginator)<block_start><def_stmt>paginate self Filter:str PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_policies`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPolicies>`_ **Request Syntax** :: response_iterator = paginator.paginate( Filter='SERVICE_CONTROL_POLICY', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Policies': [ { 'Id': 'string', 'Arn': 'string', 'Name': 'string', 'Description': 'string', 'Type': 'SERVICE_CONTROL_POLICY', 'AwsManaged': True|False }, ], } **Response Structure** - *(dict) --* - **Policies** *(list) --* A list of policies that match the filter criteria in the request. The output list does not include the policy contents. To see the content for a policy, see DescribePolicy . - *(dict) --* Contains information about a policy, but does not include the content. To see the content of a policy, see DescribePolicy . 
- **Id** *(string) --* The unique identifier (ID) of the policy. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a policy ID string requires "p-" followed by from 8 to 128 lower-case letters or digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of the policy. For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . - **Name** *(string) --* The friendly name of the policy. The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range. - **Description** *(string) --* The description of the policy. - **Type** *(string) --* The type of policy. - **AwsManaged** *(boolean) --* A boolean value that indicates whether the specified policy is an AWS managed policy. If true, then you can attach the policy to roots, OUs, or accounts, but you cannot edit it. :type Filter: string :param Filter: **[REQUIRED]** Specifies the type of policy that you want to include in the response. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListPoliciesForTarget(Paginator)<block_start><def_stmt>paginate self TargetId:str Filter:str PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_policies_for_target`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListPoliciesForTarget>`_ **Request Syntax** :: response_iterator = paginator.paginate( TargetId='string', Filter='SERVICE_CONTROL_POLICY', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Policies': [ { 'Id': 'string', 'Arn': 'string', 'Name': 'string', 'Description': 'string', 'Type': 'SERVICE_CONTROL_POLICY', 'AwsManaged': True|False }, ], } **Response Structure** - *(dict) --* - **Policies** *(list) --* The list of policies that match the criteria in the request. - *(dict) --* Contains information about a policy, but does not include the content. To see the content of a policy, see DescribePolicy . - **Id** *(string) --* The unique identifier (ID) of the policy. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a policy ID string requires "p-" followed by from 8 to 128 lower-case letters or digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of the policy. For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . - **Name** *(string) --* The friendly name of the policy. 
The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range. - **Description** *(string) --* The description of the policy. - **Type** *(string) --* The type of policy. - **AwsManaged** *(boolean) --* A boolean value that indicates whether the specified policy is an AWS managed policy. If true, then you can attach the policy to roots, OUs, or accounts, but you cannot edit it. :type TargetId: string :param TargetId: **[REQUIRED]** The unique identifier (ID) of the root, organizational unit, or account whose policies you want to list. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a target ID string requires one of the following: * Root: a string that begins with \"r-\" followed by from 4 to 32 lower-case letters or digits. * Account: a string that consists of exactly 12 digits. * Organizational unit (OU): a string that begins with \"ou-\" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second \"-\" dash and from 8 to 32 additional lower-case letters or digits. :type Filter: string :param Filter: **[REQUIRED]** The type of policy that you want to include in the returned list. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListRoots(Paginator)<block_start><def_stmt>paginate self PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_roots`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListRoots>`_ **Request Syntax** :: response_iterator = paginator.paginate( PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Roots': [ { 'Id': 'string', 'Arn': 'string', 'Name': 'string', 'PolicyTypes': [ { 'Type': 'SERVICE_CONTROL_POLICY', 'Status': 'ENABLED'|'PENDING_ENABLE'|'PENDING_DISABLE' }, ] }, ], } **Response Structure** - *(dict) --* - **Roots** *(list) --* A list of roots that are defined in an organization. - *(dict) --* Contains details about a root. A root is a top-level parent node in the hierarchy of an organization that can contain organizational units (OUs) and accounts. Every root contains every AWS account in the organization. Each root enables the accounts to be organized in a different way and to have different policy types enabled for use in that root. - **Id** *(string) --* The unique identifier (ID) for the root. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a root ID string requires "r-" followed by from 4 to 32 lower-case letters or digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of the root. 
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . - **Name** *(string) --* The friendly name of the root. The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range. - **PolicyTypes** *(list) --* The types of policies that are currently enabled for the root and therefore can be attached to the root or to its OUs or accounts. .. note:: Even if a policy type is shown as available in the organization, you can separately enable and disable them at the root level by using EnablePolicyType and DisablePolicyType . Use DescribeOrganization to see the availability of the policy types in that organization. - *(dict) --* Contains information about a policy type and its status in the associated root. - **Type** *(string) --* The name of the policy type. - **Status** *(string) --* The status of the policy type as it relates to the associated root. To attach a policy of the specified type to a root or to an OU or account in that root, it must be available in the organization and enabled for that root. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end><class_stmt>ListTargetsForPolicy(Paginator)<block_start><def_stmt>paginate self PolicyId:str PaginationConfig:Dict=<none><arrow>Dict<block_start>""" Creates an iterator that will paginate through responses from :py:meth:`Organizations.Client.list_targets_for_policy`. See also: `AWS API Documentation <https://docs.aws.amazon.com/goto/WebAPI/organizations-2016-11-28/ListTargetsForPolicy>`_ **Request Syntax** :: response_iterator = paginator.paginate( PolicyId='string', PaginationConfig={ 'MaxItems': 123, 'PageSize': 123, 'StartingToken': 'string' } ) **Response Syntax** :: { 'Targets': [ { 'TargetId': 'string', 'Arn': 'string', 'Name': 'string', 'Type': 'ACCOUNT'|'ORGANIZATIONAL_UNIT'|'ROOT' }, ], } **Response Structure** - *(dict) --* - **Targets** *(list) --* A list of structures, each of which contains details about one of the entities to which the specified policy is attached. - *(dict) --* Contains information about a root, OU, or account that a policy is attached to. - **TargetId** *(string) --* The unique identifier (ID) of the policy target. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a target ID string requires one of the following: * Root: a string that begins with "r-" followed by from 4 to 32 lower-case letters or digits. * Account: a string that consists of exactly 12 digits. * Organizational unit (OU): a string that begins with "ou-" followed by from 4 to 32 lower-case letters or digits (the ID of the root that the OU is in) followed by a second "-" dash and from 8 to 32 additional lower-case letters or digits. - **Arn** *(string) --* The Amazon Resource Name (ARN) of the policy target. 
For more information about ARNs in Organizations, see `ARN Formats Supported by Organizations <https://docs.aws.amazon.com/organizations/latest/userguide/orgs_permissions.html#orgs-permissions-arns>`__ in the *AWS Organizations User Guide* . - **Name** *(string) --* The friendly name of the policy target. The `regex pattern <http://wikipedia.org/wiki/regex>`__ that is used to validate this parameter is a string of any of the characters in the ASCII character range. - **Type** *(string) --* The type of the policy target. :type PolicyId: string :param PolicyId: **[REQUIRED]** The unique identifier (ID) of the policy for which you want to know its attachments. The `regex pattern <http://wikipedia.org/wiki/regex>`__ for a policy ID string requires \"p-\" followed by from 8 to 128 lower-case letters or digits. :type PaginationConfig: dict :param PaginationConfig: A dictionary that provides parameters to control pagination. - **MaxItems** *(integer) --* The total number of items to return. If the total number of items available is more than the value specified in max-items then a ``NextToken`` will be provided in the output that you can use to resume pagination. - **PageSize** *(integer) --* The size of each page. - **StartingToken** *(string) --* A token to specify where to start paginating. This is the ``NextToken`` from a previous response. :rtype: dict :returns: """<line_sep><pass><block_end><block_end>
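# A hedged usage sketch (added for illustration): these paginator stubs describe objects that are normally obtained from a boto3 Organizations client via get_paginator rather than instantiated directly. The sketch assumes AWS credentials and a default region are already configured; the page size and printed fields are illustrative.
<import_stmt>boto3<line_sep>client=boto3.client("organizations")<line_sep>paginator=client.get_paginator("list_accounts")<for_stmt>page paginator.paginate(PaginationConfig={"PageSize":20})<block_start><for_stmt>account page["Accounts"]<block_start>print(account["Id"] account["Name"])<block_end><block_end>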
<import_stmt>os<import_stmt>shutil<import_stmt>pytest<import_from_stmt>. settings<line_sep>@pytest.yield_fixture(scope='session' autouse=<true>)<def_stmt>empty_media <block_start>""" Removes the directories inside the MEDIA_ROOT that could have been filled during tests. """<line_sep><yield><for_stmt>candidate os.listdir(settings.MEDIA_ROOT)<block_start>path=os.path.join(settings.MEDIA_ROOT candidate)<try_stmt><block_start>shutil.rmtree(path)<block_end><except_stmt>OSError<block_start><pass><block_end><block_end><block_end>
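# A hypothetical companion test (added for illustration, not part of the original conftest): it shows the kind of MEDIA_ROOT write that the session-scoped fixture above removes after the test run. The directory and file names are invented.
<def_stmt>test_leaves_files_in_media_root <block_start>target_dir=os.path.join(settings.MEDIA_ROOT "uploads")<line_sep>os.makedirs(target_dir exist_ok=<true>)<line_sep>open(os.path.join(target_dir "example.txt") "w").close()<assert_stmt>os.path.isdir(target_dir)<block_end>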
<import_from_stmt>builtins range<import_stmt>json<import_stmt>falcon<import_from_stmt>mock MagicMock<import_from_stmt>ddt ddt data<import_from_stmt>tests RestTestBase<import_from_stmt>monitorrent.rest.execute_logs ExecuteLogs<class_stmt>ExecuteLogsTest(RestTestBase)<block_start><def_stmt>test_get_all self<block_start>entries=[{} {} {}]<line_sep>count=3<line_sep>log_manager=MagicMock()<line_sep>log_manager.get_log_entries=MagicMock(return_value=(entries count))<line_sep># noinspection PyTypeChecker execute_logs=ExecuteLogs(log_manager)<line_sep>self.api.add_route('/api/execute/logs' execute_logs)<line_sep>body=self.simulate_request('/api/execute/logs' query_string='take=10' decode='utf-8')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_OK)<line_sep>self.assertTrue('application/json'<in>self.srmock.headers_dict['Content-Type'])<line_sep>result=json.loads(body)<line_sep>self.assertEqual(entries result['data'])<line_sep>self.assertEqual(count result['count'])<block_end><def_stmt>test_get_paged self# count should be less than 30 <block_start>count=23<line_sep>entries=[{'i':i}<for>i range(count)]<def_stmt>get_log_entries skip take<block_start><return>entries[skip:skip+take] count<block_end>log_manager=MagicMock()<line_sep>log_manager.get_log_entries=MagicMock(side_effect=get_log_entries)<line_sep># noinspection PyTypeChecker execute_logs=ExecuteLogs(log_manager)<line_sep>self.api.add_route('/api/execute/logs' execute_logs)<line_sep>body=self.simulate_request('/api/execute/logs' query_string='take=10' decode='utf-8')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_OK)<line_sep>self.assertTrue('application/json'<in>self.srmock.headers_dict['Content-Type'])<line_sep>result=json.loads(body)<line_sep>self.assertEqual(entries[0:10] result['data'])<line_sep>self.assertEqual(count result['count'])<line_sep>body=self.simulate_request('/api/execute/logs' query_string='take=10&skip=0' decode='utf-8')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_OK)<line_sep>self.assertTrue('application/json'<in>self.srmock.headers_dict['Content-Type'])<line_sep>result=json.loads(body)<line_sep>self.assertEqual(entries[0:10] result['data'])<line_sep>self.assertEqual(count result['count'])<line_sep>body=self.simulate_request('/api/execute/logs' query_string='take=10&skip=10' decode='utf-8')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_OK)<line_sep>self.assertTrue('application/json'<in>self.srmock.headers_dict['Content-Type'])<line_sep>result=json.loads(body)<line_sep>self.assertEqual(entries[10:20] result['data'])<line_sep>self.assertEqual(count result['count'])<line_sep>body=self.simulate_request('/api/execute/logs' query_string='take=10&skip=20' decode='utf-8')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_OK)<line_sep>self.assertTrue('application/json'<in>self.srmock.headers_dict['Content-Type'])<line_sep>result=json.loads(body)<line_sep># assume that count is less then 30 self.assertEqual(entries[20:count] result['data'])<line_sep>self.assertEqual(count result['count'])<block_end><def_stmt>test_bad_requests self<block_start>entries=[{} {} {}]<line_sep>count=3<line_sep>log_manager=MagicMock()<line_sep>log_manager.get_log_entries=MagicMock(return_value=(entries count))<line_sep># noinspection PyTypeChecker execute_logs=ExecuteLogs(log_manager)<line_sep>self.api.add_route('/api/execute/logs' execute_logs)<line_sep>self.simulate_request('/api/execute/logs')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_BAD_REQUEST 'take is 
required')<line_sep>self.simulate_request('/api/execute/logs' query_string='take=abcd')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_BAD_REQUEST 'take should be int')<line_sep>self.simulate_request('/api/execute/logs' query_string='take=10&skip=abcd')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_BAD_REQUEST 'skip should be int')<line_sep>self.simulate_request('/api/execute/logs' query_string='take=101')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_BAD_REQUEST 'take should be less or equal to 100')<line_sep>self.simulate_request('/api/execute/logs' query_string='take=-10')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_BAD_REQUEST 'take should be greater than 0')<line_sep>self.simulate_request('/api/execute/logs' query_string='take=0')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_BAD_REQUEST 'take should be greater than 0')<line_sep>self.simulate_request('/api/execute/logs' query_string='take=10&skip=-1')<line_sep>self.assertEqual(self.srmock.status falcon.HTTP_BAD_REQUEST 'skip should be greater or equal to 0')<block_end><block_end>
#/u/GoldenSights <import_stmt>praw# simple interface to the reddit API, also handles rate limiting of requests <import_stmt>time<import_stmt>sqlite3<line_sep>'''USER CONFIGURATION'''<line_sep>APP_ID=""<line_sep>APP_SECRET=""<line_sep>APP_URI=""<line_sep>APP_REFRESH=""<line_sep># https://www.reddit.com/comments/3cm1p8/how_to_make_your_bot_use_oauth2/ USERAGENT=""<line_sep>#This is a short description of what the bot does. For example "/u/GoldenSights' Newsletter bot" SUBREDDIT="GoldTesting"<line_sep>#This is the sub or list of subs to scan for new posts. For a single sub, use "sub1". For multiple subreddits, use "sub1+sub2+sub3+..." PARENTSTRING="beetlejuice"<line_sep>#This is the string you're looking for REPLYSTRING="We are deeply sorry, but <NAME> can't join you in this comment thread right now. Would you like to leave a message?"<line_sep>#This will be put in reply DEPTHREQ=3<line_sep>#How many comments down to take action MAXPOSTS=100<line_sep>#This is how many posts you want to retrieve all at once. PRAW can download 100 at a time. WAIT=20<line_sep>#This is how many seconds you will wait between cycles. The bot is completely inactive during this time. '''All done!'''<line_sep>WAITS=str(WAIT)<try_stmt><block_start><import_stmt>bot<line_sep>USERAGENT=bot.aG<block_end><except_stmt>ImportError<block_start><pass><block_end>sql=sqlite3.connect('sql.db')<line_sep>print('Loaded SQL Database')<line_sep>cur=sql.cursor()<line_sep>cur.execute('CREATE TABLE IF NOT EXISTS oldposts(ID TEXT, DEPTH INT)')<line_sep>cur.execute('CREATE INDEX IF NOT EXISTS oldpost_index ON oldposts(id)')<line_sep>print('Loaded Completed table')<line_sep>sql.commit()<line_sep>r=praw.Reddit(USERAGENT)<line_sep>r.set_oauth_app_info(APP_ID APP_SECRET APP_URI)<line_sep>r.refresh_access_information(APP_REFRESH)<def_stmt>scanSub <block_start>print('Scanning '+SUBREDDIT)<line_sep>subreddit=r.get_subreddit(SUBREDDIT)<line_sep>comments=list(subreddit.get_comments(limit=MAXPOSTS))<line_sep>comments.reverse()<for_stmt>comment comments<block_start>cid=comment.fullname<line_sep>cur.execute('SELECT * FROM oldposts WHERE ID=?' [cid])<if_stmt><not>cur.fetchone()<block_start><try_stmt><block_start>cauthor=comment.author.name<if_stmt>cauthor.lower()<ne>r.user.name.lower()<block_start>cbody=comment.body.lower()<if_stmt>PARENTSTRING.lower()<in>cbody<block_start><if_stmt>'t3_'<in>comment.parent_id#is a root comment on the post <block_start>cdepth=0<block_end><else_stmt><block_start>cur.execute('SELECT * FROM oldposts WHERE ID=?' [comment.parent_id])<line_sep>fetch=cur.fetchone()<if_stmt><not>fetch<block_start>cdepth=0<block_end><else_stmt><block_start>cdepth=fetch[1]+1<block_end><block_end>print(cid '- Depth:' cdepth)<if_stmt>cdepth<ge>DEPTHREQ-1<block_start>print('\tAttempting to reply')<line_sep>cur.execute('SELECT * FROM oldposts WHERE ID=?' 
[comment.link_id])<if_stmt>cur.fetchone()<block_start>print('\tAlready posted in this thread')<block_end><else_stmt><block_start>comment.reply(REPLYSTRING)<line_sep>print('\tSuccess')<line_sep>cur.execute('INSERT INTO oldposts VALUES(?, ?)' [comment.link_id 0])<block_end><block_end><block_end><else_stmt>#Does not contain interest <block_start>cdepth=-1<line_sep>print(cid '- Depth:' cdepth)<block_end><block_end><else_stmt>#Will not reply to self <block_start>cdepth=-1<line_sep><pass><block_end><block_end><except_stmt>AttributeError#Author is deleted <block_start>cdepth=0<block_end>cur.execute('INSERT INTO oldposts VALUES(?, ?)' [cid cdepth])<block_end>sql.commit()<block_end><block_end><while_stmt><true><block_start><try_stmt><block_start>scanSub()<block_end><except_stmt>Exception<as>e<block_start>print('An error has occured:' e)<block_end>print('Running again in '+WAITS+' seconds \n')<line_sep>sql.commit()<line_sep>time.sleep(WAIT)<block_end>
""" Apis for comment. """<import_from_stmt>typing Dict Optional Union<import_stmt>pyfacebook.utils.constant<as>const<import_from_stmt>pyfacebook.api.base_resource BaseResource<import_from_stmt>pyfacebook.models.ig_business_models IgBusComment IgBusReply IgBusReplies<import_from_stmt>pyfacebook.utils.params_utils enf_comma_separated<class_stmt>IGBusinessComment(BaseResource)<block_start><def_stmt>get_info self comment_id:str fields:Optional[Union[str list tuple]]=<none> return_json:bool=<false> <arrow>Union[IgBusComment dict]<block_start>""" Get information about a Business comment. :param comment_id: ID for Comment. :param fields: Comma-separated id string for data fields which you want. You can also pass this with an id list, tuple. :param return_json: Set to false will return a dataclass for IgBusComment. Or return json data. Default is false. :return: Business comment information. """<if_stmt>fields<is><none><block_start>fields=const.IG_BUSINESS_MEDIA_PUBLIC_FIELDS<block_end>data=self.client.get_object(object_id=comment_id fields=enf_comma_separated(field="fields" value=fields) )<if_stmt>return_json<block_start><return>data<block_end><else_stmt><block_start><return>IgBusComment.new_from_json_dict(data=data)<block_end><block_end><def_stmt>get_batch self ids:Optional[Union[str list tuple]] fields:Optional[Union[str list tuple]]=<none> return_json:bool=<false> <arrow>Union[Dict[str IgBusComment] dict]<block_start>""" Get batch business comment information by ids :param ids: IDs for the comments. :param fields: Comma-separated id string for data fields which you want. You can also pass this with an id list, tuple. :param return_json: Set to false will return a dict of dataclass for IgBusComment. Or return json data. Default is false. :return: Business medias information. """<line_sep>ids=enf_comma_separated(field="ids" value=ids)<if_stmt>fields<is><none><block_start>fields=const.IG_BUSINESS_COMMENT_PUBLIC_FIELDS<block_end>data=self.client.get_objects(ids=ids fields=enf_comma_separated(field="fields" value=fields))<if_stmt>return_json<block_start><return>data<block_end><else_stmt><block_start><return>{comment_id:IgBusComment.new_from_json_dict(item)<for>comment_id,item data.items()}<block_end><block_end><def_stmt>get_replies self comment_id:str fields:Optional[Union[str list tuple]]=<none> count:Optional[int]=10 limit:Optional[int]=10 return_json:bool=<false> <arrow>Union[IgBusReplies dict]<block_start>""" Getting All Replies (Comments) on a Comment :param comment_id: ID for the comment. :param fields: Comma-separated id string for data fields which you want. You can also pass this with an id list, tuple. :param count: The total count for you to get data. :param limit: Each request retrieve objects count. It should no more than 100. Default is None will use api default limit. :param return_json: Set to false will return a dataclass for IgBusReplies. Or return json data. Default is false. :return: Comment replies response information. 
"""<if_stmt>fields<is><none><block_start>fields=const.IG_BUSINESS_REPLY_PUBLIC_FIELDS<block_end>data=self.client.get_full_connections(object_id=comment_id connection="replies" fields=enf_comma_separated(field="fields" value=fields) count=count limit=limit )<if_stmt>return_json<block_start><return>data<block_end><else_stmt><block_start><return>IgBusReplies.new_from_json_dict(data)<block_end><block_end><block_end><class_stmt>IGBusinessReply(BaseResource)<block_start><def_stmt>get_info self reply_id:str fields:Optional[Union[str list tuple]]=<none> return_json:bool=<false> <arrow>Union[IgBusReply dict]<block_start>""" Get information about a Business reply. :param reply_id: ID for reply. :param fields: Comma-separated id string for data fields which you want. You can also pass this with an id list, tuple. :param return_json: Set to false will return a dataclass for IgBusReply. Or return json data. Default is false. :return: Business reply information. """<if_stmt>fields<is><none><block_start>fields=const.IG_BUSINESS_REPLY_PUBLIC_FIELDS<block_end>data=self.client.get_object(object_id=reply_id fields=enf_comma_separated(field="fields" value=fields) )<if_stmt>return_json<block_start><return>data<block_end><else_stmt><block_start><return>IgBusComment.new_from_json_dict(data=data)<block_end><block_end><def_stmt>get_batch self ids:Optional[Union[str list tuple]] fields:Optional[Union[str list tuple]]=<none> return_json:bool=<false> <arrow>Union[Dict[str IgBusReply] dict]<block_start>""" Get batch business replies information by ids :param ids: IDs for the replies. :param fields: Comma-separated id string for data fields which you want. You can also pass this with an id list, tuple. :param return_json: Set to false will return a dict of dataclass for IgBusReply. Or return json data. Default is false. :return: Business replies information. """<line_sep>ids=enf_comma_separated(field="ids" value=ids)<if_stmt>fields<is><none><block_start>fields=const.IG_BUSINESS_REPLY_PUBLIC_FIELDS<block_end>data=self.client.get_objects(ids=ids fields=enf_comma_separated(field="fields" value=fields))<if_stmt>return_json<block_start><return>data<block_end><else_stmt><block_start><return>{reply_id:IgBusReply.new_from_json_dict(item)<for>reply_id,item data.items()}<block_end><block_end><block_end>
<import_stmt>sys<try_stmt><block_start><import_from_stmt>cStringIO StringIO<block_end><except_stmt>ImportError<block_start><import_from_stmt>StringIO StringIO<block_end><import_stmt>argparse<import_stmt>functools<import_stmt>subprocess<line_sep>LZOP_BIN='lzop'<line_sep>PV_BIN='pv'<line_sep>S3_CONNECTION_HOSTS={'us-east-1':'s3.amazonaws.com' 'us-east-2':'s3-us-east-2.amazonaws.com' 'us-west-2':'s3-us-west-2.amazonaws.com' 'us-west-1':'s3-us-west-1.amazonaws.com' 'eu-central-1':'s3-eu-central-1.amazonaws.com' 'eu-west-1':'s3-eu-west-1.amazonaws.com' 'ap-southeast-1':'s3-ap-southeast-1.amazonaws.com' 'ap-southeast-2':'s3-ap-southeast-2.amazonaws.com' 'ap-northeast-1':'s3-ap-northeast-1.amazonaws.com' 'ap-south-1':'s3.ap-south-1.amazonaws.com' 'sa-east-1':'s3-sa-east-1.amazonaws.com' 'cn-north-1':'s3.cn-north-1.amazonaws.com.cn'}<line_sep>base_parser=argparse.ArgumentParser(formatter_class=argparse.RawDescriptionHelpFormatter description=__doc__)<line_sep>base_parser.add_argument('-v' '--verbose' action='store_true' help='increase output verbosity')<def_stmt>add_s3_arguments arg_parser<block_start>""" Adds common S3 argument to a parser """<line_sep>arg_parser.add_argument('--aws-access-key-id' help="public AWS access key.")<line_sep>arg_parser.add_argument('--aws-secret-access-key' help="S3 secret access key.")<line_sep>arg_parser.add_argument('--s3-bucket-region' default='us-east-1' help="S3 bucket region (default us-east-1)")<line_sep>arg_parser.add_argument('--s3-ssenc' action='store_true' help="Enable AWS S3 server-side encryption")<line_sep>arg_parser.add_argument('--s3-bucket-name' required=<true> help="S3 bucket name for backups.")<line_sep>arg_parser.add_argument('--s3-base-path' required=<true> help="S3 base path for backups.")<line_sep><return>arg_parser<block_end><def_stmt>get_s3_connection_host s3_bucket_region<block_start><return>S3_CONNECTION_HOSTS[s3_bucket_region]<block_end><def_stmt>map_wrap f<block_start>""" Fix annoying multiprocessing.imap bug when sending *args and **kwargs """<line_sep>@functools.wraps(f)<def_stmt>wrapper *args **kwargs<block_start><return>apply(f *args **kwargs)<block_end><return>wrapper<block_end><def_stmt>_check_bin bin_name<block_start><try_stmt><block_start>subprocess.check_call("{} --version > /dev/null 2>&1".format(bin_name) shell=<true>)<block_end><except_stmt>subprocess.CalledProcessError<block_start>sys.exit("{} not found on path".format(bin_name))<block_end><block_end><def_stmt>check_lzop <block_start>_check_bin(LZOP_BIN)<block_end><def_stmt>check_pv <block_start>_check_bin(PV_BIN)<block_end><def_stmt>compressed_pipe path size rate_limit quiet<block_start>""" Returns a generator that yields compressed chunks of the given file_path compression is done with lzop """<line_sep>lzop=subprocess.Popen((LZOP_BIN '--stdout' path) bufsize=size stdout=subprocess.PIPE)<if_stmt>rate_limit<g>0<block_start>pv_cmd=[PV_BIN '--rate-limit' '{}k'.format(rate_limit)]<if_stmt>quiet<block_start>pv_cmd.insert(1 '--quiet')<block_end>pv=subprocess.Popen(pv_cmd stdin=lzop.stdout stdout=subprocess.PIPE)<block_end><while_stmt><true><block_start><if_stmt>rate_limit<g>0<block_start>chunk=pv.stdout.read(size)<block_end><else_stmt><block_start>chunk=lzop.stdout.read(size)<block_end><if_stmt><not>chunk<block_start><break><block_end><yield>StringIO(chunk)<block_end><block_end><def_stmt>decompression_pipe path<block_start>lzop=subprocess.Popen((LZOP_BIN '-d' '-o' path) stdin=subprocess.PIPE)<line_sep><return>lzop<block_end>
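A rough usage sketch of the compression pipeline above, added for illustration; it assumes `lzop` and `pv` are on the PATH, that the input file exists, and it follows the module's Python 2 style. In the real backup tool each chunk would become an S3 multipart upload part; here the chunks are simply concatenated back to disk.

check_lzop()
check_pv()

chunk_size = 64 * 1024 * 1024      # 64 MiB of compressed data per chunk
with open('snapshot.db.lzo', 'wb') as out:
    # rate_limit is in kB/s (enforced through pv); quiet suppresses pv's progress output
    for chunk in compressed_pipe('snapshot.db', size=chunk_size, rate_limit=10000, quiet=True):
        out.write(chunk.getvalue())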
# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. <import_stmt>encodings<import_stmt>json<import_stmt>os<import_stmt>subprocess<import_stmt>sys<import_stmt>time<import_stmt>pytest<import_stmt>requests<line_sep>PING_URL='http://localhost:8080/ping'<line_sep>INVOCATION_URL='http://localhost:8080/models/{}/invoke'<line_sep>MODELS_URL='http://localhost:8080/models'<line_sep>DELETE_MODEL_URL='http://localhost:8080/models/{}'<line_sep>path=os.path.abspath(__file__)<line_sep>resource_path=os.path.join(os.path.dirname(path) '..' 'resources')<line_sep>@pytest.fixture(scope='session' autouse=<true>)<def_stmt>volume <block_start><try_stmt><block_start>model_dir=os.path.join(resource_path 'models')<line_sep>subprocess.check_call('docker volume create --name dynamic_endpoint_model_volume --opt type=none '<concat>'--opt device={} --opt o=bind'.format(model_dir).split())<line_sep><yield>model_dir<block_end><finally_stmt><block_start>subprocess.check_call('docker volume rm dynamic_endpoint_model_volume'.split())<block_end><block_end>@pytest.fixture(scope='session' autouse=<true>)<def_stmt>modulevolume <block_start><try_stmt><block_start>module_dir=os.path.join(resource_path 'module')<line_sep>subprocess.check_call('docker volume create --name dynamic_endpoint_module_volume --opt type=none '<concat>'--opt device={} --opt o=bind'.format(module_dir).split())<line_sep><yield>module_dir<block_end><finally_stmt><block_start>subprocess.check_call('docker volume rm dynamic_endpoint_module_volume'.split())<block_end><block_end>@pytest.fixture(scope='module' autouse=<true>)<def_stmt>container request docker_base_name tag<block_start>test_name='sagemaker-sklearn-serving-test'<try_stmt><block_start>command=('docker run --name {} -p 8080:8080'<concat>' --mount type=volume,source=dynamic_endpoint_model_volume,target=/opt/ml/model,readonly'<concat>' --mount type=volume,source=dynamic_endpoint_module_volume,target=/user_module,readonly'<concat>' -e SAGEMAKER_BIND_TO_PORT=8080'<concat>' -e SAGEMAKER_SAFE_PORT_RANGE=9000-9999'<concat>' -e SAGEMAKER_MULTI_MODEL=true'<concat>' -e SAGEMAKER_PROGRAM={}'<concat>' -e SAGEMAKER_SUBMIT_DIRECTORY={}'<concat>' {}:{} serve').format(test_name 'script.py' "/user_module/user_code.tar.gz" docker_base_name tag)<line_sep>proc=subprocess.Popen(command.split() stdout=sys.stdout stderr=subprocess.STDOUT)<line_sep>attempts=0<while_stmt>attempts<l>5<block_start>time.sleep(3)<try_stmt><block_start>requests.get('http://localhost:8080/ping')<line_sep><break><block_end><except_stmt>Exception<block_start>attempts<augadd>1<line_sep><pass><block_end><block_end><yield>proc.pid<block_end><finally_stmt><block_start>subprocess.check_call('docker rm -f {}'.format(test_name).split())<block_end><block_end><def_stmt>make_invocation_request data model_name content_type='text/csv'<block_start>headers={'Content-Type':content_type }<line_sep>response=requests.post(INVOCATION_URL.format(model_name) data=data headers=headers)<line_sep><return>response.status_code 
json.loads(response.content.decode(encodings.utf_8.getregentry().name))<block_end><def_stmt>make_list_model_request <block_start>response=requests.get(MODELS_URL)<line_sep><return>response.status_code json.loads(response.content.decode(encodings.utf_8.getregentry().name))<block_end><def_stmt>make_get_model_request model_name<block_start>response=requests.get(MODELS_URL+'/{}'.format(model_name))<line_sep><return>response.status_code json.loads(response.content.decode(encodings.utf_8.getregentry().name))<block_end><def_stmt>make_load_model_request data content_type='application/json'<block_start>headers={'Content-Type':content_type}<line_sep>response=requests.post(MODELS_URL data=data headers=headers)<line_sep><return>response.status_code response.content.decode(encodings.utf_8.getregentry().name)<block_end><def_stmt>make_unload_model_request model_name<block_start>response=requests.delete(DELETE_MODEL_URL.format(model_name))<line_sep><return>response.status_code response.content.decode(encodings.utf_8.getregentry().name)<block_end><def_stmt>test_ping <block_start>res=requests.get(PING_URL)<assert_stmt>res.status_code<eq>200<block_end><def_stmt>test_list_models_empty <block_start>code,res=make_list_model_request()<line_sep># assert code == 200 <assert_stmt>res<eq>{'models':[]}<block_end><def_stmt>test_delete_unloaded_model # unloads the given model/version, no-op if not loaded <block_start>model_name='non-existing-model'<line_sep>code,res=make_unload_model_request(model_name)<assert_stmt>code<eq>404<block_end><def_stmt>test_load_and_unload_model <block_start>model_name='pickled-model-1'<line_sep>model_data={'model_name':model_name 'url':'/opt/ml/model/{}'.format(model_name)}<line_sep>code,res=make_load_model_request(json.dumps(model_data))<assert_stmt>code<eq>200 res<line_sep>res_json=json.loads(res)<assert_stmt>res_json['status']<eq>'Workers scaled'<line_sep>code,res=make_invocation_request('0.0, 0.0, 0.0, 0.0, 0.0, 0.0' model_name)<assert_stmt>code<eq>200 res<line_sep>code,res=make_unload_model_request(model_name)<assert_stmt>code<eq>200 res<line_sep>res_json=json.loads(res)<assert_stmt>res_json['status']<eq>"Model \"{}\" unregistered".format(model_name) res<line_sep>code,res=make_invocation_request('0.0, 0.0, 0.0, 0.0, 0.0, 0.0' model_name)<assert_stmt>code<eq>404 res<assert_stmt>res['message']<eq>"Model not found: {}".format(model_name) res<block_end><def_stmt>test_load_and_unload_two_models <block_start>model_name_0='pickled-model-1'<line_sep>model_data_0={'model_name':model_name_0 'url':'/opt/ml/model/{}'.format(model_name_0)}<line_sep>code,res=make_load_model_request(json.dumps(model_data_0))<assert_stmt>code<eq>200 res<line_sep>res_json=json.loads(res)<assert_stmt>res_json['status']<eq>'Workers scaled'<line_sep>model_name_1='pickled-model-2'<line_sep>model_data_1={'model_name':model_name_1 'url':'/opt/ml/model/{}'.format(model_name_1)}<line_sep>code,res=make_load_model_request(json.dumps(model_data_1))<assert_stmt>code<eq>200 res<line_sep>res_json=json.loads(res)<assert_stmt>res_json['status']<eq>'Workers scaled'<line_sep>code,res=make_invocation_request('0.0, 0.0, 0.0, 0.0, 0.0, 0.0' model_name_0)<assert_stmt>code<eq>200 res<line_sep>code,res=make_invocation_request('0.0, 0.0, 0.0, 0.0, 0.0, 0.0' model_name_1)<assert_stmt>code<eq>200 res<line_sep>code,res=make_unload_model_request(model_name_0)<assert_stmt>code<eq>200 res<line_sep>res_json=json.loads(res)<assert_stmt>res_json['status']<eq>"Model \"{}\" unregistered".format(model_name_0) 
res<line_sep>code,res=make_unload_model_request(model_name_1)<assert_stmt>code<eq>200 res<line_sep>res_json=json.loads(res)<assert_stmt>res_json['status']<eq>"Model \"{}\" unregistered".format(model_name_1) res<block_end><def_stmt>test_container_start_invocation_fail <block_start>x={'instances':[1.0 2.0 5.0]}<line_sep>code,res=make_invocation_request(json.dumps(x) 'half_plus_three')<assert_stmt>code<eq>404<assert_stmt>res['message']<eq>"Model not found: {}".format('half_plus_three')<block_end><def_stmt>test_load_one_model_two_times <block_start>model_name='pickled-model-1'<line_sep>model_data={'model_name':model_name 'url':'/opt/ml/model/{}'.format(model_name)}<line_sep>code_load,res=make_load_model_request(json.dumps(model_data))<assert_stmt>code_load<eq>200 res<line_sep>res_json=json.loads(res)<assert_stmt>res_json['status']<eq>'Workers scaled'<line_sep>code_load,res=make_load_model_request(json.dumps(model_data))<assert_stmt>code_load<eq>409<line_sep>res_json=json.loads(res)<assert_stmt>res_json['message']<eq>'Model {} is already registered.'.format(model_name)<block_end>
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # <def_stmt>f_gold arr n<block_start>s=[]<line_sep>j=0<line_sep>ans=0<for_stmt>i range(n)<block_start><while_stmt>(j<l>n<and>(arr[j]<not><in>s))<block_start>s.append(arr[j])<line_sep>j<augadd>1<block_end>ans<augadd>((j-i)<times>(j-i+1))<floordiv>2<line_sep>s.remove(arr[i])<block_end><return>ans<block_end>#TOFILL <if_stmt>__name__<eq>'__main__'<block_start>param=[([3 4 5 6 12 15 16 17 20 20 22 24 24 27 28 34 37 39 39 41 43 49 49 51 55 62 63 67 71 74 74 74 77 84 84 89 89 97 99] 24 ) ([-8 54 -22 18 20 44 0 54 90 -4 4 40 -74 -16] 13 ) ([0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1] 28 ) ([36 71 36 58 38 90 17] 4 ) ([-90 -32 -16 18 38 82] 5 ) ([1 0 1] 2 ) ([3 11 21 25 28 28 38 42 48 53 55 55 55 58 71 75 79 80 80 94 96 99] 20 ) ([-16 -52 -4 -46 54 0 8 -64 -82 -10 -62 -10 58 44 -28 86 -24 16 44 22 -28 -42 -52 8 76 -44 -34 2 88 -88 -14 -84 -36 -68 76 20 20 -50] 35 ) ([0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] 27 ) ([19 13 61 32 92 90 12 81 52] 5 )]<line_sep>n_success=0<for_stmt>i,parameters_set enumerate(param)<block_start><if_stmt>f_filled(*parameters_set)<eq>f_gold(*parameters_set)<block_start>n_success<augadd>1<block_end><block_end>print("#Results: %i, %i"%(n_success len(param)))<block_end>
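For readability, an added sketch of what the sliding-window routine above computes: the sum of the lengths of all subarrays whose elements are pairwise distinct. The helper is an equivalent set-based rewrite with a tiny worked check; it is not part of the benchmark harness, which expects `f_filled` to be supplied at the #TOFILL marker.

def sum_distinct_subarray_lengths(arr):
    # Same idea as f_gold: grow a window [i, j) of distinct values; the distinct
    # subarrays starting at i have lengths 1..(j-i), which sum to (j-i)*(j-i+1)//2.
    seen = set()
    j = ans = 0
    for i in range(len(arr)):
        while j < len(arr) and arr[j] not in seen:
            seen.add(arr[j])
            j += 1
        ans += (j - i) * (j - i + 1) // 2
        seen.remove(arr[i])
    return ans

# The distinct-element subarrays of [1, 2, 1] are [1], [2], [1], [1, 2], [2, 1]:
assert sum_distinct_subarray_lengths([1, 2, 1]) == 1 + 1 + 1 + 2 + 2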
# -*- coding: utf-8 -*- <import_from_stmt>mock patch<import_from_stmt>sceptre.resolvers.environment_variable EnvironmentVariable<class_stmt>TestEnvironmentVariableResolver(object)<block_start><def_stmt>setup_method self test_method<block_start>self.environment_variable_resolver=EnvironmentVariable(argument=<none>)<block_end>@patch("sceptre.resolvers.environment_variable.os")<def_stmt>test_resolving_with_set_environment_variable self mock_os<block_start>mock_os.environ={"VARIABLE":"value"}<line_sep>self.environment_variable_resolver.argument="VARIABLE"<line_sep>response=self.environment_variable_resolver.resolve()<assert_stmt>response<eq>"value"<block_end><def_stmt>test_resolving_with_unset_environment_variable self<block_start>self.environment_variable_resolver.argument="UNSETVARIABLE"<line_sep>response=self.environment_variable_resolver.resolve()<assert_stmt>response<is><none><block_end><def_stmt>test_resolving_with_environment_variable_name_as_none self<block_start>self.environment_variable_resolver.argument=<none><line_sep>response=self.environment_variable_resolver.resolve()<assert_stmt>response<is><none><block_end><block_end>
<import_from_future_stmt> division<import_from_future_stmt> unicode_literals<import_stmt>torch<def_stmt>get_param_buffer_for_ema model update_buffer=<false> required_buffers=['running_mean' 'running_var']<block_start>params=model.parameters()<line_sep>all_param_buffer=[p<for>p params<if>p.requires_grad]<if_stmt>update_buffer<block_start>named_buffers=model.named_buffers()<for_stmt>key,value named_buffers<block_start><for_stmt>buffer_name required_buffers<block_start><if_stmt>buffer_name<in>key<block_start>all_param_buffer.append(value)<line_sep><break><block_end><block_end><block_end><block_end><return>all_param_buffer<block_end><class_stmt>ExponentialMovingAverage<block_start>""" Maintains (exponential) moving average of a set of parameters. """<def_stmt>__init__ self parameters decay use_num_updates=<true><block_start>""" Args: parameters: Iterable of `torch.nn.Parameter`; usually the result of `model.parameters()`. decay: The exponential decay. use_num_updates: Whether to use number of updates when computing averages. """<if_stmt>decay<l>0.0<or>decay<g>1.0<block_start><raise>ValueError('Decay must be between 0 and 1')<block_end>self.decay=decay<line_sep>self.num_updates=0<if>use_num_updates<else><none><line_sep>self.shadow_params=[p.clone().detach()<for>p parameters]<line_sep>self.collected_params=[]<block_end><def_stmt>update self parameters<block_start>""" Update currently maintained parameters. Call this every time the parameters are updated, such as the result of the `optimizer.step()` call. Args: parameters: Iterable of `torch.nn.Parameter`; usually the same set of parameters used to initialize this object. """<line_sep>decay=self.decay<if_stmt>self.num_updates<is><not><none><block_start>self.num_updates<augadd>1<line_sep>decay=min(decay (1+self.num_updates)/(10+self.num_updates))<block_end>one_minus_decay=1.0-decay<with_stmt>torch.no_grad()<block_start><for_stmt>s_param,param zip(self.shadow_params parameters)<block_start>s_param.sub_(one_minus_decay<times>(s_param-param))<block_end><block_end><block_end><def_stmt>copy_to self parameters<block_start>""" Copy current parameters into given collection of parameters. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored moving averages. """<for_stmt>s_param,param zip(self.shadow_params parameters)<block_start>param.data.copy_(s_param.data)<block_end><block_end><def_stmt>store self parameters<block_start>""" Save the current parameters for restoring later. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be temporarily stored. """<line_sep>self.collected_params=[param.clone()<for>param parameters]<block_end><def_stmt>restore self parameters<block_start>""" Restore the parameters stored with the `store` method. Useful to validate the model with EMA parameters without affecting the original optimization process. Store the parameters before the `copy_to` method. After validation (or model saving), use this to restore the former parameters. Args: parameters: Iterable of `torch.nn.Parameter`; the parameters to be updated with the stored parameters. """<for_stmt>c_param,param zip(self.collected_params parameters)<block_start>param.data.copy_(c_param.data)<block_end><del_stmt>(self.collected_params)<block_end><block_end>
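An added usage sketch (an illustration, not part of the module) of the intended call pattern: update the shadow weights after each optimizer step, then temporarily swap them in for evaluation and restore the raw weights afterwards.

import torch
import torch.nn as nn
import torch.optim as optim

model = nn.Linear(10, 2)
optimizer = optim.SGD(model.parameters(), lr=0.1)
ema = ExponentialMovingAverage(model.parameters(), decay=0.999)

for _ in range(100):
    x = torch.randn(8, 10)
    loss = model(x).pow(2).mean()
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    ema.update(model.parameters())   # fold the freshly updated weights into the average

# Evaluate with the averaged weights, then put the raw training weights back.
ema.store(model.parameters())
ema.copy_to(model.parameters())
# ... run validation with `model` here ...
ema.restore(model.parameters())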
<import_stmt>requests<import_from_stmt>requests.packages.urllib3 poolmanager<line_sep>__all__=('poolmanager' )<line_sep>
# Original code from SPIN: https://github.com/nkolot/SPIN <import_stmt>torch<import_from_stmt>torch.nn functional<as>F<import_stmt>numpy<as>np<import_stmt>torchgeometry<line_sep>""" Useful geometric operations, e.g. Perspective projection and a differentiable Rodrigues formula Parts of the code are taken from https://github.com/MandyMo/pytorch_HMR """<def_stmt>batch_rodrigues theta<block_start>"""Convert axis-angle representation to rotation matrix. Args: theta: size = [B, 3] Returns: Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] """<line_sep>l1norm=torch.norm(theta+1e-8 p=2 dim=1)<line_sep>angle=torch.unsqueeze(l1norm -1)<line_sep>normalized=torch.div(theta angle)<line_sep>angle=angle<times>0.5<line_sep>v_cos=torch.cos(angle)<line_sep>v_sin=torch.sin(angle)<line_sep>quat=torch.cat([v_cos v_sin<times>normalized] dim=1)<line_sep><return>quat_to_rotmat(quat)<block_end><def_stmt>quat_to_rotmat quat<block_start>"""Convert quaternion coefficients to rotation matrix. Args: quat: size = [B, 4] 4 <===>(w, x, y, z) Returns: Rotation matrix corresponding to the quaternion -- size = [B, 3, 3] """<line_sep>norm_quat=quat<line_sep>norm_quat=norm_quat/norm_quat.norm(p=2 dim=1 keepdim=<true>)<line_sep>w,x,y,z=norm_quat[: 0] norm_quat[: 1] norm_quat[: 2] norm_quat[: 3]<line_sep>B=quat.size(0)<line_sep>w2,x2,y2,z2=w.pow(2) x.pow(2) y.pow(2) z.pow(2)<line_sep>wx,wy,wz=w<times>x w<times>y w<times>z<line_sep>xy,xz,yz=x<times>y x<times>z y<times>z<line_sep>rotMat=torch.stack([w2+x2-y2-z2 2<times>xy-2<times>wz 2<times>wy+2<times>xz 2<times>wz+2<times>xy w2-x2+y2-z2 2<times>yz-2<times>wx 2<times>xz-2<times>wy 2<times>wx+2<times>yz w2-x2-y2+z2] dim=1).view(B 3 3)<line_sep><return>rotMat<block_end><def_stmt>cross_product u v<block_start>batch=u.shape[0]<line_sep>i=u[: 1]<times>v[: 2]-u[: 2]<times>v[: 1]<line_sep>j=u[: 2]<times>v[: 0]-u[: 0]<times>v[: 2]<line_sep>k=u[: 0]<times>v[: 1]-u[: 1]<times>v[: 0]<line_sep>out=torch.cat((i.view(batch 1) j.view(batch 1) k.view(batch 1)) 1)<line_sep><return>out<block_end><def_stmt>normalize_vector v<block_start>batch=v.shape[0]<line_sep>v_mag=torch.sqrt(v.pow(2).sum(1))# batch v_mag=torch.max(v_mag v.new([1e-8]))<line_sep>v_mag=v_mag.view(batch 1).expand(batch v.shape[1])<line_sep>v=v/v_mag<line_sep><return>v<block_end>#Code from <def_stmt>rot6d_to_rotmat x<block_start>"""Convert 6D rotation representation to 3x3 rotation matrix. Based on Zhou et al., "On the Continuity of Rotation Representations in Neural Networks", CVPR 2019 Input: (B,6) Batch of 6-D rotation representations Output: (B,3,3) Batch of corresponding rotation matrices """<line_sep>x=x.view(-1 3 2)<line_sep>a1=x[: : 0]<line_sep>a2=x[: : 1]<line_sep>b1=F.normalize(a1)<line_sep>b2=F.normalize(a2-torch.einsum('bi,bi->b' b1 a2).unsqueeze(-1)<times>b1)<line_sep>b3=torch.cross(b1 b2)<line_sep><return>torch.stack((b1 b2 b3) dim=-1)<block_end><def_stmt>perspective_projection points rotation translation focal_length camera_center<block_start>""" This function computes the perspective projection of a set of points. 
Input: points (bs, N, 3): 3D points rotation (bs, 3, 3): Camera rotation translation (bs, 3): Camera translation focal_length (bs,) or scalar: Focal length camera_center (bs, 2): Camera center """<line_sep>batch_size=points.shape[0]<line_sep>K=torch.zeros([batch_size 3 3] device=points.device)<line_sep>K[: 0 0]=focal_length<line_sep>K[: 1 1]=focal_length<line_sep>K[: 2 2]=1.<line_sep>K[: :-1 -1]=camera_center<line_sep># Transform points points=torch.einsum('bij,bkj->bki' rotation points)<line_sep>points=points+translation.unsqueeze(1)<line_sep># Apply perspective distortion projected_points=points/points[: : -1].unsqueeze(-1)<line_sep># Apply camera intrinsics projected_points=torch.einsum('bij,bkj->bki' K projected_points)<line_sep><return>projected_points[: : :-1]<block_end><def_stmt>estimate_translation_np S joints_2d joints_conf focal_length=5000 img_size=224<block_start>"""Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d. Input: S: (25, 3) 3D joint locations joints: (25, 3) 2D joint locations and confidence Returns: (3,) camera translation vector """<line_sep>num_joints=S.shape[0]<line_sep># focal length f=np.array([focal_length focal_length])<line_sep># optical center center=np.array([img_size/2. img_size/2.])<line_sep># transformations Z=np.reshape(np.tile(S[: 2] (2 1)).T -1)<line_sep>XY=np.reshape(S[: 0:2] -1)<line_sep>O=np.tile(center num_joints)<line_sep>F=np.tile(f num_joints)<line_sep>weight2=np.reshape(np.tile(np.sqrt(joints_conf) (2 1)).T -1)<line_sep># least squares Q=np.array([F<times>np.tile(np.array([1 0]) num_joints) F<times>np.tile(np.array([0 1]) num_joints) O-np.reshape(joints_2d -1)]).T<line_sep>c=(np.reshape(joints_2d -1)-O)<times>Z-F<times>XY<line_sep># weighted least squares W=np.diagflat(weight2)<line_sep>Q=np.dot(W Q)<line_sep>c=np.dot(W c)<line_sep># square matrix A=np.dot(Q.T Q)<line_sep>b=np.dot(Q.T c)<line_sep># solution trans=np.linalg.solve(A b)<line_sep><return>trans<block_end><def_stmt>estimate_translation S joints_2d focal_length=5000. img_size=224.<block_start>"""Find camera translation that brings 3D joints S closest to 2D the corresponding joints_2d. 
Input: S: (B, 49, 3) 3D joint locations joints: (B, 49, 3) 2D joint locations and confidence Returns: (B, 3) camera translation vectors """<line_sep>device=S.device<line_sep># Use only joints 25:49 (GT joints) S=S[: 25: :].cpu().numpy()<line_sep>joints_2d=joints_2d[: 25: :].cpu().numpy()<line_sep>joints_conf=joints_2d[: : -1]<line_sep>joints_2d=joints_2d[: : :-1]<line_sep>trans=np.zeros((S.shape[0] 3) dtype=np.float32)<line_sep># Find the translation for each example in the batch <for_stmt>i range(S.shape[0])<block_start>S_i=S[i]<line_sep>joints_i=joints_2d[i]<line_sep>conf_i=joints_conf[i]<line_sep>trans[i]=estimate_translation_np(S_i joints_i conf_i focal_length=focal_length img_size=img_size)<block_end><return>torch.from_numpy(trans).to(device)<block_end><def_stmt>weakProjection_gpu skel3D scale trans2D# if len(skel3D.shape)==1: # skel3D = np.reshape(skel3D, (-1,3)) <block_start>skel3D=skel3D.view((skel3D.shape[0] -1 3))<line_sep>trans2D=trans2D.view((trans2D.shape[0] 1 2))<line_sep>scale=scale.view((scale.shape[0] 1 1))<line_sep>skel3D_proj=scale<times>skel3D[: : :2]+trans2D<line_sep><return>skel3D_proj<block_end>#skel3D_proj.view((skel3D.shape[0],-1)) #(N, 19*2) o #(57) (1) (2) <def_stmt>weakProjection skel3D scale trans2D<block_start>skel3D_proj=scale<times>skel3D[: :2]+trans2D<line_sep><return>skel3D_proj<block_end>#skel3D_proj.view((skel3D.shape[0],-1)) #(N, 19*2) o <def_stmt>rotmat_to_angleaxis init_pred_rotmat<block_start>""" init_pred_rotmat: torch.tensor with (24,3,3) dimension """<line_sep>device=init_pred_rotmat.device<line_sep>ones=torch.tensor([0 0 1] dtype=torch.float32 ).view(1 3 1).expand(init_pred_rotmat.shape[1] -1 -1).to(device)<line_sep>pred_rotmat_hom=torch.cat([init_pred_rotmat.view(-1 3 3) ones] dim=-1)#24,3,4 pred_aa=torchgeometry.rotation_matrix_to_angle_axis(pred_rotmat_hom).contiguous().view(1 -1)#[1,72] # tgm.rotation_matrix_to_angle_axis returns NaN for 0 rotation, so manually hack it pred_aa[torch.isnan(pred_aa)]=0.0#[1,72] pred_aa=pred_aa.view(1 24 3)<line_sep><return>pred_aa<block_end>
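A small added shape-check sketch, not taken from the SPIN repository, exercising only the two rotation conversions defined above.

import torch

aa = torch.zeros(4, 3)                     # four identity rotations in axis-angle form
R = batch_rodrigues(aa)                    # -> (4, 3, 3)
assert torch.allclose(R, torch.eye(3).expand(4, 3, 3), atol=1e-5)

x6d = torch.randn(4, 6)                    # arbitrary 6D rotation representations
R6 = rot6d_to_rotmat(x6d)                  # -> (4, 3, 3) proper rotation matrices
assert R6.shape == (4, 3, 3)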
# -*- coding: utf-8 -*- <class_stmt>Empty(object)<block_start>""" Empty object represents an emptiness state in `grappa`. """<def_stmt>__repr__ self<block_start><return>'Empty'<block_end><def_stmt>__len__ self<block_start><return>0<block_end><block_end># Object reference representing emptiness empty=Empty()<line_sep>
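A short added illustration of the intended role of `empty`: a sentinel default that lets an explicitly passed `None` be told apart from no argument at all.

def expect(value=empty):
    if value is empty:
        return 'no value supplied'
    return 'got {!r}'.format(value)

assert expect() == 'no value supplied'
assert expect(None) == 'got None'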
<import_from_stmt>.arenas_configurations_proto_pb2 *<import_from_stmt>.arena_configuration_proto_pb2 *<import_from_stmt>.items_to_spawn_proto_pb2 *<import_from_stmt>.vector_proto_pb2 *<import_from_stmt>.__init__ *<line_sep>
<import_stmt>re<import_from_stmt>six.moves.urllib.parse urljoin<import_from_stmt>scrapely.extractors url<as>strip_url<import_from_stmt>scrapy.utils.url safe_download_url<import_from_stmt>scrapy.utils.markup unquote_markup<import_from_stmt>slybot.baseurl get_base_url<line_sep>disallowed=re.compile('[\x00-\x1F\x7F]')<class_stmt>UrlFieldTypeProcessor(object)<block_start>"""Renders URLs as links"""<line_sep>name='url'<line_sep>description='URL'<line_sep>limit=80<def_stmt>extract self text<block_start><if_stmt>text<is><not><none><block_start><return>strip_url(text)<block_end><return>''<block_end><def_stmt>adapt self text htmlpage=<none><block_start><if_stmt>htmlpage<is><none><block_start><return>text<block_end><if_stmt>text<is><none><block_start><return><block_end>encoding=getattr(htmlpage 'encoding' 'utf-8')<line_sep>text=text.encode(encoding)<line_sep>unquoted=unquote_markup(text encoding=encoding)<line_sep>cleaned=strip_url(disallowed.sub('' unquoted))<line_sep>base=get_base_url(htmlpage).encode(encoding)<line_sep>base_url=strip_url(unquote_markup(base encoding=encoding))<line_sep>joined=urljoin(base_url cleaned)<line_sep><return>safe_download_url(joined)<block_end><block_end>
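A quick added example of the extractor half of the processor above; `adapt()` additionally needs the scraped HtmlPage so relative links can be resolved against the page's base URL and made safe for download.

processor = UrlFieldTypeProcessor()
raw = '  http://example.com/item?id=1  '
cleaned = processor.extract(raw)   # delegates cleanup to scrapely's url extractor
print(cleaned)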
""" This module simply exposes a wrapper of a pydub.AudioSegment object. """<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>collections<import_stmt>functools<import_stmt>itertools<import_stmt>math<import_stmt>numpy<as>np<import_stmt>pickle<import_stmt>pydub<import_stmt>os<import_stmt>random<import_stmt>scipy.signal<as>signal<import_stmt>subprocess<import_stmt>sys<import_stmt>tempfile<import_stmt>warnings<import_stmt>webrtcvad<line_sep>MS_PER_S=1000<line_sep>S_PER_MIN=60<line_sep>MS_PER_MIN=MS_PER_S<times>S_PER_MIN<def_stmt>deprecated func<block_start>""" Deprecator decorator. """<line_sep>@functools.wraps(func)<def_stmt>new_func *args **kwargs<block_start>warnings.warn("Call to deprecated function {}.".format(func.__name__) category=DeprecationWarning stacklevel=2)<line_sep><return>func(*args **kwargs)<block_end><return>new_func<block_end><class_stmt>AudioSegment<block_start>""" This class is a wrapper for a pydub.AudioSegment that provides additional methods. """<def_stmt>__init__ self pydubseg name<block_start>self.seg=pydubseg<line_sep>self.name=name<block_end><def_stmt>__getattr__ self attr<block_start>orig_attr=self.seg.__getattribute__(attr)<if_stmt>callable(orig_attr)<block_start><def_stmt>hooked *args **kwargs<block_start>result=orig_attr(*args **kwargs)<if_stmt>result<eq>self.seg<block_start><return>self<block_end><elif_stmt>type(result)<eq>pydub.AudioSegment<block_start><return>AudioSegment(result self.name)<block_end><else_stmt><block_start><return>result<block_end><block_end><return>hooked<block_end><else_stmt><block_start><return>orig_attr<block_end><block_end><def_stmt>__len__ self<block_start><return>len(self.seg)<block_end><def_stmt>__eq__ self other<block_start><return>self.seg<eq>other<block_end><def_stmt>__ne__ self other<block_start><return>self.seg<ne>other<block_end><def_stmt>__iter__ self other<block_start><return>(x<for>x self.seg)<block_end><def_stmt>__getitem__ self millisecond<block_start><return>AudioSegment(self.seg[millisecond] self.name)<block_end><def_stmt>__add__ self arg<block_start><if_stmt>type(arg)<eq>AudioSegment<block_start>self.seg._data=self.seg._data+arg.seg._data<block_end><else_stmt><block_start>self.seg=self.seg+arg<block_end><return>self<block_end><def_stmt>__radd__ self rarg<block_start><return>self.seg.__radd__(rarg)<block_end><def_stmt>__repr__ self<block_start><return>str(self)<block_end><def_stmt>__str__ self<block_start>s="%s: %s channels, %s bit, sampled @ %s kHz, %.3fs long"%(self.name str(self.channels) str(self.sample_width<times>8) str(self.frame_rate/1000.0) self.duration_seconds)<line_sep><return>s<block_end><def_stmt>__sub__ self arg<block_start><if_stmt>type(arg)<eq>AudioSegment<block_start>self.seg=self.seg-arg.seg<block_end><else_stmt><block_start>self.seg=self.seg-arg<block_end><return>self<block_end><def_stmt>__mul__ self arg<block_start><if_stmt>type(arg)<eq>AudioSegment<block_start>self.seg=self.seg<times>arg.seg<block_end><else_stmt><block_start>self.seg=self.seg<times>arg<block_end><return>self<block_end>@property<def_stmt>spl self<block_start>""" Sound Pressure Level - defined as 20 * log10(abs(value)). Returns a numpy array of SPL dB values. """<line_sep><return>20.0<times>np.log10(np.abs(self.to_numpy_array()+1E-9))<block_end><def_stmt>_bandpass_filter self data low high fs order=5<block_start>""" :param data: The data (numpy array) to be filtered. :param low: The low cutoff in Hz. :param high: The high cutoff in Hz. :param fs: The sample rate (in Hz) of the data. 
:param order: The order of the filter. The higher the order, the tighter the roll-off. :returns: Filtered data (numpy array). """<line_sep>nyq=0.5<times>fs<line_sep>low=low/nyq<line_sep>high=high/nyq<line_sep>b,a=signal.butter(order [low high] btype='band')<line_sep>y=signal.lfilter(b a data)<line_sep><return>y<block_end><def_stmt>auditory_scene_analysis self<block_start>""" Algorithm based on paper: Auditory Segmentation Based on Onset and Offset Analysis, by Hu and Wang, 2007. """<def_stmt>lowpass_filter data cutoff fs order=5<block_start>""" """<line_sep>nyq=0.5<times>fs<line_sep>normal_cutoff=cutoff/nyq<line_sep>b,a=signal.butter(order normal_cutoff btype='low' analog=<false>)<line_sep>y=signal.lfilter(b a data)<line_sep><return>y<block_end><import_stmt>matplotlib.pyplot<as>plt<def_stmt>visualize_time_domain seg title=""<block_start>plt.plot(seg)<line_sep>plt.title(title)<line_sep>plt.show()<line_sep>plt.clf()<block_end><def_stmt>visualize spect frequencies title=""<block_start>i=0<for_stmt>freq,(index row) zip(frequencies[::-1] enumerate(spect[::-1 :]))<block_start>plt.subplot(spect.shape[0] 1 index+1)<if_stmt>i<eq>0<block_start>plt.title(title)<line_sep>i<augadd>1<block_end>plt.ylabel("{0:.0f}".format(freq))<line_sep>plt.plot(row)<block_end>plt.show()<line_sep>plt.clf()<block_end># Normalize self into 25dB average SPL normalized=self.normalize_spl_by_average(db=25)<line_sep>visualize_time_domain(normalized.to_numpy_array() "Normalized")<line_sep># Do a band-pass filter in each frequency data=normalized.to_numpy_array()<line_sep>start_frequency=50<line_sep>stop_frequency=8000<line_sep>start=np.log10(start_frequency)<line_sep>stop=np.log10(stop_frequency)<line_sep>frequencies=np.logspace(start stop num=10 endpoint=<true> base=10.0)<line_sep>print("Dealing with the following frequencies:" frequencies)<line_sep>rows=[self._bandpass_filter(data freq<times>0.8 freq<times>1.2 self.frame_rate)<for>freq frequencies]<line_sep>rows=np.array(rows)<line_sep>spect=np.vstack(rows)<line_sep>visualize(spect frequencies "After bandpass filtering (cochlear model)")<line_sep># Half-wave rectify each frequency channel spect[spect<l>0]=0<line_sep>visualize(spect frequencies "After half-wave rectification in each frequency")<line_sep># Low-pass filter each frequency channel spect=np.apply_along_axis(lowpass_filter 1 spect 30 self.frame_rate 6)<line_sep>visualize(spect frequencies "After low-pass filtering in each frequency")<line_sep># Downsample each frequency to 400 Hz downsample_freq_hz=400<if_stmt>self.frame_rate<g>downsample_freq_hz<block_start>step=int(round(self.frame_rate/downsample_freq_hz))<line_sep>spect=spect[: ::step]<block_end>visualize(spect frequencies "After downsampling in each frequency")<line_sep># Now you have the temporal envelope of each frequency channel # Smoothing scales=[(6 1/4) (6 1/14) (1/2 1/14)]<line_sep>thetas=[0.95 0.95 0.85]<line_sep>## For each (sc, st) scale, smooth across time using st, then across frequency using sc gaussian=<lambda>x mu sig:np.exp(-np.power(x-mu 2.0)/(2<times>np.power(sig 2.0)))<line_sep>gaussian_kernel=<lambda>sig:gaussian(np.linspace(-10 10 len(frequencies)/2) 0 sig)<line_sep>spectrograms=[]<for_stmt>sc,st scales<block_start>time_smoothed=np.apply_along_axis(lowpass_filter 1 spect 1/st downsample_freq_hz 6)<line_sep>visualize(time_smoothed frequencies "After time smoothing with scale: "+str(st))<line_sep>freq_smoothed=np.apply_along_axis(np.convolve 0 spect 
gaussian_kernel(sc))<line_sep>spectrograms.append(freq_smoothed)<line_sep>visualize(freq_smoothed frequencies "After time and frequency smoothing with scales (freq) "+str(sc)+" and (time) "+str(st))<block_end>## Now we have a set of scale-space spectrograms of different scales (sc, st) # Onset/Offset Detection and Matching <def_stmt>theta_on spect<block_start><return>np.nanmean(spect)+np.nanstd(spect)<block_end><def_stmt>compute_peaks_or_valleys_of_first_derivative s do_peaks=<true><block_start>""" Takes a spectrogram and returns a 2D array of the form: 0 0 0 1 0 0 1 0 0 0 1 <-- Frequency 0 0 0 1 0 0 0 0 0 0 1 0 <-- Frequency 1 0 0 0 0 0 0 1 0 1 0 0 <-- Frequency 2 *** Time axis ******* Where a 1 means that the value in that time bin in the spectrogram corresponds to a peak/valley in the first derivative. """<line_sep>gradient=np.nan_to_num(np.apply_along_axis(np.gradient 1 s) copy=<false>)<line_sep>half_window=4<if_stmt>do_peaks<block_start>indexes=[signal.argrelextrema(gradient[i :] np.greater order=half_window)<for>i range(gradient.shape[0])]<block_end><else_stmt><block_start>indexes=[signal.argrelextrema(gradient[i :] np.less order=half_window)<for>i range(gradient.shape[0])]<block_end>extrema=np.zeros(s.shape)<for_stmt>row_index,index_array enumerate(indexes)# Each index_array is a list of indexes corresponding to all the extrema in a given row <block_start><for_stmt>col_index index_array<block_start>extrema[row_index col_index]=1<block_end><block_end><return>extrema<block_end><for_stmt>spect,(sc st) zip(spectrograms scales)# Compute sudden upward changes in spect, these are onsets of events <block_start>onsets=compute_peaks_or_valleys_of_first_derivative(spect)<line_sep># Compute sudden downward changes in spect, these are offsets of events offsets=compute_peaks_or_valleys_of_first_derivative(spect do_peaks=<false>)<line_sep>print("TOTAL ONSETS:" np.sum(onsets axis=1))<line_sep>print("TOTAL OFFSETS:" np.sum(offsets axis=1))<line_sep>exit()<line_sep># onsets and offsets are 2D arrays ## Determine the offset time for each onset: ### If t_on[c, i] represents the time of the ith onset in frequency channel c, the corresponding offset ### must occur between t_on[c, i] and t_on[c, i+1] ### If there are more than one offsets candidates in this range, choose the one with largest intensity decrease. ## Create onset/offset fronts by connecting onsets across frequency channels (connect two onsets ## if they occur within 20ms of each other). Start over whenever a frequency band does not contain an offset ## in this range. Do the same procedure for offsets. Now you have onset and offset fronts. ## Now hook up the onsets with the offsets to form segments: ## For each onset front, (t_on[c, i1, t_on[c + 1, i2], ..., t_on[c + m - 1, im]): ## matching_offsets = (t_off[c, i1], t_off[c + 1, i2], ..., t_off[c + m - 1, im]) ## Get all offset fronts which contain at least one of offset time found in matching_offsets ## Among these offset fronts, the one that crosses the most of matching_offsets is chosen, ## - call this offset front: matching_offset_front ## Update all t_offs in matching_offsets whose 'c's are in matching_offset_front to be 'matched', and ## - update their times to the corresponding channel offset in matching_offset_front. ## If all t_offs in matching_offsets are 'matched', continue to next onset front ## Now go through all the segments you have created and break them up along frequencies if the temporal ## envelopes don't match well enough. 
That is, if we have two adjacent channels c and c+1, and they ## are part of the same segment as determined above, break this segment into two along these lines ## if the correlation between them is below theta_c. Theta_c is thetas[i] where i depends on the scale. <block_end># Multiscale Integration ## ## TODO <block_end><def_stmt>detect_voice self prob_detect_voice=0.5<block_start>""" Returns self as a list of tuples: [('v', voiced segment), ('u', unvoiced segment), (etc.)] The overall order of the AudioSegment is preserved. :param prob_detect_voice: The raw probability that any random 20ms window of the audio file contains voice. :returns: The described list. """<assert_stmt>self.frame_rate<in>(48000 32000 16000 8000) "Try resampling to one of the allowed frame rates."<assert_stmt>self.sample_width<eq>2 "Try resampling to 16 bit."<assert_stmt>self.channels<eq>1 "Try resampling to one channel."<class_stmt>model_class<block_start><def_stmt>__init__ self aggressiveness<block_start>self.v=webrtcvad.Vad(int(aggressiveness))<block_end><def_stmt>predict self vector<block_start><if_stmt>self.v.is_speech(vector.raw_data vector.frame_rate)<block_start><return>1<block_end><else_stmt><block_start><return>0<block_end><block_end><block_end>model=model_class(aggressiveness=1)<line_sep>pyesno=0.3# Probability of the next 20 ms being unvoiced given that this 20 ms was voiced pnoyes=0.2# Probability of the next 20 ms being voiced given that this 20 ms was unvoiced p_realyes_outputyes=0.4# WebRTCVAD has a very high FP rate - just because it says yes, doesn't mean much p_realyes_outputno=0.05# If it says no, we can be very certain that it really is a no p_yes_raw=prob_detect_voice<line_sep>filtered=self.detect_event(model=model ms_per_input=20 transition_matrix=(pyesno pnoyes) model_stats=(p_realyes_outputyes p_realyes_outputno) event_length_s=0.25 prob_raw_yes=p_yes_raw)<line_sep>ret=[]<for_stmt>tup filtered<block_start>t=('v' tup[1])<if>tup[0]<eq>'y'<else>('u' tup[1])<line_sep>ret.append(t)<block_end><return>ret<block_end><def_stmt>dice self seconds zero_pad=<false><block_start>""" Cuts the AudioSegment into `seconds` segments (at most). So for example, if seconds=10, this will return a list of AudioSegments, in order, where each one is at most 10 seconds long. If `zero_pad` is True, the last item AudioSegment object will be zero padded to result in `seconds` seconds. :param seconds: The length of each segment in seconds. Can be either a float/int, in which case `self.duration_seconds` / `seconds` are made, each of `seconds` length, or a list-like can be given, in which case the given list must sum to `self.duration_seconds` and each segment is specified by the list - e.g. the 9th AudioSegment in the returned list will be `seconds[8]` seconds long. :param zero_pad: Whether to zero_pad the final segment if necessary. Ignored if `seconds` is a list-like. :returns: A list of AudioSegments, each of which is the appropriate number of seconds long. :raises: ValueError if a list-like is given for `seconds` and the list's durations do not sum to `self.duration_seconds`. 
"""<try_stmt><block_start>total_s=sum(seconds)<if_stmt><not>(self.duration_seconds<le>total_s+1<and>self.duration_seconds<ge>total_s-1)<block_start><raise>ValueError("`seconds` does not sum to within one second of the duration of this AudioSegment.\ given total seconds: %s and self.duration_seconds: %s"%(total_s self.duration_seconds))<block_end>starts=[]<line_sep>stops=[]<line_sep>time_ms=0<for_stmt>dur seconds<block_start>starts.append(time_ms)<line_sep>time_ms<augadd>dur<times>MS_PER_S<line_sep>stops.append(time_ms)<block_end>zero_pad=<false><block_end><except_stmt>TypeError# `seconds` is not a list <block_start>starts=range(0 int(round(self.duration_seconds<times>MS_PER_S)) int(round(seconds<times>MS_PER_S)))<line_sep>stops=(min(self.duration_seconds<times>MS_PER_S start+seconds<times>MS_PER_S)<for>start starts)<block_end>outs=[self[start:stop]<for>start,stop zip(starts stops)]<line_sep>out_lens=[out.duration_seconds<for>out outs]<line_sep># Check if our last slice is within one ms of expected - if so, we don't need to zero pad <if_stmt>zero_pad<and><not>(out_lens[-1]<le>seconds<times>MS_PER_S+1<and>out_lens[-1]<ge>seconds<times>MS_PER_S-1)<block_start>num_zeros=self.frame_rate<times>(seconds<times>MS_PER_S-out_lens[-1])<line_sep>outs[-1]=outs[-1].zero_extend(num_samples=num_zeros)<block_end><return>outs<block_end><def_stmt>detect_event self model ms_per_input transition_matrix model_stats event_length_s start_as_yes=<false> prob_raw_yes=0.5<block_start>""" A list of tuples of the form [('n', AudioSegment), ('y', AudioSegment), etc.] is returned, where tuples of the form ('n', AudioSegment) are the segments of sound where the event was not detected, while ('y', AudioSegment) tuples were the segments of sound where the event was detected. .. code-block:: python # Example usage import audiosegment import keras import keras.models import numpy as np import sys class Model: def __init__(self, modelpath): self.model = keras.models.load_model(modelpath) def predict(self, seg): _bins, fft_vals = seg.fft() fft_vals = np.abs(fft_vals) / len(fft_vals) predicted_np_form = self.model.predict(np.array([fft_vals]), batch_size=1) prediction_as_int = int(round(predicted_np_form[0][0])) return prediction_as_int modelpath = sys.argv[1] wavpath = sys.argv[2] model = Model(modelpath) seg = audiosegment.from_file(wavpath).resample(sample_rate_Hz=32000, sample_width=2, channels=1) pyes_to_no = 0.3 # The probability of one 30 ms sample being an event, and the next one not pno_to_yes = 0.2 # The probability of one 30 ms sample not being an event, and the next one yes ptrue_pos_rate = 0.8 # The true positive rate (probability of a predicted yes being right) pfalse_neg_rate = 0.3 # The false negative rate (probability of a predicted no being wrong) raw_prob = 0.7 # The raw probability of seeing the event in any random 30 ms slice of this file events = seg.detect_event(model, ms_per_input=30, transition_matrix=[pyes_to_no, pno_to_yes], model_stats=[ptrue_pos_rate, pfalse_neg_rate], event_length_s=0.25, prob_raw_yes=raw_prob) nos = [event[1] for event in events if event[0] == 'n'] yeses = [event[1] for event in events if event[0] == 'y'] if len(nos) > 1: notdetected = nos[0].reduce(nos[1:]) notdetected.export("notdetected.wav", format="WAV") if len(yeses) > 1: detected = yeses[0].reduce(yeses[1:]) detected.export("detected.wav", format="WAV") :param model: The model. 
The model must have a predict() function which takes an AudioSegment of `ms_per_input` number of ms and which outputs 1 if the audio event is detected in that input, and 0 if not. Make sure to resample the AudioSegment to the right values before calling this function on it. :param ms_per_input: The number of ms of AudioSegment to be fed into the model at a time. If this does not come out even, the last AudioSegment will be zero-padded. :param transition_matrix: An iterable of the form: [p(yes->no), p(no->yes)]. That is, the probability of moving from a 'yes' state to a 'no' state and the probability of vice versa. :param model_stats: An iterable of the form: [p(reality=1|output=1), p(reality=1|output=0)]. That is, the probability of the ground truth really being a 1, given that the model output a 1, and the probability of the ground truth being a 1, given that the model output a 0. :param event_length_s: The typical duration of the event you are looking for in seconds (can be a float). :param start_as_yes: If True, the first `ms_per_input` will be in the 'y' category. Otherwise it will be in the 'n' category. :param prob_raw_yes: The raw probability of finding the event in any given `ms_per_input` vector. :returns: A list of tuples of the form [('n', AudioSegment), ('y', AudioSegment), etc.], where over the course of the list, the AudioSegment in tuple 3 picks up where the one in tuple 2 left off. :raises: ValueError if `ms_per_input` is negative or larger than the number of ms in this AudioSegment; if `transition_matrix` or `model_stats` do not have a __len__ attribute or are not length 2; if the values in `transition_matrix` or `model_stats` are not in the closed interval [0.0, 1.0]. """<if_stmt>ms_per_input<l>0<or>ms_per_input/MS_PER_S<g>self.duration_seconds<block_start><raise>ValueError("ms_per_input cannot be negative and cannot be longer than the duration of the AudioSegment."<concat>" The given value was "+str(ms_per_input))<block_end><elif_stmt><not>hasattr(transition_matrix "__len__")<or>len(transition_matrix)<ne>2<block_start><raise>ValueError("transition_matrix must be an iterable of length 2.")<block_end><elif_stmt><not>hasattr(model_stats "__len__")<or>len(model_stats)<ne>2<block_start><raise>ValueError("model_stats must be an iterable of length 2.")<block_end><elif_stmt>any([<true><for>prob transition_matrix<if>prob<g>1.0<or>prob<l>0.0])<block_start><raise>ValueError("Values in transition_matrix are probabilities, and so must be in the range [0.0, 1.0].")<block_end><elif_stmt>any([<true><for>prob model_stats<if>prob<g>1.0<or>prob<l>0.0])<block_start><raise>ValueError("Values in model_stats are probabilities, and so must be in the range [0.0, 1.0].")<block_end><elif_stmt>prob_raw_yes<g>1.0<or>prob_raw_yes<l>0.0<block_start><raise>ValueError("`prob_raw_yes` is a probability, and so must be in the range [0.0, 1.0]")<block_end># Get the yeses or nos for when the filter is triggered (when the event is on/off) filter_indices=[yes_or_no<for>yes_or_no self._get_filter_indices(start_as_yes prob_raw_yes ms_per_input model transition_matrix model_stats)]<line_sep># Run a homogeneity filter over the values to make local regions more self-similar (reduce noise) ret=self._homogeneity_filter(filter_indices window_size=int(round(0.25<times>MS_PER_S/ms_per_input)))<line_sep># Group the consecutive ones together ret=self._group_filter_values(ret ms_per_input)<line_sep># Take the groups and turn them into AudioSegment objects 
real_ret=self._reduce_filtered_segments(ret)<line_sep><return>real_ret<block_end><def_stmt>_get_filter_indices self start_as_yes prob_raw_yes ms_per_input model transition_matrix model_stats<block_start>""" This has been broken out of the `filter` function to reduce cognitive load. """<line_sep>filter_triggered=1<if>start_as_yes<else>0<line_sep>prob_raw_no=1.0-prob_raw_yes<for_stmt>segment,_timestamp self.generate_frames_as_segments(ms_per_input)<block_start><yield>filter_triggered<line_sep>observation=int(round(model.predict(segment)))<assert_stmt>observation<eq>1<or>observation<eq>0 "The given model did not output a 1 or a 0, output: "+str(observation)<line_sep>prob_hyp_yes_given_last_hyp=1.0-transition_matrix[0]<if>filter_triggered<else>transition_matrix[1]<line_sep>prob_hyp_no_given_last_hyp=transition_matrix[0]<if>filter_triggered<else>1.0-transition_matrix[1]<line_sep>prob_hyp_yes_given_data=model_stats[0]<if>observation<eq>1<else>model_stats[1]<line_sep>prob_hyp_no_given_data=1.0-model_stats[0]<if>observation<eq>1<else>1.0-model_stats[1]<line_sep>hypothesis_yes=prob_raw_yes<times>prob_hyp_yes_given_last_hyp<times>prob_hyp_yes_given_data<line_sep>hypothesis_no=prob_raw_no<times>prob_hyp_no_given_last_hyp<times>prob_hyp_no_given_data<line_sep># make a list of ints - each is 0 or 1. The number of 1s is hypotheis_yes * 100 # the number of 0s is hypothesis_no * 100 distribution=[1<for>i range(int(round(hypothesis_yes<times>100)))]<line_sep>distribution.extend([0<for>i range(int(round(hypothesis_no<times>100)))])<line_sep># shuffle random.shuffle(distribution)<line_sep>filter_triggered=random.choice(distribution)<block_end><block_end><def_stmt>_group_filter_values self filter_indices ms_per_input<block_start>""" This has been broken out of the `filter` function to reduce cognitive load. """<line_sep>ret=[]<for_stmt>filter_value,(_segment timestamp) zip(filter_indices self.generate_frames_as_segments(ms_per_input))<block_start><if_stmt>filter_value<eq>1<block_start><if_stmt>len(ret)<g>0<and>ret[-1][0]<eq>'n'<block_start>ret.append(['y' timestamp])# The last one was different, so we create a new one <block_end><elif_stmt>len(ret)<g>0<and>ret[-1][0]<eq>'y'<block_start>ret[-1][1]=timestamp# The last one was the same as this one, so just update the timestamp <block_end><else_stmt><block_start>ret.append(['y' timestamp])# This is the first one <block_end><block_end><else_stmt><block_start><if_stmt>len(ret)<g>0<and>ret[-1][0]<eq>'n'<block_start>ret[-1][1]=timestamp<block_end><elif_stmt>len(ret)<g>0<and>ret[-1][0]<eq>'y'<block_start>ret.append(['n' timestamp])<block_end><else_stmt><block_start>ret.append(['n' timestamp])<block_end><block_end><block_end><return>ret<block_end><def_stmt>_homogeneity_filter self ls window_size<block_start>""" This has been broken out of the `filter` function to reduce cognitive load. ls is a list of 1s or 0s for when the filter is on or off """<line_sep>k=window_size<line_sep>i=k<while_stmt>i<le>len(ls)-k# Get a window of k items <block_start>window=[ls[i+j]<for>j range(k)]<line_sep># Change the items in the window to be more like the mode of that window mode=1<if>sum(window)<ge>k/2<else>0<for_stmt>j range(k)<block_start>ls[i+j]=mode<block_end>i<augadd>k<block_end><return>ls<block_end><def_stmt>_reduce_filtered_segments self ret<block_start>""" This has been broken out of the `filter` function to reduce cognitive load. 
"""<line_sep>real_ret=[]<for_stmt>i,(this_yesno next_timestamp) enumerate(ret)<block_start><if_stmt>i<g>0<block_start>_next_yesno,timestamp=ret[i-1]<block_end><else_stmt><block_start>timestamp=0<block_end>data=self[timestamp<times>MS_PER_S:next_timestamp<times>MS_PER_S].raw_data<line_sep>seg=AudioSegment(pydub.AudioSegment(data=data sample_width=self.sample_width frame_rate=self.frame_rate channels=self.channels) self.name)<line_sep>real_ret.append((this_yesno seg))<block_end><return>real_ret<block_end><def_stmt>filter_silence self duration_s=1 threshold_percentage=1 console_output=<false><block_start>""" Returns a copy of this AudioSegment, but whose silence has been removed. .. note:: This method requires that you have the program 'sox' installed. .. warning:: This method uses the program 'sox' to perform the task. While this is very fast for a single function call, the IO may add up for a large numbers of AudioSegment objects. :param duration_s: The number of seconds of "silence" that must be present in a row to be stripped. :param threshold_percentage: Silence is defined as any samples whose absolute value is below `threshold_percentage * max(abs(samples in this segment))`. :param console_output: If True, will pipe all sox output to the console. :returns: A copy of this AudioSegment, but whose silence has been removed. """<line_sep>tmp=tempfile.NamedTemporaryFile()<line_sep>othertmp=tempfile.NamedTemporaryFile()<line_sep>self.export(tmp.name format="WAV")<line_sep>command="sox "+tmp.name+" -t wav "+othertmp.name+" silence -l 1 0.1 "+str(threshold_percentage)+"% -1 "+str(float(duration_s))+" "+str(threshold_percentage)+"%"<line_sep>stdout=stderr=subprocess.PIPE<if>console_output<else>subprocess.DEVNULL<line_sep>res=subprocess.run(command.split(' ') stdout=stdout stderr=stderr)<assert_stmt>res.returncode<eq>0 "Sox did not work as intended, or perhaps you don't have Sox installed?"<line_sep>other=AudioSegment(pydub.AudioSegment.from_wav(othertmp.name) self.name)<line_sep>tmp.close()<line_sep>othertmp.close()<line_sep><return>other<block_end><def_stmt>fft self start_s=<none> duration_s=<none> start_sample=<none> num_samples=<none> zero_pad=<false><block_start>""" Transforms the indicated slice of the AudioSegment into the frequency domain and returns the bins and the values. If neither `start_s` or `start_sample` is specified, the first sample of the slice will be the first sample of the AudioSegment. If neither `duration_s` or `num_samples` is specified, the slice will be from the specified start to the end of the segment. .. code-block:: python # Example for plotting the FFT using this function import matplotlib.pyplot as plt import numpy as np seg = audiosegment.from_file("furelise.wav") # Just take the first 3 seconds hist_bins, hist_vals = seg[1:3000].fft() hist_vals_real_normed = np.abs(hist_vals) / len(hist_vals) plt.plot(hist_bins / 1000, hist_vals_real_normed) plt.xlabel("kHz") plt.ylabel("dB") plt.show() .. image:: images/fft.png :param start_s: The start time in seconds. If this is specified, you cannot specify `start_sample`. :param duration_s: The duration of the slice in seconds. If this is specified, you cannot specify `num_samples`. :param start_sample: The zero-based index of the first sample to include in the slice. If this is specified, you cannot specify `start_s`. :param num_samples: The number of samples to include in the slice. If this is specified, you cannot specify `duration_s`. 
:param zero_pad: If True and the combination of start and duration result in running off the end of the AudioSegment, the end is zero padded to prevent this. :returns: np.ndarray of frequencies, np.ndarray of amount of each frequency :raises: ValueError If `start_s` and `start_sample` are both specified and/or if both `duration_s` and `num_samples` are specified. """<if_stmt>start_s<is><not><none><and>start_sample<is><not><none><block_start><raise>ValueError("Only one of start_s and start_sample can be specified.")<block_end><if_stmt>duration_s<is><not><none><and>num_samples<is><not><none><block_start><raise>ValueError("Only one of duration_s and num_samples can be specified.")<block_end><if_stmt>start_s<is><none><and>start_sample<is><none><block_start>start_sample=0<block_end><if_stmt>duration_s<is><none><and>num_samples<is><none><block_start>num_samples=len(self.get_array_of_samples())-int(start_sample)<block_end><if_stmt>duration_s<is><not><none><block_start>num_samples=int(round(duration_s<times>self.frame_rate))<block_end><if_stmt>start_s<is><not><none><block_start>start_sample=int(round(start_s<times>self.frame_rate))<block_end>end_sample=start_sample+num_samples# end_sample is excluded <if_stmt>end_sample<g>len(self.get_array_of_samples())<and><not>zero_pad<block_start><raise>ValueError("The combination of start and duration will run off the end of the AudioSegment object.")<block_end><elif_stmt>end_sample<g>len(self.get_array_of_samples())<and>zero_pad<block_start>arr=np.array(self.get_array_of_samples())<line_sep>zeros=np.zeros(end_sample-len(arr))<line_sep>arr=np.append(arr zeros)<block_end><else_stmt><block_start>arr=np.array(self.get_array_of_samples())<block_end>audioslice=np.array(arr[start_sample:end_sample])<line_sep>fft_result=np.fft.fft(audioslice)[range(int(round(num_samples/2))+1)]<line_sep>bins=np.arange(0 int(round(num_samples/2))+1 1.0)<times>(self.frame_rate/num_samples)<line_sep><return>bins fft_result<block_end><def_stmt>generate_frames self frame_duration_ms zero_pad=<true><block_start>""" Yields self's data in chunks of frame_duration_ms. This function adapted from pywebrtc's example [https://github.com/wiseman/py-webrtcvad/blob/master/example.py]. :param frame_duration_ms: The length of each frame in ms. :param zero_pad: Whether or not to zero pad the end of the AudioSegment object to get all the audio data out as frames. If not, there may be a part at the end of the Segment that is cut off (the part will be <= `frame_duration_ms` in length). :returns: A Frame object with properties 'bytes (the data)', 'timestamp (start time)', and 'duration'. 
"""<line_sep>Frame=collections.namedtuple("Frame" "bytes timestamp duration")<line_sep># (samples/sec) * (seconds in a frame) * (bytes/sample) bytes_per_frame=int(self.frame_rate<times>(frame_duration_ms/1000)<times>self.sample_width)<line_sep>offset=0# where we are so far in self's data (in bytes) timestamp=0.0# where we are so far in self (in seconds) # (bytes/frame) * (sample/bytes) * (sec/samples) frame_duration_s=(bytes_per_frame/self.frame_rate)/self.sample_width<while_stmt>offset+bytes_per_frame<l>len(self.raw_data)<block_start><yield>Frame(self.raw_data[offset:offset+bytes_per_frame] timestamp frame_duration_s)<line_sep>timestamp<augadd>frame_duration_s<line_sep>offset<augadd>bytes_per_frame<block_end><if_stmt>zero_pad<block_start>rest=self.raw_data[offset:]<if_stmt>len(rest)<ge>bytes_per_frame<and>len(rest)%bytes_per_frame<block_start>zeros=bytes(bytes_per_frame-len(rest))<line_sep><yield>Frame(rest+zeros timestamp frame_duration_s)<block_end><block_end><block_end><def_stmt>generate_frames_as_segments self frame_duration_ms zero_pad=<true><block_start>""" Does the same thing as `generate_frames`, but yields tuples of (AudioSegment, timestamp) instead of Frames. """<for_stmt>frame self.generate_frames(frame_duration_ms zero_pad=zero_pad)<block_start>seg=AudioSegment(pydub.AudioSegment(data=frame.bytes sample_width=self.sample_width frame_rate=self.frame_rate channels=self.channels) self.name)<line_sep><yield>seg frame.timestamp<block_end><block_end><def_stmt>normalize_spl_by_average self db<block_start>""" Normalize the values in the AudioSegment so that its average dB value is `db`. The dB of a value is calculated as 20 * log10(abs(value + 1E-9)). :param db: The decibels to normalize average to. :returns: A new AudioSegment object whose values are changed so that their average is `db`. """<def_stmt>inverse_spl val<block_start>"""Calculates the (positive) 'PCM' value for the given SPl val"""<line_sep><return>10<power>(val/20.0)<block_end># Convert dB into 'PCM' db_pcm=inverse_spl(db)<line_sep># Calculate current 'PCM' average curavg=np.abs(np.mean(self.to_numpy_array()))<line_sep># Calculate ratio of dB_pcm / curavg_pcm ratio=db_pcm/curavg<line_sep># Multiply all values by ratio dtype_dict={1:np.int8 2:np.int16 4:np.int32}<line_sep>dtype=dtype_dict[self.sample_width]<line_sep>new_seg=from_numpy_array(np.array(self.to_numpy_array()<times>ratio dtype=dtype) self.frame_rate)<line_sep># Check SPL average to see if we are right #assert math.isclose(np.mean(new_seg.spl), db), "new = " + str(np.mean(new_seg.spl)) + " != " + str(db) <return>new_seg<block_end><def_stmt>reduce self others<block_start>""" Reduces others into this one by concatenating all the others onto this one and returning the result. Does not modify self, instead, makes a copy and returns that. :param others: The other AudioSegment objects to append to this one. :returns: The concatenated result. """<line_sep>ret=AudioSegment(self.seg self.name)<line_sep>selfdata=[self.seg._data]<line_sep>otherdata=[o.seg._data<for>o others]<line_sep>ret.seg._data=b''.join(selfdata+otherdata)<line_sep><return>ret<block_end><def_stmt>resample self sample_rate_Hz=<none> sample_width=<none> channels=<none> console_output=<false><block_start>""" Returns a new AudioSegment whose data is the same as this one, but which has been resampled to the specified characteristics. Any parameter left None will be unchanged. .. note:: This method requires that you have the program 'sox' installed. .. 
warning:: This method uses the program 'sox' to perform the task. While this is very fast for a single function call, the IO may add up for a large numbers of AudioSegment objects. :param sample_rate_Hz: The new sample rate in Hz. :param sample_width: The new sample width in bytes, so sample_width=2 would correspond to 16 bit (2 byte) width. :param channels: The new number of channels. :param console_output: Will print the output of sox to the console if True. :returns: The newly sampled AudioSegment. """<if_stmt>sample_rate_Hz<is><none><block_start>sample_rate_Hz=self.frame_rate<block_end><if_stmt>sample_width<is><none><block_start>sample_width=self.sample_width<block_end><if_stmt>channels<is><none><block_start>channels=self.channels<block_end>infile,outfile=tempfile.NamedTemporaryFile() tempfile.NamedTemporaryFile()<line_sep>self.export(infile.name format="wav")<line_sep>command="sox "+infile.name+" -b"+str(sample_width<times>8)+" -r "+str(sample_rate_Hz)+" -t wav "+outfile.name+" channels "+str(channels)<line_sep># stdout = stderr = subprocess.PIPE if console_output else subprocess.DEVNULL # res = subprocess.run(command.split(' '), stdout=stdout, stderr=stderr) res=subprocess.call(command.split(' '))<if_stmt>res<block_start><raise>subprocess.CalledProcessError(res cmd=command)<block_end>other=AudioSegment(pydub.AudioSegment.from_wav(outfile.name) self.name)<line_sep>infile.close()<line_sep>outfile.close()<line_sep><return>other<block_end><def_stmt>serialize self<block_start>""" Serializes into a bytestring. :returns: An object of type Bytes. """<line_sep>d={'name':self.name 'seg':pickle.dumps(self.seg protocol=-1)}<line_sep><return>pickle.dumps(d protocol=-1)<block_end><def_stmt>spectrogram self start_s=<none> duration_s=<none> start_sample=<none> num_samples=<none> window_length_s=<none> window_length_samples=<none> overlap=0.5<block_start>""" Does a series of FFTs from `start_s` or `start_sample` for `duration_s` or `num_samples`. Effectively, transforms a slice of the AudioSegment into the frequency domain across different time bins. .. code-block:: python # Example for plotting a spectrogram using this function import audiosegment import matplotlib.pyplot as plt #... seg = audiosegment.from_file("somebodytalking.wav") freqs, times, amplitudes = seg.spectrogram(window_length_s=0.03, overlap=0.5) amplitudes = 10 * np.log10(amplitudes + 1e-9) # Plot plt.pcolormesh(times, freqs, amplitudes) plt.xlabel("Time in Seconds") plt.ylabel("Frequency in Hz") plt.show() .. image:: images/spectrogram.png :param start_s: The start time. Starts at the beginning if neither this nor `start_sample` is specified. :param duration_s: The duration of the spectrogram in seconds. Goes to the end if neither this nor `num_samples` is specified. :param start_sample: The index of the first sample to use. Starts at the beginning if neither this nor `start_s` is specified. :param num_samples: The number of samples in the spectrogram. Goes to the end if neither this nor `duration_s` is specified. :param window_length_s: The length of each FFT in seconds. If the total number of samples in the spectrogram is not a multiple of the window length in samples, the last window will be zero-padded. :param window_length_samples: The length of each FFT in number of samples. If the total number of samples in the spectrogram is not a multiple of the window length in samples, the last window will be zero-padded. :param overlap: The fraction of each window to overlap. 
:returns: Three np.ndarrays: The frequency values in Hz (the y-axis in a spectrogram), the time values starting at start time and then increasing by `duration_s` each step (the x-axis in a spectrogram), and the dB of each time/frequency bin as a 2D array of shape [len(frequency values), len(duration)]. :raises ValueError: If `start_s` and `start_sample` are both specified, if `duration_s` and `num_samples` are both specified, if the first window's duration plus start time lead to running off the end of the AudioSegment, or if `window_length_s` and `window_length_samples` are either both specified or if they are both not specified. """<if_stmt>start_s<is><not><none><and>start_sample<is><not><none><block_start><raise>ValueError("Only one of start_s and start_sample may be specified.")<block_end><if_stmt>duration_s<is><not><none><and>num_samples<is><not><none><block_start><raise>ValueError("Only one of duration_s and num_samples may be specified.")<block_end><if_stmt>window_length_s<is><not><none><and>window_length_samples<is><not><none><block_start><raise>ValueError("Only one of window_length_s and window_length_samples may be specified.")<block_end><if_stmt>window_length_s<is><none><and>window_length_samples<is><none><block_start><raise>ValueError("You must specify a window length, either in window_length_s or in window_length_samples.")<block_end><if_stmt>start_s<is><none><and>start_sample<is><none><block_start>start_sample=0<block_end><if_stmt>duration_s<is><none><and>num_samples<is><none><block_start>num_samples=len(self.get_array_of_samples())-int(start_sample)<block_end><if_stmt>duration_s<is><not><none><block_start>num_samples=int(round(duration_s<times>self.frame_rate))<block_end><if_stmt>start_s<is><not><none><block_start>start_sample=int(round(start_s<times>self.frame_rate))<block_end><if_stmt>window_length_s<is><not><none><block_start>window_length_samples=int(round(window_length_s<times>self.frame_rate))<block_end><if_stmt>start_sample+num_samples<g>len(self.get_array_of_samples())<block_start><raise>ValueError("The combination of start and duration will run off the end of the AudioSegment object.")<block_end>f,t,sxx=signal.spectrogram(self.to_numpy_array() self.frame_rate scaling='spectrum' nperseg=window_length_samples noverlap=int(round(overlap<times>window_length_samples)) mode='magnitude')<line_sep><return>f t sxx<block_end><def_stmt>to_numpy_array self<block_start>""" Convenience function for `np.array(self.get_array_of_samples())` while keeping the appropriate dtype. """<line_sep>dtype_dict={1:np.int8 2:np.int16 4:np.int32}<line_sep>dtype=dtype_dict[self.sample_width]<line_sep><return>np.array(self.get_array_of_samples() dtype=dtype)<block_end>@deprecated<def_stmt>trim_to_minutes self strip_last_seconds=<false><block_start>""" Returns a list of minute-long (at most) Segment objects. .. note:: This function has been deprecated. Use the `dice` function instead. :param strip_last_seconds: If True, this method will return minute-long segments, but the last three seconds of this AudioSegment won't be returned. This is useful for removing the microphone artifact at the end of the recording. :returns: A list of AudioSegment objects, each of which is one minute long at most (and only the last one - if any - will be less than one minute). 
"""<line_sep>outs=self.dice(seconds=60 zero_pad=<false>)<line_sep># Now cut out the last three seconds of the last item in outs (it will just be microphone artifact) # or, if the last item is less than three seconds, just get rid of it <if_stmt>strip_last_seconds<block_start><if_stmt>outs[-1].duration_seconds<g>3<block_start>outs[-1]=outs[-1][:-MS_PER_S<times>3]<block_end><else_stmt><block_start>outs=outs[:-1]<block_end><block_end><return>outs<block_end><def_stmt>zero_extend self duration_s=<none> num_samples=<none><block_start>""" Adds a number of zeros (digital silence) to the AudioSegment (returning a new one). :param duration_s: The number of seconds of zeros to add. If this is specified, `num_samples` must be None. :param num_samples: The number of zeros to add. If this is specified, `duration_s` must be None. :returns: A new AudioSegment object that has been zero extended. :raises: ValueError if duration_s and num_samples are both specified. """<if_stmt>duration_s<is><not><none><and>num_samples<is><not><none><block_start><raise>ValueError("`duration_s` and `num_samples` cannot both be specified.")<block_end><elif_stmt>duration_s<is><not><none><block_start>num_samples=self.frame_rate<times>duration_s<block_end>seg=AudioSegment(self.seg self.name)<line_sep>zeros=silent(duration=num_samples/self.frame_rate frame_rate=self.frame_rate)<line_sep><return>zeros.overlay(seg)<block_end><block_end><def_stmt>deserialize bstr<block_start>""" Attempts to deserialize a bytestring into an audiosegment. :param bstr: The bytestring serialized via an audiosegment's serialize() method. :returns: An AudioSegment object deserialized from `bstr`. """<line_sep>d=pickle.loads(bstr)<line_sep>seg=pickle.loads(d['seg'])<line_sep><return>AudioSegment(seg d['name'])<block_end><def_stmt>empty <block_start>""" Creates a zero-duration AudioSegment object. :returns: An empty AudioSegment object. """<line_sep>dubseg=pydub.AudioSegment.empty()<line_sep><return>AudioSegment(dubseg "")<block_end><def_stmt>from_file path<block_start>""" Returns an AudioSegment object from the given file based on its file extension. If the extension is wrong, this will throw some sort of error. :param path: The path to the file, including the file extension. :returns: An AudioSegment instance from the file. """<line_sep>_name,ext=os.path.splitext(path)<line_sep>ext=ext.lower()[1:]<line_sep>seg=pydub.AudioSegment.from_file(path ext)<line_sep><return>AudioSegment(seg path)<block_end><def_stmt>from_mono_audiosegments *args<block_start>""" Creates a multi-channel AudioSegment out of multiple mono AudioSegments (two or more). Each mono AudioSegment passed in should be exactly the same number of samples. :returns: An AudioSegment of multiple channels formed from the given mono AudioSegments. """<line_sep><return>AudioSegment(pydub.AudioSegment.from_mono_audiosegments(*args) "")<block_end><def_stmt>from_numpy_array nparr framerate<block_start>""" Returns an AudioSegment created from the given numpy array. The numpy array must have shape = (num_samples, num_channels). :param nparr: The numpy array to create an AudioSegment from. :returns: An AudioSegment created from the given array. 
"""<line_sep># interleave the audio across all channels and collapse <if_stmt>nparr.dtype.itemsize<not><in>(1 2 4)<block_start><raise>ValueError("Numpy Array must contain 8, 16, or 32 bit values.")<block_end><if_stmt>len(nparr.shape)<eq>1<block_start>arrays=[nparr]<block_end><elif_stmt>len(nparr.shape)<eq>2<block_start>arrays=[nparr[i :]<for>i range(nparr.shape[0])]<block_end><else_stmt><block_start><raise>ValueError("Numpy Array must be one or two dimensional. Shape must be: (num_samples, num_channels).")<block_end>interleaved=np.vstack(arrays).reshape((-1 ) order='F')<line_sep>dubseg=pydub.AudioSegment(interleaved.tobytes() frame_rate=framerate sample_width=interleaved.dtype.itemsize channels=len(interleaved.shape))<line_sep><return>AudioSegment(dubseg "")<block_end><def_stmt>silent duration=1000 frame_rate=11025<block_start>""" Creates an AudioSegment object of the specified duration/frame_rate filled with digital silence. :param duration: The duration of the returned object in ms. :param frame_rate: The samples per second of the returned object. :returns: AudioSegment object filled with pure digital silence. """<line_sep>seg=pydub.AudioSegment.silent(duration=duration frame_rate=frame_rate)<line_sep><return>AudioSegment(seg "")<block_end>
<import_from_stmt>OpenGL.GL *<import_from_stmt>OpenGL.GLUT *<import_from_stmt>OpenGL.GLU *<line_sep>""" Mouse interaction This piece of code captures mouse clicks by the user. Every time the user presses the left mouse button, the current point is pushed onto an array; when the right mouse button is pressed, the last element is removed from the array. After a click a glutPostRedisplay() is called to trigger a call to the display function, which creates a line strip out of all created points. http://www.de-brauwer.be/wiki/wikka.php?wakka=PyOpenGLMouse """<class_stmt>Point<block_start><def_stmt>__init__ self x y<block_start>self.x=x<line_sep>self.y=y<block_end><block_end>points=[]<def_stmt>initFun <block_start>glClearColor(1.0 1.0 1.0 0.0)<line_sep>glColor3f(0.0 0.0 0.0)<line_sep>glMatrixMode(GL_PROJECTION)<line_sep>glLoadIdentity()<line_sep>gluOrtho2D(0.0 640.0 0.0 480.0)<block_end><def_stmt>displayFun <block_start><global>points<line_sep>glClear(GL_COLOR_BUFFER_BIT)<line_sep>glBegin(GL_LINE_STRIP)<line_sep>glColor3f(0 0 0)<for_stmt>p points<block_start>glVertex2i(p.x p.y)<block_end>glEnd()<line_sep>glFlush()<block_end><def_stmt>mouseFun button state x y<block_start><global>points<if_stmt>button<eq>GLUT_LEFT_BUTTON<and>state<eq>GLUT_DOWN<block_start>p=Point(x 480-y)<line_sep>points.append(p)<block_end><if_stmt>button<eq>GLUT_RIGHT_BUTTON<and>state<eq>GLUT_DOWN<block_start><if_stmt>points<block_start>points=points[:-1]<block_end><block_end>glutPostRedisplay()<block_end><if_stmt>__name__<eq>'__main__'<block_start>glutInit()<line_sep>glutInitDisplayMode(GLUT_SINGLE|GLUT_RGB)<line_sep>glutInitWindowSize(640 480)<line_sep>glutCreateWindow(b"Polyline")<line_sep>glutDisplayFunc(displayFun)<line_sep>glutMouseFunc(mouseFun)<line_sep>initFun()<line_sep>glutMainLoop()<block_end>
<import_stmt>os<import_stmt>pytest<import_stmt>sys<import_stmt>numpy<as>np<try_stmt><block_start><import_stmt>pymake<block_end><except_stmt><block_start>msg="Error. Pymake package is not available.\n"<line_sep>msg<augadd>"Try installing using the following command:\n"<line_sep>msg<augadd>" pip install https://github.com/modflowpy/pymake/zipball/master"<line_sep><raise>Exception(msg)<block_end><try_stmt><block_start><import_stmt>flopy<block_end><except_stmt><block_start>msg="Error. FloPy package is not available.\n"<line_sep>msg<augadd>"Try installing using the following command:\n"<line_sep>msg<augadd>" pip install flopy"<line_sep><raise>Exception(msg)<block_end><import_from_stmt>framework testing_framework<import_from_stmt>simulation Simulation<line_sep>ex=["aux02"]<line_sep>exdirs=[]<for_stmt>s ex<block_start>exdirs.append(os.path.join("temp" s))<block_end><def_stmt>build_model idx dir<block_start>nlay,nrow,ncol=1 10 10<line_sep>nper=3<line_sep>perlen=[1.0 1.0 1.0]<line_sep>nstp=[10 10 10]<line_sep>tsmult=[1.0 1.0 1.0]<line_sep>lenx=300.0<line_sep>delr=delc=lenx/float(nrow)<line_sep>strt=100.0<line_sep>nouter,ninner=100 300<line_sep>hclose,rclose,relax=1e-9 1e-3 0.97<line_sep>tdis_rc=[]<for_stmt>i range(nper)<block_start>tdis_rc.append((perlen[i] nstp[i] tsmult[i]))<block_end>name=ex[idx]<line_sep># build MODFLOW 6 files ws=dir<line_sep>sim=flopy.mf6.MFSimulation(sim_name=name version="mf6" exe_name="mf6" sim_ws=ws)<line_sep># create tdis package tdis=flopy.mf6.ModflowTdis(sim time_units="DAYS" nper=nper perioddata=tdis_rc)<line_sep># create gwf model gwf=flopy.mf6.ModflowGwf(sim modelname=name)<line_sep># create iterative model solution and register the gwf model with it ims=flopy.mf6.ModflowIms(sim print_option="SUMMARY" outer_dvclose=hclose outer_maximum=nouter under_relaxation="DBD" inner_maximum=ninner inner_dvclose=hclose rcloserecord=rclose linear_acceleration="BICGSTAB" scaling_method="NONE" reordering_method="NONE" relaxation_factor=relax )<line_sep>sim.register_ims_package(ims [gwf.name])<line_sep>dis=flopy.mf6.ModflowGwfdis(gwf nlay=nlay nrow=nrow ncol=ncol delr=delr delc=delc top=90.0 botm=0.0 )<line_sep># initial conditions ic=flopy.mf6.ModflowGwfic(gwf strt=strt)<line_sep># node property flow npf=flopy.mf6.ModflowGwfnpf(gwf save_flows=<true> icelltype=1 k=1.0 k33=0.01)<line_sep># chd files chdlist0=[]<line_sep>chdlist0.append([(0 0 0) 100.0]+[a<for>a range(100)])<line_sep>chdlist0.append([(0 nrow-1 ncol-1) 95.0]+[a<for>a range(100)])<line_sep>chdspdict={0:chdlist0}<line_sep>chd=flopy.mf6.ModflowGwfchd(gwf stress_period_data=chdspdict save_flows=<true> auxiliary=[f"aux{i}"<for>i range(100)] pname="CHD-1" )<line_sep># output control oc=flopy.mf6.ModflowGwfoc(gwf budget_filerecord="{}.bud".format(name) head_filerecord="{}.hds".format(name) headprintrecord=[("COLUMNS" 10 "WIDTH" 15 "DIGITS" 6 "GENERAL")] saverecord=[("HEAD" "ALL") ("BUDGET" "ALL")] printrecord=[("HEAD" "ALL") ("BUDGET" "ALL")] filename="{}.oc".format(name) )<line_sep><return>sim <none><block_end><def_stmt>eval_model sim<block_start>print("evaluating model...")<line_sep># maw budget aux variables fpth=os.path.join(sim.simpath "aux02.bud")<line_sep>bobj=flopy.utils.CellBudgetFile(fpth precision="double")<line_sep>records=bobj.get_data(text="CHD")<for_stmt>r records<block_start><for_stmt>a range(100)<block_start>aname=f"AUX{a}"<assert_stmt>np.allclose(r[aname] a)<block_end><block_end><return><block_end># - No need to change any code below @pytest.mark.parametrize("idx, dir" list(enumerate(exdirs)) 
)<def_stmt>test_mf6model idx dir# initialize testing framework <block_start>test=testing_framework()<line_sep># build the model test.build_mf6_models(build_model idx dir)<line_sep># run the test model test.run_mf6(Simulation(dir exfunc=eval_model idxsim=idx))<block_end><def_stmt>main # initialize testing framework <block_start>test=testing_framework()<line_sep># run the test model <for_stmt>idx,dir enumerate(exdirs)<block_start>test.build_mf6_models(build_model idx dir)<line_sep>sim=Simulation(dir exfunc=eval_model idxsim=idx)<line_sep>test.run_mf6(sim)<block_end><block_end><if_stmt>__name__<eq>"__main__"# print message <block_start>print("standalone run of {}".format(os.path.basename(__file__)))<line_sep># run main routine main()<block_end>
<import_stmt>numpy<as>np<import_from_stmt>kernel_tuner core<import_from_stmt>kernel_tuner.interface Options _kernel_options<import_from_stmt>kernel_tuner.integration TuneResults<class_stmt>PythonKernel(object)<block_start><def_stmt>__init__ self kernel_name kernel_string problem_size arguments params=<none> inputs=<none> outputs=<none> device=0 platform=0 block_size_names=<none> grid_div_x=<none> grid_div_y=<none> grid_div_z=<none> verbose=<true> lang=<none> results_file=<none><block_start>""" Construct Python helper object to compile and call the kernel from Python This object compiles a GPU kernel parameterized using the parameters in params. GPU memory is allocated for each argument using its size and type as listed in arguments. The object can be called directly as a function with the kernel arguments as function arguments. Kernel arguments marked as inputs will be copied to the GPU on every kernel launch. Only the kernel arguments marked as outputs will be returned, note that the result is always returned in a list, even when there is only one output. Most of the arguments to this function are the same as with tune_kernel or run_kernel in Kernel Tuner, and are therefore not duplicated here. The two new arguments are: :param inputs: a boolean list of length arguments to signal whether an argument is input to the kernel :type inputs: list(bool) :param outputs: a boolean list of length arguments to signal whether an argument is output of the kernel :type outputs: list(bool) """<line_sep>#construct device interface kernel_source=core.KernelSource(kernel_name kernel_string lang)<line_sep>self.dev=core.DeviceInterface(kernel_source device=device quiet=<true>)<if_stmt><not>params<block_start>params={}<block_end>#if results_file is passed use the results file to lookup tunable parameters <if_stmt>results_file<block_start>results=TuneResults(results_file)<line_sep>params.update(results.get_best_config(self.dev.name problem_size))<block_end>self.params=params<line_sep>#construct kernel_options to hold information about the kernel opts=locals()<line_sep>kernel_options=Options([(k opts[k])<for>k _kernel_options.keys()<if>k<in>opts.keys()])<line_sep>#instantiate the kernel given the parameters in params self.kernel_instance=self.dev.create_kernel_instance(kernel_source kernel_options params verbose)<line_sep>#compile the kernel self.func=self.dev.compile_kernel(self.kernel_instance verbose)<line_sep>#setup GPU memory self.gpu_args=self.dev.ready_argument_list(arguments)<if_stmt>inputs<block_start>self.inputs=inputs<block_end><else_stmt><block_start>self.inputs=[<true><for>_ arguments]<block_end><if_stmt>outputs<block_start>self.outputs=outputs<block_end><else_stmt><block_start>self.outputs=[<true><for>_ arguments]<block_end><block_end><def_stmt>update_gpu_args self args<block_start><for_stmt>i,arg enumerate(args)<block_start><if_stmt>self.inputs[i]<block_start><if_stmt>isinstance(args[i] np.ndarray)<block_start>self.dev.dev.memcpy_htod(self.gpu_args[i] arg)<block_end><else_stmt><block_start>self.gpu_args[i]=arg<block_end><block_end><block_end><return>self.gpu_args<block_end><def_stmt>get_gpu_result self args<block_start>results=[]<for_stmt>i,_ enumerate(self.gpu_args)<block_start><if_stmt>self.outputs[i]<and>isinstance(args[i] np.ndarray)<block_start>res=np.zeros_like(args[i])<line_sep>self.dev.memcpy_dtoh(res self.gpu_args[i])<line_sep>results.append(res)<block_end><block_end><return>results<block_end><def_stmt>run_kernel self args<block_start>"""Run the GPU kernel Copy the arguments marked as 
inputs to the GPU Call the GPU kernel Copy the arguments marked as outputs from the GPU Return the outputs in a list of numpy arrays :param args: A list with the kernel arguments as numpy arrays or numpy scalars :type args: list(np.ndarray or np.generic) """<line_sep>self.update_gpu_args(args)<line_sep>self.dev.run_kernel(self.func self.gpu_args self.kernel_instance)<line_sep><return>self.get_gpu_result(args)<block_end><def_stmt>__call__ self *args<block_start>"""Run the GPU kernel Copy the arguments marked as inputs to the GPU Call the GPU kernel Copy the arguments marked as outputs from the GPU Return the outputs in a list of numpy arrays :param *args: A variable number of kernel arguments as numpy arrays or numpy scalars :type *args: np.ndarray or np.generic """<line_sep><return>self.run_kernel(args)<block_end><def_stmt>__del__ self<block_start><if_stmt>hasattr(self 'dev')<block_start>self.dev.__exit__([<none> <none> <none>])<block_end><block_end><block_end>
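# --- Illustrative usage sketch (standalone; not part of the original module) ---
# A minimal vector-add example for the PythonKernel helper defined above. It assumes a
# CUDA-capable device; the import path, the kernel source, and the `block_size_x`
# parameter below are assumptions made for this sketch, not taken from the original file.
import numpy as np
from kernel_tuner.kernelbuilder import PythonKernel  # assumed import location

kernel_string = """
__global__ void vector_add(float *c, float *a, float *b, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) {
        c[i] = a[i] + b[i];
    }
}
"""

size = 10000
n = np.int32(size)
a = np.random.randn(size).astype(np.float32)
b = np.random.randn(size).astype(np.float32)
c = np.zeros_like(a)

# c is the only output; a and b are copied to the GPU on every call, n is a scalar
vector_add = PythonKernel("vector_add", kernel_string, size, [c, a, b, n],
                          params={"block_size_x": 128},
                          inputs=[False, True, True, False],
                          outputs=[True, False, False, False])
result, = vector_add(c, a, b, n)   # run_kernel returns a list of output arrays
assert np.allclose(result, a + b)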
""" Unit tests for optimizers. """<import_stmt>numpy<as>np<import_stmt>pytest<import_from_stmt>numpy.linalg norm<import_from_stmt>scipy.integrate odeint<import_from_stmt>sklearn.base BaseEstimator<import_from_stmt>sklearn.exceptions ConvergenceWarning<import_from_stmt>sklearn.exceptions NotFittedError<import_from_stmt>sklearn.linear_model ElasticNet<import_from_stmt>sklearn.linear_model Lasso<import_from_stmt>sklearn.utils.validation check_is_fitted<import_from_stmt>pysindy FiniteDifference<import_from_stmt>pysindy PolynomialLibrary<import_from_stmt>pysindy SINDy<import_from_stmt>pysindy.feature_library CustomLibrary<import_from_stmt>pysindy.optimizers ConstrainedSR3<import_from_stmt>pysindy.optimizers SINDyOptimizer<import_from_stmt>pysindy.optimizers SR3<import_from_stmt>pysindy.optimizers STLSQ<import_from_stmt>pysindy.optimizers TrappingSR3<import_from_stmt>pysindy.utils supports_multiple_targets<def_stmt>lorenz z t<block_start><return>10<times>(z[1]-z[0]) z[0]<times>(28-z[2])-z[1] z[0]<times>z[1]-8/3<times>z[2]<block_end><class_stmt>DummyLinearModel(BaseEstimator)# Does not natively support multiple targets <block_start><def_stmt>fit self x y<block_start>self.coef_=np.ones(x.shape[1])<line_sep>self.intercept_=0<line_sep><return>self<block_end><def_stmt>predict self x<block_start><return>x<block_end><block_end><class_stmt>DummyEmptyModel(BaseEstimator)# Does not have fit or predict methods <block_start><def_stmt>__init__ self<block_start>self.fit_intercept=<false><line_sep>self.normalize=<false><block_end><block_end><class_stmt>DummyModelNoCoef(BaseEstimator)# Does not set the coef_ attribute <block_start><def_stmt>fit self x y<block_start>self.intercept_=0<line_sep><return>self<block_end><def_stmt>predict self x<block_start><return>x<block_end><block_end>@pytest.mark.parametrize("cls, support" [(Lasso <true>) (STLSQ <true>) (SR3 <true>) (ConstrainedSR3 <true>) (TrappingSR3 <true>) (DummyLinearModel <false>) ] )<def_stmt>test_supports_multiple_targets cls support<block_start><assert_stmt>supports_multiple_targets(cls())<eq>support<block_end>@pytest.fixture(params=["data_derivative_1d" "data_derivative_2d"])<def_stmt>data request<block_start><return>request.getfixturevalue(request.param)<block_end>@pytest.mark.parametrize("optimizer" [STLSQ() SR3() ConstrainedSR3() TrappingSR3() Lasso(fit_intercept=<false>) ElasticNet(fit_intercept=<false>) DummyLinearModel() ] )<def_stmt>test_fit data optimizer<block_start>x,x_dot=data<if_stmt>len(x.shape)<eq>1<block_start>x=x.reshape(-1 1)<block_end>opt=SINDyOptimizer(optimizer unbias=<false>)<line_sep>opt.fit(x x_dot)<line_sep>check_is_fitted(opt)<assert_stmt>opt.complexity<ge>0<if_stmt>len(x_dot.shape)<g>1<block_start><assert_stmt>opt.coef_.shape<eq>(x.shape[1] x_dot.shape[1])<block_end><else_stmt><block_start><assert_stmt>opt.coef_.shape<eq>(1 x.shape[1])<block_end><block_end>@pytest.mark.parametrize("optimizer" [STLSQ() SR3()] )<def_stmt>test_not_fitted optimizer<block_start><with_stmt>pytest.raises(NotFittedError)<block_start>optimizer.predict(np.ones((1 3)))<block_end><block_end>@pytest.mark.parametrize("optimizer" [STLSQ() SR3()])<def_stmt>test_complexity_not_fitted optimizer data_derivative_2d<block_start><with_stmt>pytest.raises(NotFittedError)<block_start>optimizer.complexity<block_end>x,_=data_derivative_2d<line_sep>optimizer.fit(x x)<assert_stmt>optimizer.complexity<g>0<block_end>@pytest.mark.parametrize("kwargs" [{"normalize":<true>} {"fit_intercept":<true>} {"copy_X":<false>}])<def_stmt>test_alternate_parameters data_derivative_1d 
kwargs<block_start>x,x_dot=data_derivative_1d<line_sep>x=x.reshape(-1 1)<line_sep>model=STLSQ(**kwargs)<line_sep>model.fit(x x_dot)<line_sep>model.fit(x x_dot sample_weight=x[: 0])<line_sep>check_is_fitted(model)<block_end>@pytest.mark.parametrize("optimizer" [STLSQ SR3 ConstrainedSR3])@pytest.mark.parametrize("params" [dict(threshold=-1) dict(max_iter=0)])<def_stmt>test_general_bad_parameters optimizer params<block_start><with_stmt>pytest.raises(ValueError)<block_start>optimizer(**params)<block_end><block_end>@pytest.mark.parametrize("optimizer" [SR3 ConstrainedSR3])@pytest.mark.parametrize("params" [dict(nu=0) dict(tol=0) dict(trimming_fraction=-1) dict(trimming_fraction=2)] )<def_stmt>test_sr3_bad_parameters optimizer params<block_start><with_stmt>pytest.raises(ValueError)<block_start>optimizer(**params)<block_end><block_end>@pytest.mark.parametrize("params" [dict(eta=-1) dict(tol=0) dict(tol_m=0) dict(eps_solver=0) dict(alpha_m=-1) dict(alpha_A=-1) dict(gamma=1) dict(evolve_w=<false> relax_optim=<false>) dict(thresholder="l0") dict(threshold=-1) dict(max_iter=0) dict(eta=10 alpha_m=20) dict(eta=10 alpha_A=20) ] )<def_stmt>test_trapping_bad_parameters params<block_start><with_stmt>pytest.raises(ValueError)<block_start>TrappingSR3(**params)<block_end><block_end>@pytest.mark.parametrize("params" [dict(PL=np.random.rand(3 3 3 9)) dict(PQ=np.random.rand(3 3 3 3 9))] )<def_stmt>test_trapping_bad_tensors params<block_start>x=np.random.standard_normal((10 9))<line_sep>x_dot=np.random.standard_normal((10 3))<with_stmt>pytest.raises(ValueError)<block_start>model=TrappingSR3(**params)<line_sep>model.fit(x x_dot)<block_end><block_end>@pytest.mark.parametrize("params" [dict(PL=np.ones((3 3 3 9)) PQ=np.ones((3 3 3 3 9)))] )<def_stmt>test_trapping_quadratic_library params<block_start>x=np.random.standard_normal((10 3))<line_sep>library_functions=[<lambda>x:x <lambda>x y:x<times>y <lambda>x:x<power>2 ]<line_sep>library_function_names=[<lambda>x:str(x) <lambda>x y:"{} * {}".format(x y) <lambda>x:"{}^2".format(x) ]<line_sep>sindy_library=CustomLibrary(library_functions=library_functions function_names=library_function_names)<line_sep>opt=TrappingSR3(**params)<line_sep>model=SINDy(optimizer=opt feature_library=sindy_library)<line_sep>model.fit(x)<assert_stmt>opt.PL.shape<eq>(3 3 3 9)<assert_stmt>opt.PQ.shape<eq>(3 3 3 3 9)<line_sep>check_is_fitted(model)<block_end>@pytest.mark.parametrize("params" [dict(PL=np.ones((3 3 3 9)) PQ=np.ones((3 3 3 3 9)))] )<def_stmt>test_trapping_cubic_library params<block_start>x=np.random.standard_normal((10 3))<line_sep>library_functions=[<lambda>x:x <lambda>x y:x<times>y <lambda>x:x<power>2 <lambda>x y z:x<times>y<times>z <lambda>x y:x<power>2<times>y <lambda>x:x<power>3 ]<line_sep>library_function_names=[<lambda>x:str(x) <lambda>x y:"{} * {}".format(x y) <lambda>x:"{}^2".format(x) <lambda>x y z:"{} * {} * {}".format(x y z) <lambda>x y:"{}^2 * {}".format(x y) <lambda>x:"{}^3".format(x) ]<line_sep>sindy_library=CustomLibrary(library_functions=library_functions function_names=library_function_names)<with_stmt>pytest.raises(ValueError)<block_start>opt=TrappingSR3(**params)<line_sep>model=SINDy(optimizer=opt feature_library=sindy_library)<line_sep>model.fit(x)<block_end><block_end>@pytest.mark.parametrize("error, optimizer, params" [(ValueError STLSQ dict(alpha=-1)) (NotImplementedError SR3 dict(thresholder="l2")) (NotImplementedError ConstrainedSR3 dict(thresholder="l2")) (ValueError ConstrainedSR3 dict(thresholder="weighted_l0" thresholds=<none>)) (ValueError ConstrainedSR3 
dict(thresholder="weighted_l0" thresholds=<none>)) (ValueError ConstrainedSR3 dict(thresholds=-np.ones((5 5)))) ] )<def_stmt>test_specific_bad_parameters error optimizer params<block_start><with_stmt>pytest.raises(error)<block_start>optimizer(**params)<block_end><block_end><def_stmt>test_bad_optimizers data_derivative_1d<block_start>x,x_dot=data_derivative_1d<line_sep>x=x.reshape(-1 1)<with_stmt>pytest.raises(AttributeError)<block_start>opt=SINDyOptimizer(DummyEmptyModel())<block_end><with_stmt>pytest.raises(AttributeError)<block_start>opt=SINDyOptimizer(DummyModelNoCoef())<line_sep>opt.fit(x x_dot)<block_end><block_end>@pytest.mark.parametrize("optimizer" [SR3 ConstrainedSR3])<def_stmt>test_initial_guess_sr3 optimizer<block_start>x=np.random.standard_normal((10 3))<line_sep>x_dot=np.random.standard_normal((10 2))<line_sep>control_model=optimizer(max_iter=1).fit(x x_dot)<line_sep>initial_guess=np.random.standard_normal((x_dot.shape[1] x.shape[1]))<line_sep>guess_model=optimizer(max_iter=1 initial_guess=initial_guess).fit(x x_dot)<assert_stmt>np.any(np.not_equal(control_model.coef_ guess_model.coef_))<block_end># The different capitalizations are intentional; # I want to make sure different versions are recognized @pytest.mark.parametrize("optimizer" [SR3 ConstrainedSR3])@pytest.mark.parametrize("thresholder" ["L0" "l1"])<def_stmt>test_prox_functions data_derivative_1d optimizer thresholder<block_start>x,x_dot=data_derivative_1d<line_sep>x=x.reshape(-1 1)<line_sep>model=optimizer(thresholder=thresholder)<line_sep>model.fit(x x_dot)<line_sep>check_is_fitted(model)<block_end><def_stmt>test_cad_prox_function data_derivative_1d<block_start>x,x_dot=data_derivative_1d<line_sep>x=x.reshape(-1 1)<line_sep>model=SR3(thresholder="cAd")<line_sep>model.fit(x x_dot)<line_sep>check_is_fitted(model)<block_end>@pytest.mark.parametrize("thresholder" ["weighted_l0" "weighted_l1"])<def_stmt>test_weighted_prox_functions data thresholder<block_start>x,x_dot=data<if_stmt>x.ndim<eq>1<block_start>x=x.reshape(-1 1)<line_sep>thresholds=np.ones((1 1))<block_end><else_stmt><block_start>thresholds=np.ones((x_dot.shape[1] x.shape[1]))<block_end>model=ConstrainedSR3(thresholder=thresholder thresholds=thresholds)<line_sep>model.fit(x x_dot)<line_sep>check_is_fitted(model)<block_end>@pytest.mark.parametrize("thresholder" ["L0" "l1"])<def_stmt>test_constrained_sr3_prox_functions data_derivative_1d thresholder<block_start>x,x_dot=data_derivative_1d<line_sep>x=x.reshape(-1 1)<line_sep>model=ConstrainedSR3(thresholder=thresholder)<line_sep>model.fit(x x_dot)<line_sep>check_is_fitted(model)<block_end><def_stmt>test_unbias data_derivative_1d<block_start>x,x_dot=data_derivative_1d<line_sep>x=x.reshape(-1 1)<line_sep>optimizer_biased=SINDyOptimizer(STLSQ(threshold=0.01 alpha=0.1 max_iter=1) unbias=<false>)<line_sep>optimizer_biased.fit(x x_dot)<line_sep>optimizer_unbiased=SINDyOptimizer(STLSQ(threshold=0.01 alpha=0.1 max_iter=1) unbias=<true>)<line_sep>optimizer_unbiased.fit(x x_dot)<assert_stmt>(norm(optimizer_biased.coef_-optimizer_unbiased.coef_)/norm(optimizer_unbiased.coef_)<g>1e-9)<block_end><def_stmt>test_unbias_external data_derivative_1d<block_start>x,x_dot=data_derivative_1d<line_sep>x=x.reshape(-1 1)<line_sep>optimizer_biased=SINDyOptimizer(Lasso(alpha=0.1 fit_intercept=<false> max_iter=1) unbias=<false>)<line_sep>optimizer_biased.fit(x x_dot)<line_sep>optimizer_unbiased=SINDyOptimizer(Lasso(alpha=0.1 fit_intercept=<false> max_iter=1) unbias=<true>)<line_sep>optimizer_unbiased.fit(x 
x_dot)<assert_stmt>(norm(optimizer_biased.coef_-optimizer_unbiased.coef_)/(norm(optimizer_unbiased.coef_)+1e-5)<g>1e-9)<block_end>@pytest.mark.parametrize("optimizer" [SR3 ConstrainedSR3])<def_stmt>test_sr3_trimming optimizer data_linear_oscillator_corrupted<block_start>X,X_dot,trimming_array=data_linear_oscillator_corrupted<line_sep>optimizer_without_trimming=SINDyOptimizer(optimizer() unbias=<false>)<line_sep>optimizer_without_trimming.fit(X X_dot)<line_sep>optimizer_trimming=SINDyOptimizer(optimizer(trimming_fraction=0.15) unbias=<false>)<line_sep>optimizer_trimming.fit(X X_dot)<line_sep># Check that trimming found the right samples to remove np.testing.assert_array_equal(optimizer_trimming.optimizer.trimming_array trimming_array)<line_sep># Check that the coefficients found by the optimizer with trimming # are closer to the true coefficients than the coefficients found by the # optimizer without trimming true_coef=np.array([[-2.0 0.0] [0.0 1.0]])<assert_stmt>norm(true_coef-optimizer_trimming.coef_)<l>norm(true_coef-optimizer_without_trimming.coef_)<block_end>@pytest.mark.parametrize("optimizer" [SR3 ConstrainedSR3])<def_stmt>test_sr3_disable_trimming optimizer data_linear_oscillator_corrupted<block_start>x,x_dot,_=data_linear_oscillator_corrupted<line_sep>model_plain=optimizer()<line_sep>model_plain.fit(x x_dot)<line_sep>model_trimming=optimizer(trimming_fraction=0.5)<line_sep>model_trimming.disable_trimming()<line_sep>model_trimming.fit(x x_dot)<line_sep>np.testing.assert_allclose(model_plain.coef_ model_trimming.coef_)<block_end>@pytest.mark.parametrize("optimizer" [SR3 ConstrainedSR3])<def_stmt>test_sr3_enable_trimming optimizer data_linear_oscillator_corrupted<block_start>x,x_dot,_=data_linear_oscillator_corrupted<line_sep>model_plain=optimizer()<line_sep>model_plain.enable_trimming(trimming_fraction=0.5)<line_sep>model_plain.fit(x x_dot)<line_sep>model_trimming=optimizer(trimming_fraction=0.5)<line_sep>model_trimming.fit(x x_dot)<line_sep>np.testing.assert_allclose(model_plain.coef_ model_trimming.coef_)<block_end>@pytest.mark.parametrize("optimizer" [SR3 ConstrainedSR3 TrappingSR3])<def_stmt>test_sr3_warn optimizer data_linear_oscillator_corrupted<block_start>x,x_dot,_=data_linear_oscillator_corrupted<line_sep>model=optimizer(max_iter=1 tol=1e-10)<with_stmt>pytest.warns(ConvergenceWarning)<block_start>model.fit(x x_dot)<block_end><block_end>@pytest.mark.parametrize("optimizer" [STLSQ(max_iter=1) SR3(max_iter=1) ConstrainedSR3(max_iter=1) TrappingSR3(max_iter=1) ] )<def_stmt>test_fit_warn data_derivative_1d optimizer<block_start>x,x_dot=data_derivative_1d<line_sep>x=x.reshape(-1 1)<with_stmt>pytest.warns(ConvergenceWarning)<block_start>optimizer.fit(x x_dot)<block_end><block_end>@pytest.mark.parametrize("optimizer" [ConstrainedSR3 TrappingSR3])@pytest.mark.parametrize("target_value" [0 -1 3])<def_stmt>test_row_format_constraints data_linear_combination optimizer target_value# Solution is x_dot = x.dot(np.array([[1, 1, 0], [0, 1, 1]])) <block_start>x,x_dot=data_linear_combination<line_sep>constraint_rhs=target_value<times>np.ones(2)<line_sep>constraint_lhs=np.zeros((2 x.shape[1]<times>x_dot.shape[1]))<line_sep># Should force corresponding entries of coef_ to be target_value constraint_lhs[0 0]=1<line_sep>constraint_lhs[1 3]=1<line_sep>model=optimizer(constraint_lhs=constraint_lhs constraint_rhs=constraint_rhs constraint_order="feature" )<line_sep>model.fit(x x_dot)<line_sep>np.testing.assert_allclose(np.array([model.coef_[0 0] model.coef_[1 1]]) target_value 
atol=1e-8)<block_end>@pytest.mark.parametrize("optimizer" [ConstrainedSR3 TrappingSR3])@pytest.mark.parametrize("target_value" [0 -1 3])<def_stmt>test_target_format_constraints data_linear_combination optimizer target_value<block_start>x,x_dot=data_linear_combination<line_sep>constraint_rhs=target_value<times>np.ones(2)<line_sep>constraint_lhs=np.zeros((2 x.shape[1]<times>x_dot.shape[1]))<line_sep># Should force corresponding entries of coef_ to be target_value constraint_lhs[0 1]=1<line_sep>constraint_lhs[1 4]=1<line_sep>model=optimizer(constraint_lhs=constraint_lhs constraint_rhs=constraint_rhs)<line_sep>model.fit(x x_dot)<line_sep>np.testing.assert_allclose(model.coef_[: 1] target_value atol=1e-8)<block_end>@pytest.mark.parametrize("thresholds" [0.005 0.05])@pytest.mark.parametrize("relax_optim" [<false> <true>])@pytest.mark.parametrize("noise_levels" [0.0 0.05 0.5])<def_stmt>test_trapping_inequality_constraints thresholds relax_optim noise_levels<block_start>t=np.arange(0 40 0.05)<line_sep>x=odeint(lorenz [-8 8 27] t)<line_sep>x=x+np.random.normal(0.0 noise_levels x.shape)<line_sep># if order is "feature" constraint_rhs=np.array([-10.0 -2.0])<line_sep>constraint_matrix=np.zeros((2 30))<line_sep>constraint_matrix[0 6]=1.0<line_sep>constraint_matrix[1 17]=1.0<line_sep>feature_names=["x" "y" "z"]<line_sep>opt=TrappingSR3(threshold=thresholds constraint_lhs=constraint_matrix constraint_rhs=constraint_rhs constraint_order="feature" inequality_constraints=<true> relax_optim=relax_optim )<line_sep>poly_lib=PolynomialLibrary(degree=2)<line_sep>model=SINDy(optimizer=opt feature_library=poly_lib differentiation_method=FiniteDifference(drop_endpoints=<true>) feature_names=feature_names )<line_sep>model.fit(x t=t[1]-t[0])<assert_stmt>np.all(np.dot(constraint_matrix (model.coefficients()).flatten("F"))<le>constraint_rhs)<or>np.allclose(np.dot(constraint_matrix (model.coefficients()).flatten("F")) constraint_rhs)<block_end><def_stmt>test_inequality_constraints_reqs <block_start>constraint_rhs=np.array([-10.0 -2.0])<line_sep>constraint_matrix=np.zeros((2 30))<line_sep>constraint_matrix[0 6]=1.0<line_sep>constraint_matrix[1 17]=1.0<with_stmt>pytest.raises(ValueError)<block_start>TrappingSR3(threshold=0.0 constraint_lhs=constraint_matrix constraint_rhs=constraint_rhs constraint_order="feature" inequality_constraints=<true> relax_optim=<true> )<block_end><block_end>
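# --- Illustrative (non-test) sketch appended to this test module ---
# A minimal end-to-end fit using the lorenz() system and the STLSQ optimizer exercised by
# the tests above; guarded by __main__ so pytest collection is unaffected. The threshold
# value is an arbitrary choice for the sketch.
if __name__ == "__main__":
    t = np.arange(0, 10, 0.01)
    x = odeint(lorenz, [-8, 8, 27], t)
    model = SINDy(optimizer=STLSQ(threshold=0.1))
    model.fit(x, t=t[1] - t[0])
    model.print()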
<import_from_stmt>.rman_sg_node RmanSgNode<class_stmt>RmanSgEmitter(RmanSgNode)<block_start><def_stmt>__init__ self rman_scene sg_node db_name<block_start>super().__init__(rman_scene sg_node db_name)<line_sep>self.matrix_world=<none><line_sep>self.npoints=-1<line_sep>self.render_type=''<line_sep>self.sg_particles_node=<none><block_end>@property<def_stmt>matrix_world self<block_start><return>self.__matrix_world<block_end>@matrix_world.setter<def_stmt>matrix_world self mtx<block_start>self.__matrix_world=mtx<block_end>@property<def_stmt>npoints self<block_start><return>self.__npoints<block_end>@npoints.setter<def_stmt>npoints self npoints<block_start>self.__npoints=npoints<block_end>@property<def_stmt>render_type self<block_start><return>self.__render_type<block_end>@render_type.setter<def_stmt>render_type self render_type<block_start>self.__render_type=render_type<block_end><block_end>
<import_from_future_stmt> absolute_import division print_function unicode_literals<import_from_stmt>.throughput_benchmark ThroughputBenchmark<import_stmt>os.path<as>_osp<line_sep># Set the module for a given object for nicer printing <def_stmt>set_module obj mod<block_start><if_stmt><not>isinstance(mod str)<block_start><raise>TypeError("The mod argument should be a string")<block_end>obj.__module__=mod<block_end>#: Path to folder containing CMake definitions for Torch package cmake_prefix_path=_osp.join(_osp.dirname(_osp.dirname(__file__)) 'share' 'cmake')<line_sep>
""" regex_test.py author: <NAME> date: 2019-03-16 This module samples the fulltext of the arxiv, pulls out some arxiv IDs, and then checks these IDs against valid ones in our set of metadata, producing a report of bad id's found so that we can improve the citation extraction. """<import_stmt>os<import_stmt>re<import_stmt>numpy<as>np<import_stmt>arxiv_public_data.regex_arxiv<as>ra<line_sep>RE_FLEX=re.compile(ra.REGEX_ARXIV_FLEXIBLE)<def_stmt>strip_version name<block_start><return>name.split('v')[0]<block_end><def_stmt>format_cat name<block_start>""" Strip subcategory, add hyphen to category if missing """<if_stmt>'/'<in>name# OLD ID, names contains subcategory <block_start>catsubcat,aid=name.split('/')<line_sep>cat=catsubcat.split('.')[0]<line_sep><return>ra.dashdict.get(cat cat)+"/"+aid<block_end><else_stmt><block_start><return>name<block_end><block_end><def_stmt>zeropad_1501 name<block_start>""" Arxiv IDs after yymm=1501 are padded to 5 zeros """<if_stmt><not>'/'<in>name# new ID <block_start>yymm,num=name.split('.')<if_stmt>int(yymm)<g>1500<and>len(num)<l>5<block_start><return>yymm+".0"+num<block_end><block_end><return>name<block_end><def_stmt>clean name<block_start>funcs=[strip_version format_cat zeropad_1501]<for_stmt>func funcs<block_start>name=func(name)<block_end><return>name<block_end><def_stmt>get_alltxt directory='/pool0/arxiv/full-text'<block_start>out=[]<for_stmt>root,dirs,files os.walk(directory)<block_start><for_stmt>f files<block_start><if_stmt>'txt'<in>f<block_start>out.append(os.path.join(root f))<block_end><block_end><block_end><return>out<block_end><def_stmt>sample out num<block_start><return>[out[s]<for>s np.random.randint(0 len(out)-1 num)]<block_end><def_stmt>all_matches filename pattern=RE_FLEX<block_start>out=[]<line_sep>matches=pattern.findall(open(filename 'r').read())<for_stmt>match matches<block_start><for_stmt>group match<block_start><if_stmt>group<block_start>out.append(group)<block_end><block_end><block_end><return>out<block_end><def_stmt>all_matches_context filename pattern=RE_FLEX pad=10<block_start>out=[]<line_sep>contents=open(filename 'r').read()<line_sep>match=pattern.search(contents)<while_stmt>match<is><not><none><block_start>s,e=match.start() match.end()<line_sep>out.append(contents[max(s-pad 0):e+pad])<line_sep>contents=contents[e:]<line_sep>match=pattern.search(contents)<block_end><return>out<block_end><def_stmt>test_samples samples valid_ids directory='/pool0/arxiv/full-text' pattern=RE_FLEX showpasses=<false><block_start>failures=dict()<line_sep>n_matches=0<line_sep>n_failures=0<for_stmt>i,s enumerate(samples)<block_start>matches=all_matches(s pattern)<line_sep>n_matches<augadd>len(matches)<line_sep>valid=[clean(m)<in>valid_ids<for>m matches]<if_stmt><not>all(valid)<block_start>failures[s]=all_matches_context(s RE_FLEX)<line_sep>print("{}: BAD match in {}".format(i s))<for_stmt>v,c zip(valid failures[s])<block_start><if_stmt><not>v<block_start>n_failures<augadd>1<line_sep>print("\t{}".format(c))<block_end><block_end><block_end><else_stmt><block_start><if_stmt>showpasses<block_start>print("{}: {} had no match errors".format(i s))<block_end><block_end><block_end>error_rate=n_failures/n_matches<line_sep>print("Error rate from {} matches is {}".format(n_matches error_rate))<line_sep><return>failures<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_from_stmt>arxiv_public_data.oai_metadata load_metadata<line_sep>md_file='data/oai-arxiv-metadata-2019-03-01.json.gz'<line_sep>valid_ids=[m['id']<for>m 
load_metadata(md_file)]<line_sep>samples=sample(get_alltxt() 10000)<line_sep>failures=test_samples(samples valid_ids)<block_end>
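# --- Illustrative sketch appended to this script (not part of the original) ---
# Shows how the normalization helpers above behave on hand-written IDs; the IDs below are
# made up, and the exact groups returned by RE_FLEX.findall depend on REGEX_ARXIV_FLEXIBLE
# (which is why all_matches() above flattens the non-empty groups of each match tuple).
if __name__ == "__main__":
    examples = ["1501.0001v2", "astro-ph/0601001v1"]
    # clean() strips the version, normalizes the category, and zero-pads post-1501 IDs
    print([clean(name) for name in examples])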
# RevKit: A Toolkit for Reversible Circuit Design (www.revkit.org) # Copyright (C) 2009-2011 The RevKit Developers <<EMAIL>> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. <import_from_stmt>PyQt4.QtCore SIGNAL pyqtProperty<import_from_stmt>core.BaseItem *<import_from_stmt>revkit circuit reed_muller_synthesis_func swop transformation_based_synthesis_func gate_costs quantum_costs transistor_costs<import_from_stmt>helpers.RevKitHelper *<import_from_stmt>ui.DesignerWidget DesignerWidget<import_from_stmt>ui.TransformationBasedSynthesis Ui_TransformationBasedSynthesis<class_stmt>TransformationBasedSynthesis(DesignerWidget)<block_start><def_stmt>__init__ self parent=<none><block_start>DesignerWidget.__init__(self Ui_TransformationBasedSynthesis parent)<line_sep>self.connect(self.swop SIGNAL('currentIndexChanged(int)') self.swopChanged)<block_end><def_stmt>swopChanged self index<block_start>self.cost_function.setEnabled(index<g>0)<line_sep>self.cost_function_label.setEnabled(index<g>0)<block_end><block_end>@item("Transformation-based Synthesis" requires="Truth Table" provides="Circuit" properties=["variant" "bidi_synthesis" "swop" "cost_function"] widget={'class':TransformationBasedSynthesis 'size':(300 175)})<class_stmt>TransformationBasedSynthesisItem(BaseItem)<block_start>"""This item provides the transformation-based synthesis method as well as the corresponding synthesis with output permutation method. The respective synthesis approach can be selected in the pull-down menu (in case of synthesis with output permutation additionally the optimization criteria can be defined). Furthermore, it can be specified whether bi-directional synthesis should be applied or not. After the item has been processed, the enlarged item reports the run-time needed to perform the synthesis."""<def_stmt>onCreate self<block_start>self.setState(self.CONFIGURED)<block_end><def_stmt>executeEvent self inputs<block_start>circ=circuit()<line_sep>cf=[gate_costs quantum_costs transistor_costs][int(self.cost_function)]()<line_sep>synthesis=[transformation_based_synthesis_func reed_muller_synthesis_func][int(self.variant)]<line_sep>res=swop(circ inputs[0] enable=int(self.swop)<g>0 exhaustive=int(self.swop)<eq>1 synthesis=synthesis(bidirectional=bool(int(self.bidi_synthesis))) cf=cf)<if_stmt>type(res)<eq>dict<block_start><try_stmt><block_start>circ.circuit_name=inputs[0].name<block_end><except_stmt><block_start><pass><block_end>self.widget.runtime.setText("%.2f s"%res['runtime'])<line_sep>circuit_add_runtime(circ res['runtime'])<block_end><else_stmt><block_start><return>res<block_end><return>[circ]<block_end><def_stmt>onVariantChanged self value<block_start>suffix=["TT" "RMS"][int(value)]<line_sep>self.setText("Transformation-based (%s)"%suffix)<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>numpy<as>np<import_stmt>pyworld<import_stmt>pysptk<import_from_stmt>pysptk.synthesis MLSADF<class_stmt>Synthesizer(object)<block_start>""" Speech synthesizer with several acoustic features Parameters ---------- fs: int, optional Sampling frequency Default set to 16000 fftl: int, optional Frame Length of STFT Default set to 1024 shiftms: int, optional Shift size for STFT Default set to 5 """<def_stmt>__init__ self fs=16000 fftl=1024 shiftms=5<block_start>self.fs=fs<line_sep>self.fftl=fftl<line_sep>self.shiftms=shiftms<line_sep><return><block_end><def_stmt>synthesis self f0 mcep ap rmcep=<none> alpha=0.42<block_start>"""synthesis generates waveform from F0, mcep, aperiodicity Parameters ---------- f0 : array, shape (`T`, `1`) array of F0 sequence mcep : array, shape (`T`, `dim`) array of mel-cepstrum sequence ap : array, shape (`T`, `fftl // 2 + 1`) or (`T`, `dim_codeap`) array of aperiodicity or code aperiodicity rmcep : array, optional, shape (`T`, `dim`) array of reference mel-cepstrum sequence Default set to None alpha : float, optional Parameter of all-pass transfer function Default set to 0.42 Returns ---------- wav: array, Synthesized waveform """<if_stmt>rmcep<is><not><none># power modification <block_start>mcep=mod_power(mcep rmcep alpha=alpha)<block_end><if_stmt>ap.shape[1]<l>self.fftl<floordiv>2+1# decode codeap to ap <block_start>ap=pyworld.decode_aperiodicity(ap self.fs self.fftl)<block_end># mcep into spc spc=pysptk.mc2sp(mcep alpha self.fftl)<line_sep># generate waveform using world vocoder with f0, spc, ap wav=pyworld.synthesize(f0 spc ap self.fs frame_period=self.shiftms)<line_sep><return>wav<block_end><def_stmt>synthesis_diff self x diffmcep rmcep=<none> alpha=0.42<block_start>"""filtering with a differential mel-cepstrum Parameters ---------- x : array, shape (`samples`) array of waveform sequence diffmcep : array, shape (`T`, `dim`) array of differential mel-cepstrum sequence rmcep : array, shape (`T`, `dim`) array of reference mel-cepstrum sequence Default set to None alpha : float, optional Parameter of all-pass transfer function Default set to 0.42 Return ---------- wav: array, shape (`samples`) Synthesized waveform """<line_sep>x=x.astype(np.float64)<line_sep>dim=diffmcep.shape[1]-1<line_sep>shiftl=int(self.fs/1000<times>self.shiftms)<if_stmt>rmcep<is><not><none># power modification <block_start>diffmcep=mod_power(rmcep+diffmcep rmcep alpha=alpha)-rmcep<block_end>b=np.apply_along_axis(pysptk.mc2b 1 diffmcep alpha)<assert_stmt>np.isfinite(b).all()<line_sep>mlsa_fil=pysptk.synthesis.Synthesizer(MLSADF(dim alpha=alpha) shiftl)<line_sep>wav=mlsa_fil.synthesis(x b)<line_sep><return>wav<block_end><def_stmt>synthesis_spc self f0 spc ap<block_start>"""synthesis generates waveform from F0, spectral envelope, aperiodicity Parameters ---------- f0 : array, shape (`T`, `1`) array of F0 sequence spc : array, shape (`T`, `fftl // 2 + 1`) array of spectral envelope sequence ap : array, shape (`T`, `fftl // 2 + 1`) array of aperiodicity Return ------ wav: vector, shape (`samples`) Synthesized waveform """<line_sep># generate waveform using world vocoder with f0, spc, ap wav=pyworld.synthesize(f0 spc ap self.fs frame_period=self.shiftms)<line_sep><return>wav<block_end><block_end><def_stmt>mod_power cvmcep rmcep alpha=0.42 irlen=1024<block_start>"""Power modification based on impulse response Parameters ---------- cvmcep : array, shape (`T`, `dim`) array of converted mel-cepstrum rmcep : array, shape (`T`, `dim`) array of reference mel-cepstrum alpha : float, optional 
All-pass filter transfer function Default set to 0.42 irlen : int, optional Length of impulse response Default set to 1024 Return ------ modified_cvmcep : array, shape (`T`, `dim`) array of power modified converted mel-cepstrum """<if_stmt>rmcep.shape<ne>cvmcep.shape<block_start><raise>ValueError("The shapes of the converted and \ reference mel-cepstrum are different: \ {} / {}".format(cvmcep.shape rmcep.shape))<block_end>cv_e=pysptk.mc2e(cvmcep alpha=alpha irlen=irlen)<line_sep>r_e=pysptk.mc2e(rmcep alpha=alpha irlen=irlen)<line_sep>dpow=np.log(r_e/cv_e)/2<line_sep>modified_cvmcep=np.copy(cvmcep)<line_sep>modified_cvmcep[: 0]<augadd>dpow<line_sep><return>modified_cvmcep<block_end>
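# A minimal usage sketch (an assumption, not part of the original module): WORLD
# features are extracted with pyworld, the spectral envelope is converted to
# mel-cepstrum with pysptk, and the Synthesizer above resynthesizes a waveform.
# The waveform and parameter values are only illustrative.
import numpy as np
import pyworld
import pysptk

fs = 16000
x = np.random.randn(fs).astype(np.float64)        # stand-in for one second of audio
f0, t = pyworld.harvest(x, fs, frame_period=5.0)  # F0 contour
spc = pyworld.cheaptrick(x, f0, t, fs)            # spectral envelope
ap = pyworld.d4c(x, f0, t, fs)                    # aperiodicity
mcep = pysptk.sp2mc(spc, order=24, alpha=0.42)    # spectral envelope -> mel-cepstrum

synthesizer = Synthesizer(fs=fs, fftl=1024, shiftms=5)
wav = synthesizer.synthesis(f0, mcep, ap, alpha=0.42)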
"""Function to work on a population in dynamic mode."""<import_stmt>sys<import_from_stmt>redis StrictRedis<import_stmt>cloudpickle<as>pickle<import_from_stmt>time sleep time<import_stmt>logging<import_from_stmt>..util any_particle_preliminary<import_from_stmt>.cmd N_EVAL N_ACC N_REQ N_FAIL ALL_ACCEPTED N_WORKER N_LOOKAHEAD_EVAL SSA QUEUE BATCH_SIZE IS_LOOK_AHEAD ANALYSIS_ID MAX_N_EVAL_LOOK_AHEAD SLEEP_TIME DONE_IXS idfy <import_from_stmt>.cli KillHandler<line_sep>logger=logging.getLogger("ABC.Sampler")<def_stmt>work_on_population_dynamic analysis_id:str t:int redis:StrictRedis catch:bool start_time:float max_runtime_s:float kill_handler:KillHandler<block_start>"""Work on population in dynamic mode. Here the actual sampling happens. """<line_sep># short-form ana_id=analysis_id<def_stmt>get_int var:str<block_start>"""Convenience function to read an int variable."""<line_sep><return>int(redis.get(idfy(var ana_id t)).decode())<block_end># set timers population_start_time=time()<line_sep>cumulative_simulation_time=0<line_sep># read from pipeline pipeline=redis.pipeline()<line_sep># extract bytes (ssa_b batch_size_b all_accepted_b is_look_ahead_b max_eval_look_ahead_b)=(pipeline.get(idfy(SSA ana_id t)).get(idfy(BATCH_SIZE ana_id t)).get(idfy(ALL_ACCEPTED ana_id t)).get(idfy(IS_LOOK_AHEAD ana_id t)).get(idfy(MAX_N_EVAL_LOOK_AHEAD ana_id t)).execute())<line_sep># if the ssa object does not exist, something went wrong, return <if_stmt>ssa_b<is><none><block_start><return><block_end># notify sign up as worker n_worker=redis.incr(idfy(N_WORKER ana_id t))<line_sep>logger.info(f"Begin generation {t}, I am worker {n_worker}")<line_sep># only allow stopping the worker at particular points kill_handler.exit=<false><line_sep># convert from bytes simulate_one,sample_factory=pickle.loads(ssa_b)<line_sep>batch_size=int(batch_size_b.decode())<line_sep>all_accepted=bool(int(all_accepted_b.decode()))<line_sep>is_look_ahead=bool(int(is_look_ahead_b.decode()))<line_sep>max_n_eval_look_ahead=float(max_eval_look_ahead_b.decode())<line_sep># counter for number of simulations internal_counter=0<line_sep># create empty sample sample=sample_factory(is_look_ahead=is_look_ahead)<line_sep># loop until no more particles required # all numbers are re-loaded in each iteration as they can dynamically # update <while_stmt>get_int(N_ACC)<l>get_int(N_REQ)<and>(<not>all_accepted<or>get_int(N_EVAL)-get_int(N_FAIL)<l>get_int(N_REQ))# check whether the process was externally asked to stop <block_start><if_stmt>kill_handler.killed<block_start>logger.info(f"Worker {n_worker} received stop signal. 
"<concat>"Terminating in the middle of a population "<concat>f"after {internal_counter} samples.")<line_sep># notify quit redis.decr(idfy(N_WORKER ana_id t))<line_sep>sys.exit(0)<block_end># check whether time's up current_runtime=time()-start_time<if_stmt>current_runtime<g>max_runtime_s<block_start>logger.info(f"Worker {n_worker} stops during population because "<concat>f"runtime {current_runtime} exceeds "<concat>f"max runtime {max_runtime_s}")<line_sep># notify quit redis.decr(idfy(N_WORKER ana_id t))<line_sep># return to task queue <return><block_end># check whether the analysis was terminated or replaced by a new one ana_id_new_b=redis.get(ANALYSIS_ID)<if_stmt>ana_id_new_b<is><none><or>str(ana_id_new_b.decode())<ne>ana_id<block_start>logger.info(f"Worker {n_worker} stops during population because "<concat>"the analysis seems to have been stopped.")<line_sep># notify quit redis.decr(idfy(N_WORKER ana_id t))<line_sep># return to task queue <return><block_end># check if the analysis left the look-ahead mode <if_stmt>is_look_ahead<and><not>bool(int(redis.get(idfy(IS_LOOK_AHEAD ana_id t)).decode()))# reload SSA object <block_start>ssa_b=redis.get(idfy(SSA ana_id t))<line_sep>simulate_one,sample_factory=pickle.loads(ssa_b)<line_sep># cache is_look_ahead=<false><line_sep># create new empty sample for clean split sample=sample_factory(is_look_ahead=is_look_ahead)<block_end># check if in look-ahead mode and should sleep <if_stmt>is_look_ahead<and>get_int(N_EVAL)<ge>max_n_eval_look_ahead# sleep ... seconds <block_start>sleep(SLEEP_TIME)<line_sep><continue><block_end># increase global evaluation counter (before simulation!) particle_max_id:int=redis.incr(idfy(N_EVAL ana_id t) batch_size)<if_stmt>is_look_ahead# increment look-ahead evaluation counter <block_start>redis.incr(idfy(N_LOOKAHEAD_EVAL ana_id t) batch_size)<block_end># timer for current simulation until batch_size acceptances this_sim_start=time()<line_sep># collect accepted particles accepted_samples=[]<line_sep># whether any particle in this iteration is preliminary any_prel=<false><line_sep># make batch_size attempts <for_stmt>n_batched range(batch_size)# increase evaluation counter <block_start>internal_counter<augadd>1<try_stmt># simulate <block_start>new_sim=simulate_one()<block_end><except_stmt>Exception<as>e<block_start>logger.warning(f"Redis worker number {n_worker} failed. "<concat>f"Error message is: {e}")<line_sep># increment the failure counter redis.incr(idfy(N_FAIL ana_id t) 1)<if_stmt><not>catch<block_start><raise>e<block_end><continue><block_end># append to current sample sample.append(new_sim)<line_sep># check for acceptance <if_stmt>new_sim.accepted# The order of the IDs is reversed, but this does not # matter. 
Important is only that the IDs are specified # before the simulation starts # append to accepted list <block_start>accepted_samples.append(pickle.dumps((particle_max_id-n_batched sample)))<line_sep>any_prel=any_prel<or>any_particle_preliminary(sample)<line_sep># initialize new sample sample=sample_factory(is_look_ahead=is_look_ahead)<block_end><block_end># update total simulation-specific time cumulative_simulation_time<augadd>time()-this_sim_start<line_sep># new pipeline pipeline=redis.pipeline()<line_sep># push to pipeline if at least one sample got accepted <if_stmt>len(accepted_samples)<g>0# update particles counter if nothing is preliminary, # otherwise final acceptance is done by the sampler <block_start><if_stmt><not>any_prel<block_start>pipeline.incr(idfy(N_ACC ana_id t) len(accepted_samples))<block_end># note: samples are appended 1-by-1 pipeline.rpush(idfy(QUEUE ana_id t) *accepted_samples)<block_end># append to list of done simulations pipeline.rpush(idfy(DONE_IXS ana_id t) *range(particle_max_id-batch_size+1 particle_max_id+1) )<line_sep># execute all commands pipeline.execute()<block_end># end of sampling loop # notify quit redis.decr(idfy(N_WORKER ana_id t))<line_sep>kill_handler.exit=<true><line_sep>population_total_time=time()-population_start_time<line_sep>logger.info(f"Finished generation {t}, did {internal_counter} samples. "<concat>f"Simulation time: {cumulative_simulation_time:.2f}s, "<concat>f"total time {population_total_time:.2f}.")<block_end>
"""Particle Swarm Optimization-based algorithms. """<import_stmt>copy<import_stmt>numpy<as>np<import_stmt>opytimizer.math.random<as>r<import_stmt>opytimizer.utils.constant<as>c<import_stmt>opytimizer.utils.exception<as>e<import_stmt>opytimizer.utils.logging<as>l<import_from_stmt>opytimizer.core Optimizer<line_sep>logger=l.get_logger(__name__)<class_stmt>PSO(Optimizer)<block_start>"""A PSO class, inherited from Optimizer. This is the designed class to define PSO-related variables and methods. References: <NAME>, <NAME> and <NAME>. Swarm intelligence. Artificial Intelligence (2001). """<def_stmt>__init__ self params=<none><block_start>"""Initialization method. Args: params (dict): Contains key-value parameters to the meta-heuristics. """<line_sep>logger.info('Overriding class: Optimizer -> PSO.')<line_sep># Overrides its parent class with the receiving params super(PSO self).__init__()<line_sep># Inertia weight self.w=0.7<line_sep># Cognitive constant self.c1=1.7<line_sep># Social constant self.c2=1.7<line_sep># Builds the class self.build(params)<line_sep>logger.info('Class overrided.')<block_end>@property<def_stmt>w self<block_start>"""float: Inertia weight. """<line_sep><return>self._w<block_end>@w.setter<def_stmt>w self w<block_start><if_stmt><not>isinstance(w (float int))<block_start><raise>e.TypeError('`w` should be a float or integer')<block_end><if_stmt>w<l>0<block_start><raise>e.ValueError('`w` should be >= 0')<block_end>self._w=w<block_end>@property<def_stmt>c1 self<block_start>"""float: Cognitive constant. """<line_sep><return>self._c1<block_end>@c1.setter<def_stmt>c1 self c1<block_start><if_stmt><not>isinstance(c1 (float int))<block_start><raise>e.TypeError('`c1` should be a float or integer')<block_end><if_stmt>c1<l>0<block_start><raise>e.ValueError('`c1` should be >= 0')<block_end>self._c1=c1<block_end>@property<def_stmt>c2 self<block_start>"""float: Social constant. """<line_sep><return>self._c2<block_end>@c2.setter<def_stmt>c2 self c2<block_start><if_stmt><not>isinstance(c2 (float int))<block_start><raise>e.TypeError('`c2` should be a float or integer')<block_end><if_stmt>c2<l>0<block_start><raise>e.ValueError('`c2` should be >= 0')<block_end>self._c2=c2<block_end>@property<def_stmt>local_position self<block_start>"""np.array: Array of velocities. """<line_sep><return>self._local_position<block_end>@local_position.setter<def_stmt>local_position self local_position<block_start><if_stmt><not>isinstance(local_position np.ndarray)<block_start><raise>e.TypeError('`local_position` should be a numpy array')<block_end>self._local_position=local_position<block_end>@property<def_stmt>velocity self<block_start>"""np.array: Array of velocities. """<line_sep><return>self._velocity<block_end>@velocity.setter<def_stmt>velocity self velocity<block_start><if_stmt><not>isinstance(velocity np.ndarray)<block_start><raise>e.TypeError('`velocity` should be a numpy array')<block_end>self._velocity=velocity<block_end><def_stmt>compile self space<block_start>"""Compiles additional information that is used by this optimizer. Args: space (Space): A Space object containing meta-information. """<line_sep># Arrays of local positions and velocities self.local_position=np.zeros((space.n_agents space.n_variables space.n_dimensions))<line_sep>self.velocity=np.zeros((space.n_agents space.n_variables space.n_dimensions))<block_end><def_stmt>evaluate self space function<block_start>"""Evaluates the search space according to the objective function. Args: space (Space): A Space object that will be evaluated. 
function (Function): A Function object that will be used as the objective function. """<line_sep># Iterates through all agents <for_stmt>i,agent enumerate(space.agents)# Calculates the fitness value of current agent <block_start>fit=function(agent.position)<line_sep># If fitness is better than agent's best fit <if_stmt>fit<l>agent.fit# Updates its current fitness to the newer one <block_start>agent.fit=fit<line_sep># Also updates the local best position to current's agent position self.local_position[i]=copy.deepcopy(agent.position)<block_end># If agent's fitness is better than global fitness <if_stmt>agent.fit<l>space.best_agent.fit# Makes a deep copy of agent's local best position and fitness to the best agent <block_start>space.best_agent.position=copy.deepcopy(self.local_position[i])<line_sep>space.best_agent.fit=copy.deepcopy(agent.fit)<block_end><block_end><block_end><def_stmt>update self space<block_start>"""Wraps Particle Swarm Optimization over all agents and variables. Args: space (Space): Space containing agents and update-related information. """<line_sep># Iterates through all agents <for_stmt>i,agent enumerate(space.agents)# Generates random numbers <block_start>r1=r.generate_uniform_random_number()<line_sep>r2=r.generate_uniform_random_number()<line_sep># Updates agent's velocity (p. 294) self.velocity[i]=self.w<times>self.velocity[i]+self.c1<times>r1<times>(self.local_position[i]-agent.position)+self.c2<times>r2<times>(space.best_agent.position-agent.position)<line_sep># Updates agent's position (p. 294) agent.position<augadd>self.velocity[i]<block_end><block_end><block_end><class_stmt>AIWPSO(PSO)<block_start>"""An AIWPSO class, inherited from PSO. This is the designed class to define AIWPSO-related variables and methods. References: <NAME>, <NAME> and <NAME>. A novel particle swarm optimization algorithm with adaptive inertia weight. Applied Soft Computing (2011). """<def_stmt>__init__ self params=<none><block_start>"""Initialization method. Args: params (dict): Contains key-value parameters to the meta-heuristics. """<line_sep>logger.info('Overriding class: PSO -> AIWPSO.')<line_sep># Minimum inertia weight self.w_min=0.1<line_sep># Maximum inertia weight self.w_max=0.9<line_sep># Overrides its parent class with the receiving params super(AIWPSO self).__init__(params)<line_sep>logger.info('Class overrided.')<block_end>@property<def_stmt>w_min self<block_start>"""float: Minimum inertia weight. """<line_sep><return>self._w_min<block_end>@w_min.setter<def_stmt>w_min self w_min<block_start><if_stmt><not>isinstance(w_min (float int))<block_start><raise>e.TypeError('`w_min` should be a float or integer')<block_end><if_stmt>w_min<l>0<block_start><raise>e.ValueError('`w_min` should be >= 0')<block_end>self._w_min=w_min<block_end>@property<def_stmt>w_max self<block_start>"""float: Maximum inertia weight. """<line_sep><return>self._w_max<block_end>@w_max.setter<def_stmt>w_max self w_max<block_start><if_stmt><not>isinstance(w_max (float int))<block_start><raise>e.TypeError('`w_max` should be a float or integer')<block_end><if_stmt>w_max<l>0<block_start><raise>e.ValueError('`w_max` should be >= 0')<block_end><if_stmt>w_max<l>self.w_min<block_start><raise>e.ValueError('`w_max` should be >= `w_min`')<block_end>self._w_max=w_max<block_end>@property<def_stmt>fitness self<block_start>"""list: List of fitnesses. 
"""<line_sep><return>self._fitness<block_end>@fitness.setter<def_stmt>fitness self fitness<block_start><if_stmt><not>isinstance(fitness list)<block_start><raise>e.TypeError('`fitness` should be a list')<block_end>self._fitness=fitness<block_end><def_stmt>_compute_success self agents<block_start>"""Computes the particles' success for updating inertia weight (eq. 16). Args: agents (list): List of agents. """<line_sep># Initial counter p=0<line_sep># Iterates through every agent <for_stmt>i,agent enumerate(agents)# If current agent fitness is smaller than its best <block_start><if_stmt>agent.fit<l>self.fitness[i]# Increments the counter <block_start>p<augadd>1<block_end># Replaces fitness with current agent's fitness self.fitness[i]=agent.fit<block_end># Update inertia weight value self.w=(self.w_max-self.w_min)<times>(p/len(agents))+self.w_min<block_end><def_stmt>update self space iteration<block_start>"""Wraps Adaptive Inertia Weight Particle Swarm Optimization over all agents and variables. Args: space (Space): Space containing agents and update-related information. iteration (int): Current iteration. """<line_sep># Checks if it is the first iteration <if_stmt>iteration<eq>0# Creates a list of initial fitnesses <block_start>self.fitness=[agent.fit<for>agent space.agents]<block_end># Iterates through all agents <for_stmt>i,agent enumerate(space.agents)# Generates random numbers <block_start>r1=r.generate_uniform_random_number()<line_sep>r2=r.generate_uniform_random_number()<line_sep># Updates agent's velocity self.velocity[i]=self.w<times>self.velocity[i]+self.c1<times>r1<times>(self.local_position[i]-agent.position)+self.c2<times>r2<times>(space.best_agent.position-agent.position)<line_sep># Updates agent's position agent.position<augadd>self.velocity[i]<block_end># Computing particle's success and updating inertia weight self._compute_success(space.agents)<block_end><block_end><class_stmt>RPSO(PSO)<block_start>"""An RPSO class, inherited from Optimizer. This is the designed class to define RPSO-related variables and methods. References: <NAME>, <NAME>, <NAME>, <NAME> and <NAME>. Harnessing Particle Swarm Optimization Through Relativistic Velocity. IEEE Congress on Evolutionary Computation (2020). """<def_stmt>__init__ self params=<none><block_start>"""Initialization method. Args: params (dict): Contains key-value parameters to the meta-heuristics. """<line_sep>logger.info('Overriding class: PSO -> RPSO.')<line_sep># Overrides its parent class with the receiving params super(RPSO self).__init__(params)<line_sep>logger.info('Class overrided.')<block_end>@property<def_stmt>mass self<block_start>"""np.array: Array of masses. """<line_sep><return>self._mass<block_end>@mass.setter<def_stmt>mass self mass<block_start><if_stmt><not>isinstance(mass np.ndarray)<block_start><raise>e.TypeError('`mass` should be a numpy array')<block_end>self._mass=mass<block_end><def_stmt>compile self space<block_start>"""Compiles additional information that is used by this optimizer. Args: space (Space): A Space object containing meta-information. """<line_sep># Arrays of local positions, velocities and masses self.local_position=np.zeros((space.n_agents space.n_variables space.n_dimensions))<line_sep>self.velocity=np.zeros((space.n_agents space.n_variables space.n_dimensions))<line_sep>self.mass=r.generate_uniform_random_number(size=(space.n_agents space.n_variables space.n_dimensions))<block_end><def_stmt>update self space<block_start>"""Wraps Relativistic Particle Swarm Optimization over all agents and variables. 
Args: space (Space): Space containing agents and update-related information. """<line_sep># Calculates the maximum velocity max_velocity=np.max(self.velocity)<line_sep># Iterates through all agents <for_stmt>i,agent enumerate(space.agents)# Generates rnadom number <block_start>r1=r.generate_uniform_random_number()<line_sep>r2=r.generate_uniform_random_number()<line_sep># Updates current agent velocity (eq. 11) gamma=1/np.sqrt(1-(max_velocity<power>2/c.LIGHT_SPEED<power>2))<line_sep>self.velocity[i]=self.mass[i]<times>self.velocity[i]<times>gamma+self.c1<times>r1<times>(self.local_position[i]-agent.position)+self.c2<times>r2<times>(space.best_agent.position-agent.position)<line_sep># Updates current agent position agent.position<augadd>self.velocity[i]<block_end><block_end><block_end><class_stmt>SAVPSO(PSO)<block_start>"""An SAVPSO class, inherited from Optimizer. This is the designed class to define SAVPSO-related variables and methods. References: <NAME> and <NAME>. Self-adaptive velocity particle swarm optimization for solving constrained optimization problems. Journal of global optimization (2008). """<def_stmt>__init__ self params=<none><block_start>"""Initialization method. Args: params (dict): Contains key-value parameters to the meta-heuristics. """<line_sep>logger.info('Overriding class: PSO -> SAVPSO.')<line_sep># Overrides its parent class with the receiving params super(SAVPSO self).__init__(params)<line_sep>logger.info('Class overrided.')<block_end><def_stmt>update self space<block_start>"""Wraps Self-adaptive Velocity Particle Swarm Optimization over all agents and variables. Args: space (Space): Space containing agents and update-related information. """<line_sep># Creates an array of positions positions=np.zeros((space.agents[0].position.shape[0] space.agents[0].position.shape[1]))<line_sep># For every agent <for_stmt>agent space.agents# Sums up its position <block_start>positions<augadd>agent.position<block_end># Divides by the number of agents positions<augdiv>len(space.agents)<line_sep># Iterates through all agents <for_stmt>i,agent enumerate(space.agents)# Generates a random index for selecting an agent <block_start>idx=r.generate_integer_random_number(0 len(space.agents))<line_sep># Updates current agent's velocity (eq. 8) r1=r.generate_uniform_random_number()<line_sep>self.velocity[i]=self.w<times>np.fabs(self.local_position[idx]-self.local_position[i])<times>np.sign(self.velocity[i])+r1<times>(self.local_position[i]-agent.position)+(1-r1)<times>(space.best_agent.position-agent.position)<line_sep># Updates current agent's position agent.position<augadd>self.velocity[i]<line_sep># For every decision variable <for_stmt>j range(agent.n_variables)# Generates a random number <block_start>r4=r.generate_uniform_random_number(0 1)<line_sep># If position is greater than upper bound <if_stmt>agent.position[j]<g>agent.ub[j]# Replaces its value <block_start>agent.position[j]=positions[j]+1<times>r4<times>(agent.ub[j]-positions[j])<block_end># If position is smaller than lower bound <if_stmt>agent.position[j]<l>agent.lb[j]# Replaces its value <block_start>agent.position[j]=positions[j]+1<times>r4<times>(agent.lb[j]-positions[j])<block_end><block_end><block_end><block_end><block_end><class_stmt>VPSO(PSO)<block_start>"""A VPSO class, inherited from Optimizer. This is the designed class to define VPSO-related variables and methods. References: <NAME>. Vertical particle swarm optimization algorithm and its application in soft-sensor modeling. 
International Conference on Machine Learning and Cybernetics (2007). """<def_stmt>__init__ self params=<none><block_start>"""Initialization method. Args: params (dict): Contains key-value parameters to the meta-heuristics. """<line_sep>logger.info('Overriding class: PSO -> VPSO.')<line_sep># Overrides its parent class with the receiving params super(VPSO self).__init__(params)<line_sep>logger.info('Class overrided.')<block_end>@property<def_stmt>v_velocity self<block_start>"""np.array: Array of vertical velocities. """<line_sep><return>self._v_velocity<block_end>@v_velocity.setter<def_stmt>v_velocity self v_velocity<block_start><if_stmt><not>isinstance(v_velocity np.ndarray)<block_start><raise>e.TypeError('`v_velocity` should be a numpy array')<block_end>self._v_velocity=v_velocity<block_end><def_stmt>compile self space<block_start>"""Compiles additional information that is used by this optimizer. Args: space (Space): A Space object containing meta-information. """<line_sep># Arrays of local positions, velocities and vertical velocities self.local_position=np.zeros((space.n_agents space.n_variables space.n_dimensions))<line_sep>self.velocity=np.zeros((space.n_agents space.n_variables space.n_dimensions))<line_sep>self.v_velocity=np.ones((space.n_agents space.n_variables space.n_dimensions))<block_end><def_stmt>update self space<block_start>"""Wraps Vertical Particle Swarm Optimization over all agents and variables. Args: space (Space): Space containing agents and update-related information. """<line_sep># Iterates through all agents <for_stmt>i,agent enumerate(space.agents)# Generates uniform random numbers <block_start>r1=r.generate_uniform_random_number()<line_sep>r2=r.generate_uniform_random_number()<line_sep># Updates current agent velocity (eq. 3) self.velocity[i]=self.w<times>self.velocity[i]+self.c1<times>r1<times>(self.local_position[i]-agent.position)+self.c2<times>r2<times>(space.best_agent.position-agent.position)<line_sep># Updates current agent vertical velocity (eq. 4) self.v_velocity[i]<augsub>(np.dot(self.velocity[i].T self.v_velocity[i])/(np.dot(self.velocity[i].T self.velocity[i])+c.EPSILON))<times>self.velocity[i]<line_sep># Updates current agent position (eq. 5) r1=r.generate_uniform_random_number()<line_sep>agent.position<augadd>r1<times>self.velocity[i]+(1-r1)<times>self.v_velocity[i]<block_end><block_end><block_end>
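# A stand-alone numpy sketch (an assumption, for illustration only) of the
# canonical update rule that the classes above apply per agent: first the
# velocity, then the position.
import numpy as np

w, c1, c2 = 0.7, 1.7, 1.7                  # defaults used in PSO.__init__
x = np.random.uniform(-1.0, 1.0, size=3)   # current position
v = np.zeros(3)                            # current velocity
p_local = x.copy()                         # particle's best known position
p_global = np.zeros(3)                     # swarm's best known position

r1, r2 = np.random.random(), np.random.random()
v = w * v + c1 * r1 * (p_local - x) + c2 * r2 * (p_global - x)
x = x + v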
""" Detection dataset Hacked together by <NAME> """<import_stmt>torch.utils.data<as>data<import_stmt>numpy<as>np<import_from_stmt>PIL Image<import_from_stmt>.parsers create_parser<class_stmt>DetectionDatset(data.Dataset)<block_start>"""`Object Detection Dataset. Use with parsers for COCO, VOC, and OpenImages. Args: parser (string, Parser): transform (callable, optional): A function/transform that takes in an PIL image and returns a transformed version. E.g, ``transforms.ToTensor`` """<def_stmt>__init__ self data_dir parser=<none> parser_kwargs=<none> transform=<none><block_start>super(DetectionDatset self).__init__()<line_sep>parser_kwargs=parser_kwargs<or>{}<line_sep>self.data_dir=data_dir<if_stmt>isinstance(parser str)<block_start>self._parser=create_parser(parser **parser_kwargs)<block_end><else_stmt><block_start><assert_stmt>parser<is><not><none><and>len(parser.img_ids)<line_sep>self._parser=parser<block_end>self._transform=transform<block_end><def_stmt>__getitem__ self index<block_start>""" Args: index (int): Index Returns: tuple: Tuple (image, annotations (target)). """<line_sep>img_info=self._parser.img_infos[index]<line_sep>target=dict(img_idx=index img_size=(img_info['width'] img_info['height']))<if_stmt>self._parser.has_labels<block_start>ann=self._parser.get_ann_info(index)<line_sep>target.update(ann)<block_end>img_path=self.data_dir/img_info['file_name']<line_sep>img=Image.open(img_path).convert('RGB')<if_stmt>self.transform<is><not><none><block_start>img,target=self.transform(img target)<block_end><return>img target<block_end><def_stmt>__len__ self<block_start><return>len(self._parser.img_ids)<block_end>@property<def_stmt>parser self<block_start><return>self._parser<block_end>@property<def_stmt>transform self<block_start><return>self._transform<block_end>@transform.setter<def_stmt>transform self t<block_start>self._transform=t<block_end><block_end><class_stmt>SkipSubset(data.Dataset)<block_start>r""" Subset of a dataset at specified indices. Arguments: dataset (Dataset): The whole Dataset n (int): skip rate (select every nth) """<def_stmt>__init__ self dataset n=2<block_start>self.dataset=dataset<assert_stmt>n<ge>1<line_sep>self.indices=np.arange(len(dataset))[::n]<block_end><def_stmt>__getitem__ self idx<block_start><return>self.dataset[self.indices[idx]]<block_end><def_stmt>__len__ self<block_start><return>len(self.indices)<block_end>@property<def_stmt>parser self<block_start><return>self.dataset.parser<block_end>@property<def_stmt>transform self<block_start><return>self.dataset.transform<block_end>@transform.setter<def_stmt>transform self t<block_start>self.dataset.transform=t<block_end><block_end>
# Generated by Django 2.2.17 on 2021-01-21 17:45 <import_from_stmt>django.db migrations<line_sep>POLICY_DICT={"BLACKLIST":2 "WHITELIST":1 "SILENT_BLACKLIST":3 }<def_stmt>convert_santa_probes apps schema_editor<block_start>ProbeSource=apps.get_model("probes" "ProbeSource")<line_sep>Tag=apps.get_model("inventory" "Tag")<line_sep>Configuration=apps.get_model("santa" "Configuration")<line_sep>Rule=apps.get_model("santa" "Rule")<line_sep>Target=apps.get_model("santa" "Target")<line_sep>configurations=list(Configuration.objects.all())<if_stmt><not>configurations<block_start><return><block_end><for_stmt>ps ProbeSource.objects.filter(model="SantaProbe" status="ACTIVE")<block_start>body=ps.body<if_stmt><not>body<block_start><continue><block_end>rules=body.get("rules" [])<if_stmt><not>rules<block_start><continue><block_end>tag_ids=set([])<for_stmt>inv_filter body.get("filters" {}).get("inventory" [])<block_start><for_stmt>tag_id inv_filter.get("tag_ids" [])<block_start>tag_ids.add(tag_id)<block_end><block_end>tags=[]<if_stmt>tag_ids<block_start>tags=list(Tag.objects.filter(pk__in=tag_ids))<block_end><for_stmt>rule rules<block_start>policy=rule.get("policy")<if_stmt>policy<eq>"REMOVE"<block_start><continue><block_end>defaults={}<try_stmt><block_start>defaults["policy"]=POLICY_DICT[policy]<block_end><except_stmt>KeyError<block_start><continue><block_end>custom_msg=rule.get("custom_msg")<if_stmt>custom_msg<block_start>defaults["custom_msg"]=custom_msg<block_end><else_stmt><block_start>defaults["custom_msg"]=""<block_end>target_type=rule.get("rule_type")<if_stmt>target_type<not><in>("CERTIFICATE" "BINARY")<block_start><continue><block_end>sha256=rule.get("sha256")<if_stmt><not>sha256<block_start><continue><block_end>target,_=Target.objects.get_or_create(type=target_type sha256=sha256)<for_stmt>configuration configurations<block_start>r,_=Rule.objects.update_or_create(configuration=configuration target=target defaults=defaults)<for_stmt>tag tags<block_start>r.tags.add(tag)<block_end><block_end><block_end><block_end>ProbeSource.objects.filter(model="SantaProbe").delete()<block_end><class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('santa' '0021_delete_collectedapplication') ]<line_sep>operations=[migrations.RunPython(convert_santa_probes) ]<block_end>
# Copyright 2017 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>httplib2<import_stmt>json<import_from_stmt>oauth2client service_account# pylint: disable=no-name-in-module <import_stmt>os<import_stmt>sys<line_sep>DEFAULT_SCOPES=['https://www.googleapis.com/auth/userinfo.email']<line_sep>MILO_ENDPOINT='https://luci-milo.appspot.com/prpc/milo.Buildbot/'<line_sep>LOGDOG_ENDPOINT='https://luci-logdog.appspot.com/prpc/logdog.Logs/'<class_stmt>_MiloLogdogConfig(object)<block_start>"""Config class used to hold credentials shared by all API resquests."""<line_sep>credentials=<none><block_end><class_stmt>RequestError(Exception)<block_start><pass><block_end><def_stmt>IsOkStatus status<block_start><return>200<le>int(status)<le>299<block_end><def_stmt>SetCredentials json_keyfile scopes=<none><block_start>"""Configure the credentials used to access the milo/logdog API."""<line_sep>filepath=os.path.expanduser(json_keyfile)<if_stmt><not>os.path.isfile(filepath)<block_start>sys.stderr.write('Credentials not found: %s\n'%json_keyfile)<line_sep>sys.stderr.write('You need a json keyfile for a service account with '<concat>'milo/logdog access.\n')<line_sep>sys.exit(1)<block_end><if_stmt>scopes<is><none><block_start>scopes=DEFAULT_SCOPES<block_end>_MiloLogdogConfig.credentials=(service_account.ServiceAccountCredentials.from_json_keyfile_name(filepath scopes))<block_end><def_stmt>_Request url params<block_start><if_stmt>_MiloLogdogConfig.credentials<is><none># Try to use some default credentials if they haven't been explicitly set. <block_start>SetCredentials('~/.default_service_credentials.json')<block_end>http=_MiloLogdogConfig.credentials.authorize(httplib2.Http())<line_sep>body=json.dumps(params).encode('utf-8')<line_sep>response,content=http.request(url 'POST' body headers={'Accept':'application/json' 'Content-Type':'application/json'})<if_stmt><not>IsOkStatus(response['status'])<block_start><raise>RequestError('Server returned %s response'%response['status'])<block_end># Need to skip over the 4 characters jsonp header. <return>json.loads(content[4:].decode('utf-8'))<block_end><def_stmt>MiloRequest method params<block_start><return>_Request(MILO_ENDPOINT+method params)<block_end><def_stmt>LogdogRequest method params<block_start><return>_Request(LOGDOG_ENDPOINT+method params)<block_end>
"""Manage Treadmill app manifest. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_from_future_stmt> unicode_literals<import_stmt>logging<import_stmt>os<import_stmt>shlex<import_stmt>click<import_stmt>six<import_from_stmt>six.moves urllib_parse<import_from_stmt>treadmill cli<import_from_stmt>treadmill context<import_from_stmt>treadmill restclient<import_from_stmt>treadmill yamlwrapper<as>yaml<line_sep>_LOGGER=logging.getLogger(__name__)<line_sep>_DEFAULT_MEM='100M'<line_sep>_DEFAULT_DISK='100M'<line_sep>_DEFAULT_CPU='10%'<def_stmt>_run apis count manifest memory cpu disk tickets traits service restart_limit restart_interval endpoint debug debug_services appname command<block_start>"""Run Treadmill app."""<line_sep># too many branches # # pylint: disable=R0912 app={}<if_stmt>manifest<block_start>app=yaml.load(stream=manifest)<block_end><if_stmt>endpoint<block_start>app['endpoints']=[{'name':name 'port':port}<for>name,port endpoint]<block_end><if_stmt>tickets<block_start>app['tickets']=tickets<block_end><if_stmt>traits<block_start>app['traits']=traits<block_end><if_stmt>command<block_start><if_stmt><not>service# Take the basename of the command, always assume / on all # platforms. <block_start>service=os.path.basename(shlex.split(command[0])[0])<block_end><block_end>services_dict={svc['name']:svc<for>svc app.get('services' [])}<if_stmt>service<block_start><if_stmt>service<not><in>services_dict<block_start>services_dict[service]={'name':service 'restart':{'limit':restart_limit 'interval':restart_interval }}<block_end><if_stmt>command<block_start>services_dict[service]['command']=' '.join(list(command))<block_end><block_end><if_stmt>services_dict<block_start>app['services']=list(six.itervalues(services_dict))<block_end><if_stmt>app# Ensure defaults are set. <block_start><if_stmt>'memory'<not><in>app<block_start>app['memory']=_DEFAULT_MEM<block_end><if_stmt>'disk'<not><in>app<block_start>app['disk']=_DEFAULT_DISK<block_end><if_stmt>'cpu'<not><in>app<block_start>app['cpu']=_DEFAULT_CPU<block_end># Override if requested. 
<if_stmt>memory<is><not><none><block_start>app['memory']=str(memory)<block_end><if_stmt>disk<is><not><none><block_start>app['disk']=str(disk)<block_end><if_stmt>cpu<is><not><none><block_start>app['cpu']=str(cpu)<block_end><block_end>url='/instance/'+appname<line_sep>query={}<if_stmt>count<block_start>query['count']=count<block_end><if_stmt>debug<block_start>query['debug']='true'<block_end><if_stmt>debug_services<block_start>query['debug_services']=','.join(debug_services)<block_end><if_stmt>query<block_start>url='{}?{}'.format(url urllib_parse.urlencode(query))<block_end>response=restclient.post(apis url payload=app)<for_stmt>instance_id response.json()['instances']<block_start>cli.out(instance_id)<block_end><block_end><def_stmt>init <block_start>"""Return top level command handler."""<line_sep>@click.command()@click.option('--api-service-principal' required=<false> envvar='TREADMILL_API_SERVICE_PRINCIPAL' callback=cli.handle_context_opt help='API service principal for SPNEGO auth (default HTTP)' expose_value=<false>)@click.option('--cell' required=<true> envvar='TREADMILL_CELL' callback=cli.handle_context_opt expose_value=<false>)@click.option('--count' help='Number of instances to start' default=1)@click.option('-m' '--manifest' help='App manifest file (stream)' type=click.File('rb'))@click.option('--memory' help='Memory demand, default %s.'%_DEFAULT_MEM metavar='G|M' callback=cli.validate_memory)@click.option('--cpu' help='CPU demand, default %s.'%_DEFAULT_CPU metavar='XX%' callback=cli.validate_cpu)@click.option('--disk' help='Disk demand, default %s.'%_DEFAULT_DISK metavar='G|M' callback=cli.validate_disk)@click.option('--tickets' help='Tickets.' type=cli.LIST)@click.option('--traits' help='Traits.' type=cli.LIST)@click.option('--service' help='Service name.' type=str)@click.option('--restart-limit' type=int default=0 help='Service restart limit.')@click.option('--restart-interval' type=int default=60 help='Service restart limit interval.')@click.option('--endpoint' help='Network endpoint.' type=(str int) multiple=<true>)@click.option('--debug/--no-debug' help='Do not start services.' is_flag=<true> default=<false>)@click.option('--debug-services' help='Do not start specified services.' type=cli.LIST)@click.argument('appname')@click.argument('command' nargs=-1)@cli.handle_exceptions(restclient.CLI_REST_EXCEPTIONS)<def_stmt>run count manifest memory cpu disk tickets traits service restart_limit restart_interval endpoint debug debug_services appname command<block_start>"""Schedule Treadmill app. With no options, will schedule already configured app, fail if app is not configured. When manifest (or other options) are specified, they will be merged on top of existing manifest if it exists. """<line_sep>apis=context.GLOBAL.cell_api()<line_sep><return>_run(apis count manifest memory cpu disk tickets traits service restart_limit restart_interval endpoint debug debug_services appname command)<block_end><return>run<block_end>
<def_stmt>default_getter attribute=<none><block_start>"""a default method for missing renderer method for example, the support to write data in a specific file type is missing but the support to read data exists """<def_stmt>none_presenter _ **__<block_start>"""docstring is assigned a few lines down the line"""<line_sep><raise>NotImplementedError("%s getter is not defined."%attribute)<block_end>none_presenter.__doc__="%s getter is not defined."%attribute<line_sep><return>none_presenter<block_end><def_stmt>default_setter attribute=<none><block_start>"""a default method for missing parser method for example, the support to read data in a specific file type is missing but the support to write data exists """<def_stmt>none_importer _x _y **_z<block_start>"""docstring is assigned a few lines down the line"""<line_sep><raise>NotImplementedError("%s setter is not defined."%attribute)<block_end>none_importer.__doc__="%s setter is not defined."%attribute<line_sep><return>none_importer<block_end><def_stmt>make_a_property cls attribute doc_string getter_func=default_getter setter_func=default_setter <block_start>""" create custom attributes for each class """<line_sep>getter=getter_func(attribute)<line_sep>setter=setter_func(attribute)<line_sep>attribute_property=property(# note: # without fget, fset, pypy 5.4.0 crashes randomly. fget=getter fset=setter doc=doc_string )<if_stmt>"."<in>attribute<block_start>attribute=attribute.replace("." "_")<block_end><else_stmt><block_start>attribute=attribute<block_end>setattr(cls attribute attribute_property)<line_sep>setattr(cls "get_%s"%attribute getter)<line_sep>setattr(cls "set_%s"%attribute setter)<block_end>
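# A minimal usage sketch (an assumption; the class and attribute name are
# throwaway examples): the property generated by make_a_property() raises
# NotImplementedError until real getter/setter support is supplied.
class Book(object):
    pass

make_a_property(Book, "csv", "Get/set the content in csv format")

book = Book()
try:
    book.csv                      # the default getter raises
except NotImplementedError as error:
    print(error)                  # "csv getter is not defined."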
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>aliyunsdkcore.request RpcRequest<import_from_stmt>aliyunsdkmoguan_sdk.endpoint endpoint_data<class_stmt>RegisterDeviceRequest(RpcRequest)<block_start><def_stmt>__init__ self<block_start>RpcRequest.__init__(self 'moguan-sdk' '2021-04-15' 'RegisterDevice')<line_sep>self.set_method('POST')<if_stmt>hasattr(self "endpoint_map")<block_start>setattr(self "endpoint_map" endpoint_data.getEndpointMap())<block_end><if_stmt>hasattr(self "endpoint_regional")<block_start>setattr(self "endpoint_regional" endpoint_data.getEndpointRegional())<block_end><block_end><def_stmt>get_UserDeviceId self<block_start><return>self.get_body_params().get('UserDeviceId')<block_end><def_stmt>set_UserDeviceId self UserDeviceId<block_start>self.add_body_params('UserDeviceId' UserDeviceId)<block_end><def_stmt>get_Extend self<block_start><return>self.get_body_params().get('Extend')<block_end><def_stmt>set_Extend self Extend<block_start>self.add_body_params('Extend' Extend)<block_end><def_stmt>get_SdkCode self<block_start><return>self.get_body_params().get('SdkCode')<block_end><def_stmt>set_SdkCode self SdkCode<block_start>self.add_body_params('SdkCode' SdkCode)<block_end><def_stmt>get_AppKey self<block_start><return>self.get_body_params().get('AppKey')<block_end><def_stmt>set_AppKey self AppKey<block_start>self.add_body_params('AppKey' AppKey)<block_end><def_stmt>get_DeviceId self<block_start><return>self.get_body_params().get('DeviceId')<block_end><def_stmt>set_DeviceId self DeviceId<block_start>self.add_body_params('DeviceId' DeviceId)<block_end><block_end>
""" Copyright (c) 2020 COTOBA DESIGN, Inc. Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. """<import_stmt>unittest<import_stmt>os<import_from_stmt>programy.storage.stores.file.config FileStorageConfiguration<import_from_stmt>programy.storage.stores.file.engine FileStorageEngine<import_from_stmt>programy.storage.stores.file.config FileStoreConfiguration<import_from_stmt>programy.storage.factory StorageFactory<import_from_stmt>programy.rdf.collection RDFCollection<class_stmt>RDFCollectionCreationTests(unittest.TestCase)<block_start><def_stmt>test_add_collection self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<block_end><def_stmt>test_add_multi_object_collection self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACTOR" "ISA" "PERSON" "TEST")<line_sep>collection.add_entity("ACTOR" "ISA" "MAN" "TEST")<line_sep>self.assertTrue(collection.has_subject('ACTOR'))<line_sep>self.assertTrue(collection.has_predicate('ACTOR' 'ISA'))<line_sep>self.assertTrue(collection.has_object('ACTOR' 'ISA' "PERSON"))<line_sep>self.assertTrue(collection.has_object('ACTOR' 'ISA' "MAN"))<block_end><def_stmt>test_delete_collection_subject self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>collection.delete_entity("ACCOUNT")<line_sep>self.assertFalse(collection.has_subject('ACCOUNT'))<block_end><def_stmt>test_delete_collection_subject_predicate self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>collection.delete_entity("ACCOUNT" "hasSize")<line_sep>self.assertFalse(collection.has_subject('ACCOUNT'))<block_end><def_stmt>test_delete_collection_subject_predicate_object 
self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>collection.delete_entity("ACCOUNT" "hasSize" "0")<line_sep>self.assertFalse(collection.has_subject('ACCOUNT'))<line_sep>self.assertFalse(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertFalse(collection.has_object('ACCOUNT' 'hasSize' "0"))<block_end><def_stmt>test_delete_collection_subject_predicate_diff_object self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANKING" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>collection.delete_entity("ACCOUNT" "hasSize" "1")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<block_end><def_stmt>test_delete_collection_diff_subject self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANKING" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>collection.delete_entity("ACCOUNT1" "hasSize" "0")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<block_end><def_stmt>test_delete_collection_diff_predicate self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANKING" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>collection.delete_entity("ACCOUNT" "hasSize1" "1")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<block_end><def_stmt>test_delete_collection_diff_predicate_none_obj self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANKING" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>collection.delete_entity("ACCOUNT" "hasSize1" <none>)<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<block_end><def_stmt>test_collection_update_to_updates_file 
self<block_start>config=FileStorageConfiguration()<line_sep>tmpdir=os.path.dirname(__file__)+os.sep+"rdf_updates"<line_sep>config.rdf_updates_storage._dirs=[tmpdir]<line_sep>config.rdf_updates_storage._has_single_file=<true><line_sep>factory=StorageFactory()<line_sep>storage_engine=FileStorageEngine(config)<line_sep>factory._storage_engines[StorageFactory.RDF_UPDATES]=storage_engine<line_sep>factory._store_to_engine_map[StorageFactory.RDF_UPDATES]=storage_engine<line_sep>updates_engine=factory.entity_storage_engine(StorageFactory.RDF_UPDATES)<line_sep>updates_store=updates_engine.rdf_updates_store()<line_sep>updates_store.empty()<line_sep>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection._storage_factory=factory<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANKING" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>collection.delete_entity("ACCOUNT" "hasSize" "0")<line_sep>self.assertFalse(collection.has_subject('ACCOUNT'))<line_sep>updates_store.empty()<block_end><def_stmt>test_collection_others self<block_start>collection=RDFCollection()<line_sep>self.assertIsNotNone(collection)<line_sep>collection.add_entity("ACCOUNT" "hasSize" "0" "BANKING" "BANIKING")<line_sep>self.assertTrue(collection.has_subject('ACCOUNT'))<line_sep>self.assertTrue(collection.has_predicate('ACCOUNT' 'hasSize'))<line_sep>self.assertTrue(collection.has_object('ACCOUNT' 'hasSize' "0"))<line_sep>self.assertIsNone(collection.storename("BANKING1"))<line_sep>self.assertEqual(0 len(collection.predicates("account1")))<line_sep>self.assertEqual(0 len(collection.objects("ACCOUNT1" "hasSize")))<line_sep>self.assertEqual(0 len(collection.objects("ACCOUNT" "hasSize1")))<line_sep>self.assertFalse(collection.has_object("ACCOUNT" "hasSize" "1"))<block_end><block_end>
<import_from_stmt>models ResNet20<import_from_stmt>models ShiftResNet20<import_from_stmt>models ResNet56<import_from_stmt>models ShiftResNet56<import_from_stmt>models ResNet110<import_from_stmt>models ShiftResNet110<import_stmt>torch<import_from_stmt>torch.autograd Variable<import_stmt>numpy<as>np<import_stmt>argparse<line_sep>all_models={'resnet20':ResNet20 'shiftresnet20':ShiftResNet20 'resnet56':ResNet56 'shiftresnet56':ShiftResNet56 'resnet110':ResNet110 'shiftresnet110':ShiftResNet110 }<line_sep>parser=argparse.ArgumentParser(description='PyTorch CIFAR10 Training')<line_sep>parser.add_argument('--arch' choices=all_models.keys() help='Architecture to count parameters for' default='shiftresnet110')<line_sep>parser.add_argument('--expansion' type=int default=1 help='expansion for shift layers')<line_sep>parser.add_argument('--reduction' type=float default=1 help='reduction for resnet')<line_sep>parser.add_argument('--reduction-mode' choices=('block' 'net' 'depthwise' 'shuffle' 'mobile') help='"block" reduces inner representation for BasicBlock, "net" reduces for all layers' default='net')<line_sep>args=parser.parse_args()<def_stmt>count_params net<block_start><return>sum([np.prod(param.size())<for>name,param net.named_parameters()])<block_end><def_stmt>count_flops net<block_start>"""Approximately count number of FLOPs"""<line_sep>dummy=Variable(torch.randn(1 3 32 32)).cuda()# size is specific to cifar10, cifar100! net.cuda().forward(dummy)<line_sep><return>net.flops()<block_end>original=all_models[args.arch.replace('shift' '')]()<line_sep>original_count=count_params(original)<line_sep>original_flops=count_flops(original)<line_sep>cls=all_models[args.arch]<assert_stmt>'shift'<not><in>args.arch<or>args.reduction<eq>1 'Only default resnet supports reductions'<if_stmt>args.reduction<ne>1<block_start>print('==> %s with reduction %.2f'%(args.arch args.reduction))<line_sep>net=cls(reduction=args.reduction reduction_mode=args.reduction_mode)<block_end><else_stmt><block_start>net=cls()<if>'shift'<not><in>args.arch<else>cls(expansion=args.expansion)<block_end>new_count=count_params(net)<line_sep>new_flops=count_flops(net)<line_sep>print('Parameters: (new) %d (original) %d (reduction) %.2f'%(new_count original_count float(original_count)/new_count))<line_sep>print('FLOPs: (new) %d (original) %d (reduction) %.2f'%(new_flops original_flops float(original_flops)/new_flops))<line_sep>
# Copyright (C) 2017-2020 <NAME> <<EMAIL>> # # This Source Code Form is subject to the terms of the Mozilla Public # License, v. 2.0. If a copy of the MPL was not distributed with this # file, you can obtain one at http://mozilla.org/MPL/2.0/. # Exhibit B is not attached; this software is compatible with the # licenses expressed under Section 1.12 of the MPL v2. <import_stmt>sys<import_stmt>tensorflow<as>tf<def_stmt>keras <block_start>"""Define a trivial module for image (28x28x1) classification. Export it as a SavedModel without even training it. Serialize the uninitialized Keras Sequential model as-is."""<line_sep>model=tf.keras.Sequential([tf.keras.layers.Conv2D(8 (3 3) strides=(2 2) padding="valid" input_shape=(28 28 1) activation=tf.nn.relu name="inputs" ) # 13x13x8 tf.keras.layers.Conv2D(16 (3 3) strides=(2 2) padding="valid" activation=tf.nn.relu) # 6x6x16 tf.keras.layers.Flatten() tf.keras.layers.Dense(10 name="logits") # linear ])<line_sep>tf.saved_model.save(model "output/keras")<block_end><def_stmt>tf_function <block_start><pass><block_end><def_stmt>main <block_start>tf.io.gfile.makedirs("output")<line_sep>keras()<line_sep>tf_function()<block_end><if_stmt>__name__<eq>"__main__"<block_start>sys.exit(main())<block_end>
<import_from_stmt>lxml etree<import_from_stmt>sciencebeam.transformers.xslt _to_xslt_input<class_stmt>TestToXsltInput<block_start><def_stmt>test_should_tolerate_duplicate_ids self<block_start>result:etree.ElementBase=_to_xslt_input(''' <xml> <item xml:id="id1">item 1</item> <item xml:id="id1">item 2</item> </xml> ''')<line_sep>items=result.findall('item')<assert_stmt>len(items)<eq>2<assert_stmt>[item.text<for>item items]<eq>['item 1' 'item 2']<block_end><block_end>
print(()<eq>())<line_sep>print(()<g>())<line_sep>print(()<l>())<line_sep>print(()<eq>(1 ))<line_sep>print((1 )<eq>())<line_sep>print(()<g>(1 ))<line_sep>print((1 )<g>())<line_sep>print(()<l>(1 ))<line_sep>print((1 )<l>())<line_sep>print(()<ge>(1 ))<line_sep>print((1 )<ge>())<line_sep>print(()<le>(1 ))<line_sep>print((1 )<le>())<line_sep>print((1 )<eq>(1 ))<line_sep>print((1 )<ne>(1 ))<line_sep>print((1 )<eq>(2 ))<line_sep>print((1 )<eq>(1 0 ))<line_sep>print((1 )<g>(1 ))<line_sep>print((1 )<g>(2 ))<line_sep>print((2 )<g>(1 ))<line_sep>print((1 0 )<g>(1 ))<line_sep>print((1 -1 )<g>(1 ))<line_sep>print((1 )<g>(1 0 ))<line_sep>print((1 )<g>(1 -1 ))<line_sep>print((1 )<l>(1 ))<line_sep>print((2 )<l>(1 ))<line_sep>print((1 )<l>(2 ))<line_sep>print((1 )<l>(1 0 ))<line_sep>print((1 )<l>(1 -1 ))<line_sep>print((1 0 )<l>(1 ))<line_sep>print((1 -1 )<l>(1 ))<line_sep>print((1 )<ge>(1 ))<line_sep>print((1 )<ge>(2 ))<line_sep>print((2 )<ge>(1 ))<line_sep>print((1 0 )<ge>(1 ))<line_sep>print((1 -1 )<ge>(1 ))<line_sep>print((1 )<ge>(1 0 ))<line_sep>print((1 )<ge>(1 -1 ))<line_sep>print((1 )<le>(1 ))<line_sep>print((2 )<le>(1 ))<line_sep>print((1 )<le>(2 ))<line_sep>print((1 )<le>(1 0 ))<line_sep>print((1 )<le>(1 -1 ))<line_sep>print((1 0 )<le>(1 ))<line_sep>print((1 -1 )<le>(1 ))<line_sep>print((10 0)<g>(1 1))<line_sep>print((10 0)<l>(1 1))<line_sep>print((0 0 10 0)<g>(0 0 1 1))<line_sep>print((0 0 10 0)<l>(0 0 1 1))<line_sep>
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>os<import_stmt>sys<import_stmt>numpy<as>np<import_stmt>paddle<import_from_stmt>pgl.utils.data Dataset<as>BaseDataset<import_from_stmt>pgl.utils.data Dataloader<import_stmt>pgl<import_from_stmt>pgl.utils.logger log<class_stmt>Dataset(BaseDataset)<block_start>""" Dataset for CDR(cancer drug response) """<def_stmt>__init__ self processed_data<block_start>self.data=processed_data<line_sep>self.keys=list(processed_data.keys())<line_sep>self.num_samples=len(processed_data[self.keys[0]])<block_end><def_stmt>__getitem__ self idx<block_start><return>self.data[self.keys[0]][idx] self.data[self.keys[1]][idx] self.data[self.keys[2]][idx] self.data[self.keys[3]][idx] self.data[self.keys[4]][idx]<block_end><def_stmt>get_data_loader self batch_size num_workers=1 shuffle=<false> collate_fn=<none><block_start>"""Get dataloader. Args: batch_size (int): number of data items in a batch. num_workers (int): number of parallel workers. shuffle (int): whether to shuffle yield data. collate_fn: callable function that processes batch data to a list of paddle tensor. """<line_sep><return>Dataloader(self batch_size=batch_size num_workers=num_workers shuffle=shuffle collate_fn=collate_fn)<block_end><def_stmt>__len__ self<block_start><return>self.num_samples<block_end><block_end><def_stmt>collate_fn batch_data<block_start>""" Collation function to distribute data to samples :param batch_data: batch data """<line_sep>graphs=[]<line_sep>mut,gexpr,met,Y=[] [] [] []<for_stmt>g,mu,gex,me,y batch_data<block_start>graphs.append(g)<line_sep>mut.append(mu)<line_sep>gexpr.append(gex)<line_sep>met.append(me)<line_sep>Y.append(y)<block_end><return>graphs mut gexpr met Y<block_end>
<import_stmt>sys<if_stmt>sys.version_info<l>(3 )<block_start>range=xrange<block_end><import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>scipy.linalg<as>la<import_stmt>scipy.sparse<as>sp<import_stmt>scipy.stats<as>ss<import_from_stmt>scipy.stats multivariate_normal<import_from_stmt>.. arma<import_from_stmt>.. output<as>op<import_from_stmt>.. tests<as>tst<import_from_stmt>.. tsm<as>tsm<import_from_stmt>.. data_check<as>dc<import_from_stmt>.kernels *<class_stmt>GPNARX(tsm.TSM)<block_start>""" Inherits time series methods from TSM class. **** GAUSSIAN PROCESS NONLINEAR AUTOREGRESSIVE (GP-NARX) MODELS **** Parameters ---------- data : pd.DataFrame or np.array Field to specify the time series data that will be used. ar : int Field to specify how many AR terms the model will have. kernel : kernel object For example, SquaredExponential() or OrnsteinUhlenbeck() integ : int (default : 0) Specifies how many time to difference the time series. target : str (pd.DataFrame) or int (np.array) Specifies which column name or array index to use. By default, first column/array will be selected as the dependent variable. """<def_stmt>__init__ self data ar kernel integ=0 target=<none># Initialize TSM object <block_start>super(GPNARX self).__init__('GPNARX')<line_sep># Latent variables self.ar=ar<if_stmt>ar<l>1<block_start><raise>ValueError('Cannot have less than 1 AR term!')<block_end>self.integ=integ<line_sep>self.max_lag=self.ar<line_sep>self.model_name='GPNARX('+str(self.ar)+')'<line_sep>self._z_hide=0# Whether to cutoff variance latent variables from results self.supported_methods=["MLE" "PML" "Laplace" "M-H" "BBVI"]<line_sep>self.default_method="MLE"<line_sep>self.multivariate_model=<false><line_sep># Format the data self.data,self.data_name,self.is_pandas,self.index=dc.data_check(data target)<line_sep>self.data_original=self.data.copy()<line_sep># Difference data <for_stmt>order range(self.integ)<block_start>self.data=np.diff(self.data)<line_sep>self.data_name="Differenced "+self.data_name<block_end>self.index=self.index[self.integ:len(self.index)]<line_sep># Apply normalization self.data_full=self.data.copy()<line_sep>self.data=np.array(self.data_full[self.max_lag:self.data_full.shape[0]])# adjust for lags self._norm_mean=np.mean(self.data)<line_sep>self._norm_std=np.std(self.data)<line_sep>self.data=(self.data-self._norm_mean)/self._norm_std<line_sep>self.data_full=(self.data_full-self._norm_mean)/self._norm_std<line_sep>self.kernel=kernel<line_sep>self.kernel.X=self.X().T<line_sep># Define latent variables self._create_latent_variables()<line_sep>self.neg_loglik=self.full_neg_loglik<block_end><def_stmt>_alpha self L<block_start>""" Covariance-derived term to construct expectations. See Rasmussen & Williams. 
Parameters ---------- L : np.ndarray Cholesky triangular Returns ---------- np.ndarray (alpha) """<line_sep><return>la.cho_solve((L.T <true>) la.cho_solve((L <true>) np.transpose(self.data)))<block_end><def_stmt>_construct_predict self beta h<block_start>""" Creates h-step ahead forecasts for the Gaussian process Parameters ---------- beta : np.array Contains untransformed starting values for the latent variables h: int How many steps ahead to forecast Returns ---------- - predictions - variance of predictions """<line_sep># Refactor this entire code in future parm=np.array([self.latent_variables.z_list[k].prior.transform(beta[k])<for>k range(beta.shape[0])])<line_sep>Xstart=self.X().copy()<line_sep>Xstart=[i<for>i Xstart]<line_sep>predictions=np.zeros(h)<line_sep>variances=np.zeros(h)<for_stmt>step range(0 h)<block_start>Xstar=[]<for_stmt>lag range(0 self.max_lag)<block_start><if_stmt>lag<eq>0<block_start><if_stmt>step<eq>0<block_start>Xstar.append([self.data[-1]])<line_sep>Xstart[0]=np.append(Xstart[0] self.data[-1])<block_end><else_stmt><block_start>Xstar.append([predictions[step-1]])<line_sep>Xstart[0]=np.append(Xstart[0] predictions[step-1])<block_end><block_end><else_stmt><block_start>Xstar.append([Xstart[lag-1][-2]])<line_sep>Xstart[lag]=np.append(Xstart[lag] Xstart[lag-1][-2])<block_end><block_end>Kstar=self.kernel.Kstar(parm np.transpose(np.array(Xstar)))<line_sep>L=self._L(parm)<line_sep>alpha=self._alpha(L)<line_sep>predictions[step]=np.dot(np.transpose(Kstar) alpha)<line_sep>v=la.cho_solve((L <true>) Kstar)<line_sep>variances[step]=self.kernel.Kstarstar(parm np.transpose(np.array(Xstar)))-np.dot(v.T v)<block_end><return>predictions variances predictions-1.98<times>np.power(variances 0.5) predictions+1.98<times>np.power(variances 0.5)<block_end><def_stmt>_create_latent_variables self<block_start>""" Creates model latent variables Returns ---------- None (changes model attributes) """<line_sep># Create latent variables <for_stmt>no,i enumerate(self.kernel.build_latent_variables())<block_start>self.latent_variables.add_z(i[0] i[1] i[2])<line_sep>self.latent_variables.z_list[no].start=i[3]<block_end>self.z_no=len(self.kernel.build_latent_variables())<line_sep># Use an ARIMA model to find starting point for the initial noise latent variable arma_start=arma.ARIMA(self.data ar=self.ar ma=0 integ=self.integ)<line_sep>x=arma_start.fit()<line_sep>arma_starting_values=arma_start.latent_variables.get_z_values()<line_sep>self.latent_variables.z_list[0].start=np.log(np.exp(np.power(arma_starting_values[-1] 2)))<block_end><def_stmt>_L self parm<block_start>""" Creates cholesky decomposition of covariance matrix Parameters ---------- parm : np.array Contains transformed latent variables Returns ---------- The cholesky decomposition (L) of K """<line_sep><return>np.linalg.cholesky(self.kernel.K(parm)+np.identity(self.X().shape[1])<times>parm[0])<block_end><def_stmt>X self<block_start>""" Creates design matrix of variables to use in GP regression Returns ---------- The design matrix """<if_stmt>self.ar<eq>1<block_start><return>np.array([self.data_full[(self.max_lag-1):-1]])<block_end><else_stmt><block_start><for_stmt>i range(0 self.ar)<block_start>datapoint=self.data_full[(self.max_lag-i-1):-i-1]<if_stmt>i<eq>0<block_start>X=datapoint<block_end><else_stmt><block_start>X=np.vstack((X datapoint))<block_end><block_end><block_end><return>X<block_end><def_stmt>expected_values self beta<block_start>""" Expected values of the function given the covariance matrix and hyperparameters Parameters 
---------- beta : np.ndarray Contains untransformed values for latent variables Returns ---------- The expected values of the function """<line_sep>parm=np.array([self.latent_variables.z_list[k].prior.transform(beta[k])<for>k range(beta.shape[0])])<line_sep>L=self._L(parm)<line_sep>alpha=self._alpha(L)<line_sep><return>np.dot(np.transpose(self.kernel.K(parm)) alpha)<block_end><def_stmt>variance_values self beta<block_start>""" Covariance matrix for the estimated function Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- Covariance matrix for the estimated function """<line_sep>parm=np.array([self.latent_variables.z_list[k].prior.transform(beta[k])<for>k range(beta.shape[0])])<line_sep>L=self._L(parm)<line_sep>v=la.cho_solve((L <true>) self.kernel.K(parm))<line_sep><return>self.kernel.K(parm)-np.dot(v.T v)<block_end><def_stmt>full_neg_loglik self beta<block_start>""" Creates the negative log marginal likelihood of the model Parameters ---------- beta : np.array Contains untransformed starting values for latent variables Returns ---------- The negative log marginal logliklihood of the model """<line_sep>parm=np.array([self.latent_variables.z_list[k].prior.transform(beta[k])<for>k range(beta.shape[0])])<line_sep>L=self._L(parm)<line_sep><return>-(-0.5<times>(np.dot(np.transpose(self.data) self._alpha(L)))-np.log(np.diag(L)).sum()-(self.data.shape[0]/2.0)<times>np.log(2.0<times>np.pi))<block_end><def_stmt>plot_fit self intervals=<true> **kwargs<block_start>""" Plots the fit of the Gaussian process model to the data Parameters ---------- beta : np.array Contains untransformed starting values for latent variables intervals : Boolean Whether to plot uncertainty intervals or not Returns ---------- None (plots the fit of the function) """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<line_sep>figsize=kwargs.get('figsize' (10 7))<line_sep>date_index=self.index[self.max_lag:]<line_sep>expectation=self.expected_values(self.latent_variables.get_z_values())<line_sep>variance=self.variance_values(self.latent_variables.get_z_values())<line_sep>upper=expectation+1.98<times>np.power(np.diag(variance) 0.5)<line_sep>lower=expectation-1.98<times>np.power(np.diag(variance) 0.5)<line_sep>plt.figure(figsize=figsize)<line_sep>plt.subplot(2 2 1)<line_sep>plt.title(self.data_name+" Raw")<line_sep>plt.plot(date_index self.data<times>self._norm_std+self._norm_mean 'k')<line_sep>plt.subplot(2 2 2)<line_sep>plt.title(self.data_name+" Raw and Expected")<line_sep>plt.plot(date_index self.data<times>self._norm_std+self._norm_mean 'k' alpha=0.2)<line_sep>plt.plot(date_index self.expected_values(self.latent_variables.get_z_values())<times>self._norm_std+self._norm_mean 'b')<line_sep>plt.subplot(2 2 3)<line_sep>plt.title(self.data_name+" Raw and Expected (with intervals)")<if_stmt>intervals<eq><true><block_start>plt.fill_between(date_index lower<times>self._norm_std+self._norm_mean upper<times>self._norm_std+self._norm_mean alpha=0.2)<block_end>plt.plot(date_index self.data<times>self._norm_std+self._norm_mean 'k' alpha=0.2)<line_sep>plt.plot(date_index self.expected_values(self.latent_variables.get_z_values())<times>self._norm_std+self._norm_mean 'b')<line_sep>plt.subplot(2 2 4)<line_sep>plt.title("Expected "+self.data_name+" (with intervals)")<if_stmt>intervals<eq><true><block_start>plt.fill_between(date_index lower<times>self._norm_std+self._norm_mean upper<times>self._norm_std+self._norm_mean alpha=0.2)<block_end>plt.plot(date_index 
self.expected_values(self.latent_variables.get_z_values())<times>self._norm_std+self._norm_mean 'b')<line_sep>plt.show()<block_end><def_stmt>plot_predict self h=5 past_values=20 intervals=<true> **kwargs<block_start>""" Plots forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? past_values : int (default : 20) How many past observations to show on the forecast graph? intervals : Boolean Would you like to show 95% prediction intervals for the forecast? Returns ---------- - Plot of the forecast - Error bars, forecasted_values, plot_values, plot_index """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<line_sep>figsize=kwargs.get('figsize' (10 7))<if_stmt>self.latent_variables.estimated<is><false><block_start><raise>Exception("No latent variables estimated!")<block_end><else_stmt><block_start>predictions,variance,lower,upper=self._construct_predict(self.latent_variables.get_z_values() h)<line_sep>full_predictions=np.append(self.data predictions)<line_sep>full_lower=np.append(self.data lower)<line_sep>full_upper=np.append(self.data upper)<line_sep>date_index=self.shift_dates(h)<line_sep># Plot values (how far to look back) plot_values=full_predictions[-h-past_values:]<times>self._norm_std+self._norm_mean<line_sep>plot_index=date_index[-h-past_values:]<line_sep># Lower and upper intervals lower=np.append(full_predictions[-h-1] lower)<line_sep>upper=np.append(full_predictions[-h-1] upper)<line_sep>plt.figure(figsize=figsize)<if_stmt>intervals<eq><true><block_start>plt.fill_between(date_index[-h-1:] lower<times>self._norm_std+self._norm_mean upper<times>self._norm_std+self._norm_mean alpha=0.2)<block_end>plt.plot(plot_index plot_values)<line_sep>plt.title("Forecast for "+self.data_name)<line_sep>plt.xlabel("Time")<line_sep>plt.ylabel(self.data_name)<line_sep>plt.show()<block_end><block_end><def_stmt>predict_is self h=5 fit_once=<true><block_start>""" Makes dynamic in-sample predictions with the estimated model Parameters ---------- h : int (default : 5) How many steps would you like to forecast? 
fit_once : boolean (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint Returns ---------- - pd.DataFrame with predicted values """<line_sep>predictions=[]<for_stmt>t range(0 h)<block_start>x=GPNARX(ar=self.ar kernel=self.kernel integ=self.integ data=self.data_original[:-h+t])<if_stmt>fit_once<is><false><block_start>x.fit(printer=<false>)<block_end><if_stmt>t<eq>0<block_start><if_stmt>fit_once<is><true><block_start>x.fit(printer=<false>)<line_sep>saved_lvs=x.latent_variables<block_end>predictions=x.predict(1)<block_end><else_stmt><block_start><if_stmt>fit_once<is><true><block_start>x.latent_variables=saved_lvs<block_end>predictions=pd.concat([predictions x.predict(1)])<block_end><block_end>predictions.rename(columns={0:self.data_name} inplace=<true>)<line_sep>predictions.index=self.index[-h:]<line_sep><return>predictions<block_end><def_stmt>plot_predict_is self h=5 fit_once=<true> **kwargs<block_start>""" Plots forecasts with the estimated model against data (Simulated prediction with data) Parameters ---------- h : int (default : 5) How many steps to forecast fit_once : boolean (default: True) Fits only once before the in-sample prediction; if False, fits after every new datapoint Returns ---------- - Plot of the forecast against data """<import_stmt>matplotlib.pyplot<as>plt<import_stmt>seaborn<as>sns<line_sep>figsize=kwargs.get('figsize' (10 7))<line_sep>plt.figure(figsize=figsize)<line_sep>date_index=self.index[-h:]<line_sep>predictions=self.predict_is(h fit_once=fit_once)<line_sep>data=self.data[-h:]<line_sep>plt.plot(date_index data<times>self._norm_std+self._norm_mean label='Data')<line_sep>plt.plot(date_index predictions label='Predictions' c='black')<line_sep>plt.title(self.data_name)<line_sep>plt.legend(loc=2)<line_sep>plt.show()<block_end><def_stmt>predict self h=5<block_start>""" Makes forecast with the estimated model Parameters ---------- h : int (default : 5) How many steps ahead would you like to forecast? Returns ---------- - pd.DataFrame with predicted values """<if_stmt>self.latent_variables.estimated<is><false><block_start><raise>Exception("No latent variables estimated!")<block_end><else_stmt><block_start>predictions,_,_,_=self._construct_predict(self.latent_variables.get_z_values() h)<line_sep>predictions=predictions<times>self._norm_std+self._norm_mean<line_sep>date_index=self.shift_dates(h)<line_sep>result=pd.DataFrame(predictions)<line_sep>result.rename(columns={0:self.data_name} inplace=<true>)<line_sep>result.index=date_index[-h:]<line_sep><return>result<block_end><block_end><block_end>
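A short usage sketch for the GP-NARX class above. The constructor arguments, fit(), predict() and predict_is() are taken from the code; the import path (pyflux as pf) and the SquaredExponential kernel name are assumptions based on the class docstring and the package this module appears to belong to.

import numpy as np
import pandas as pd
import pyflux as pf   # assumed import path for the package containing GPNARX

# toy autoregressive series to fit
np.random.seed(0)
y = np.zeros(200)
for t in range(1, 200):
    y[t] = 0.8 * y[t - 1] + np.random.normal(scale=0.5)
data = pd.DataFrame({'y': y})

model = pf.GPNARX(data=data, ar=2, kernel=pf.SquaredExponential(), integ=0, target='y')
model.fit()                     # default method is MLE
print(model.predict(h=5))       # 5-step-ahead forecasts as a DataFrame
print(model.predict_is(h=5))    # rolling in-sample predictions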
<import_stmt>pytest<import_from_stmt>ddtrace.context Context<import_from_stmt>ddtrace.span Span<line_sep>@pytest.mark.parametrize("ctx1,ctx2" [(Context() Context()) (Context(trace_id=123) Context(trace_id=123)) (Context(trace_id=123 span_id=321 dd_origin="synthetics" sampling_priority=2) Context(trace_id=123 span_id=321 dd_origin="synthetics" sampling_priority=2) ) ] )<def_stmt>test_eq ctx1 ctx2<block_start><assert_stmt>ctx1<eq>ctx2<block_end>@pytest.mark.parametrize("ctx1,ctx2" [(Context() Span(<none> "")) (Context() <none>) (Context() object()) (<none> Context()) (Context() 5) (5 Context()) (Context(trace_id=123 span_id=321 dd_origin="synthetics" sampling_priority=2) Context(trace_id=1234 span_id=321 dd_origin="synthetics" sampling_priority=2) ) (Context(trace_id=123 span_id=321 dd_origin="synthetics" sampling_priority=2) Context(trace_id=123 span_id=3210 dd_origin="synthetics" sampling_priority=2) ) (Context(trace_id=123 span_id=321 dd_origin="synthetics" sampling_priority=2) Context(trace_id=123 span_id=321 dd_origin="synthetics1" sampling_priority=2) ) (Context(trace_id=123 span_id=321 dd_origin="synthetics" sampling_priority=2) Context(trace_id=123 span_id=321 dd_origin="synthetics" sampling_priority=0) ) ] )<def_stmt>test_not_eq ctx1 ctx2<block_start><assert_stmt>ctx1<ne>ctx2<block_end>
<import_stmt>numpy<import_from_stmt>bohrium_api _info<import_stmt>util<class_stmt>test_bh_opcodes<block_start><def_stmt>init self<block_start><for_stmt>op _info.op.values()<block_start><if_stmt>op["name"]<not><in>["identity"]<and>op['elementwise']<block_start><for_stmt>type_sig op["type_sig"]<block_start><yield>(op type_sig)<block_end><block_end><block_end><block_end>@util.add_bh107_cmd<def_stmt>test_ufunc self arg<block_start>(op type_sig)=arg<line_sep>cmd="R = bh.random.RandomState(42); "<for_stmt>i,dtype enumerate(type_sig[1:])<block_start>cmd<augadd>"a%d = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); "%(i dtype)<block_end><if_stmt>op["name"]<eq>"arccosh"<block_start>cmd<augadd>"a%d += 1;"%i<block_end>cmd<augadd>"res = M.%s("%(op["name"])<for_stmt>i range(op["nop"]-1)<block_start>cmd<augadd>"a%d, "%i<block_end>cmd=cmd[:-2]+");"<line_sep><return>cmd<block_end><block_end><class_stmt>test_bh_operators<block_start><def_stmt>init self<block_start><for_stmt>op ['+' '-' '*' '/' '//' '%' '==' '<=' '>=' '!=' '<' '>']<block_start><for_stmt>dtype ['float64' 'int64']<block_start><yield>(op dtype)<block_end><block_end><block_end>@util.add_bh107_cmd<def_stmt>test_arrays self arg<block_start>(op dtype)=arg<line_sep>cmd="R = bh.random.RandomState(42); "<line_sep>cmd<augadd>"a1 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); "%dtype<line_sep>cmd<augadd>"a2 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH) + 1; "%dtype<line_sep>cmd<augadd>"res = a1 %s a2"%op<line_sep><return>cmd<block_end>@util.add_bh107_cmd<def_stmt>test_scalar_rhs self arg<block_start>(op dtype)=arg<line_sep>cmd="R = bh.random.RandomState(42); "<line_sep>cmd<augadd>"a1 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); "%dtype<line_sep>cmd<augadd>"a2 = np.%s(42); "%dtype<line_sep>cmd<augadd>"res = a1 %s a2"%op<line_sep><return>cmd<block_end><block_end><class_stmt>test_bh_operators_lhs<block_start><def_stmt>init self<block_start><if_stmt>numpy.__version__<ge>"1.13"<block_start><for_stmt>op ['+' '-' '*' '/' '//' '%' '==' '<=' '>=' '!=' '<' '>']<block_start><for_stmt>dtype ['float64' 'int64']<block_start><yield>(op dtype)<block_end><block_end><block_end><else_stmt><block_start>print("The version of NumPy is too old (<= 1.13), ignoring test")<block_end><block_end>@util.add_bh107_cmd<def_stmt>test_scalar_lhs self arg<block_start>(op dtype)=arg<line_sep>cmd="R = bh.random.RandomState(42); "<line_sep>cmd<augadd>"a1 = np.%s(42); "%dtype<line_sep>cmd<augadd>"a2 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH) + 1; "%dtype<line_sep>cmd<augadd>"res = a1 %s a2"%op<line_sep><return>cmd<block_end><block_end><class_stmt>test_extra_binary_ops<block_start><def_stmt>init self<block_start><for_stmt>op ["true_divide" "floor_divide"]<block_start><for_stmt>dtype ["float64" "int64" "uint64"]<block_start><yield>(op dtype)<block_end><block_end><block_end>@util.add_bh107_cmd<def_stmt>test_ufunc self arg<block_start>(op dtype)=arg<line_sep>cmd="R = bh.random.RandomState(42); "<line_sep>cmd<augadd>"a0 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); "%dtype<line_sep>cmd<augadd>"a1 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); "%dtype<line_sep>cmd<augadd>"res = M.%s(a0, a1)"%op<line_sep><return>cmd<block_end><block_end><class_stmt>test_power<block_start><def_stmt>init self<block_start><for_stmt>op ["power"]<block_start><for_stmt>dtype ["float32" "float64"]<block_start><yield>(op dtype)<block_end><block_end><block_end>@util.add_bh107_cmd<def_stmt>test_ufunc self arg<block_start>(op dtype)=arg<line_sep>cmd="R = 
bh.random.RandomState(42); "<line_sep>cmd<augadd>"a0 = R.random_of_dtype(shape=10, dtype=np.%s, bohrium=BH); "%dtype<line_sep>cmd<augadd>"res = M.%s(a0, 1.42)"%op<line_sep><return>cmd<block_end><block_end>
# Generated by Django 3.1.8 on 2021-05-23 17:07 <import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('true_coders' '0034_auto_20210411_1726') ('chats' '0005_auto_20210523_1700') ]<line_sep>operations=[migrations.AddField(model_name='chatlog' name='coder' field=models.ForeignKey(blank=<true> default=<none> null=<true> on_delete=django.db.models.deletion.CASCADE to='true_coders.coder') ) ]<block_end>
<import_stmt>braintree<import_from_stmt>braintree.resource Resource<line_sep># NEXT_MAJOR_VERSION - rename to GooglePayCard <class_stmt>AndroidPayCard(Resource)<block_start>""" A class representing Braintree Android Pay card objects. """<def_stmt>__init__ self gateway attributes<block_start>Resource.__init__(self gateway attributes)<if_stmt>hasattr(self 'expired')<block_start>self.is_expired=self.expired<block_end><if_stmt>"subscriptions"<in>attributes<block_start>self.subscriptions=[braintree.subscription.Subscription(gateway subscription)<for>subscription self.subscriptions]<block_end><block_end>@property<def_stmt>expiration_date self<block_start><return>self.expiration_month+"/"+self.expiration_year<block_end>@property<def_stmt>last_4 self<block_start><return>self.virtual_card_last_4<block_end>@property<def_stmt>card_type self<block_start><return>self.virtual_card_type<block_end><block_end>
# A file to test if pyvm works from the command line. <def_stmt>it_works <block_start>print("Success!")<block_end>it_works()<line_sep>
# Type: Python 3 script # Author: <NAME> <<EMAIL>> # Date: Feb 28, 2020 # Notes: Put "Language.csv" in this folder and run the script. # It generates "Language_parsed.csv" for translation. # Download the latest file from "MediaInfo/Source/Resource". # Date: Mar 1, 2020 # Update: Fix bug in note adding (keyword mode). <import_stmt>csv<import_stmt>sys<import_stmt>os<line_sep>################################# ######### Configuration ######### # edit the filename if the CSV file is placed elsewhere language_file='Language.csv'<line_sep># output filename output_file='Language_parsed.csv'<line_sep># enter your language codes here # it should be one in # ar;be;bg;ca;cs;da;de;es;eu;fa;fr;gl;gr;hu;it;ja;ko;lt;nl; # pl;pt;pt-BR;ro;ru;sk;sq;sv;th;tr;uk;zh-CN;zh-HK;zh-TW;hr;hy;ka # the first one is target language # the others are reference languages (if any) # English (en) is the default source language lang_codes=['zh-CN' 'ja']<line_sep># comments file comments_file='Data_Comments.csv'<line_sep># notes file notes_file='Data_Notes.csv'<line_sep>######### Configuration ######### ################################# csv.register_dialect('MediaInfo' delimiter=';')<if_stmt><not>os.path.exists(language_file)<block_start>print('Error: Language.csv file does not exist!')<line_sep>sys.exit(1)<block_end><if_stmt><not>lang_codes<block_start>print('Error: No language code is specified!')<line_sep>sys.exit(1)<block_end>dict_comments={}<if_stmt>os.path.exists(comments_file)<block_start><with_stmt>open(comments_file 'r' encoding='utf_8_sig')<as>f<block_start>reader=csv.reader(f)<line_sep>next(reader)# skip header <for_stmt>row reader# key, type or unit, comment <block_start>dict_comments[row[0]]=[row[1] row[2]]<block_end><block_end><block_end><else_stmt><block_start>print('Info: Comments file does not exist. Ignoring comments.')<block_end>notes=<false><line_sep>dict_notes_key={}<line_sep>dict_notes_keyword={}<line_sep>dict_notes_key_keyword={}<if_stmt>os.path.exists(notes_file)<block_start>notes=<true><with_stmt>open(notes_file 'r' encoding='utf_8_sig')<as>f<block_start>reader=csv.reader(f)<line_sep>next(reader)# skip header <for_stmt>row reader<block_start>mode=row[1].lower().strip()<if_stmt>mode<eq>''<or>mode<eq>'keyword'<block_start>dict_notes_keyword[row[0].lower().strip()]=row[2]<block_end><elif_stmt>mode<eq>'key'<block_start>dict_notes_key[row[0]]=row[2]<block_end><elif_stmt>mode<eq>'key_keyword'<block_start>dict_notes_key_keyword[row[0].strip()]=row[2]<block_end><block_end><block_end><block_end><else_stmt><block_start>print('Info: Notes file does not exist. 
Ignoring notes.')<block_end>output_rows=[]<with_stmt>open(language_file 'r' encoding='utf_8')<as>f<block_start>reader=csv.reader(f dialect='MediaInfo')<line_sep>header=next(reader)<line_sep>index_lang=[]<line_sep>lang_codes.insert(0 'en')<for_stmt>lang_code lang_codes<block_start><if_stmt>lang_code<not><in>header<block_start>print(f'Error: Language code "{lang_code}" is not found in the language file!')<line_sep>sys.exit(1)<block_end>index_lang.append(header.index(lang_code))<block_end>row_header=['Key']<line_sep>row_header.extend(lang_codes)<if_stmt>dict_comments<block_start>row_header.extend(['Type or Unit' 'Comment'])<block_end><if_stmt>notes<block_start>row_header.append('Notes')<block_end>output_rows.append(row_header)<for_stmt>row reader<block_start>key=row[0]<line_sep>row_string=[key]<for_stmt>index index_lang<block_start><if_stmt>row[index].startswith(' :')<block_start>row_string.append(f'"{row[index]}"')<block_end><else_stmt><block_start>row_string.append(row[index])<block_end><block_end><if_stmt>dict_comments<block_start><if_stmt>key<in>dict_comments<block_start>row_string.extend(dict_comments[key])<block_end><else_stmt><block_start>row_string.extend(['' ''])<block_end><block_end><if_stmt>notes<block_start>row_notes=[]<if_stmt>key<in>dict_notes_key<block_start>row_notes.append(dict_notes_key[key])<block_end><for_stmt>key_keyword dict_notes_key_keyword<block_start><if_stmt>key_keyword<in>key<block_start>row_notes.append(dict_notes_key_keyword[key_keyword])<block_end><block_end><for_stmt>keyword dict_notes_keyword<block_start><if_stmt>keyword<in>row_string[1].lower()<block_start>row_notes.append(dict_notes_keyword[keyword])<block_end><block_end>row_string.append('; '.join(row_notes))<block_end>output_rows.append(row_string)<block_end><block_end><with_stmt>open(output_file 'w' encoding='utf_8_sig' newline='')<as>f<block_start>writer=csv.writer(f)<line_sep>writer.writerows(output_rows)<block_end>print('Info: Parsing completed!')<line_sep>
<import_stmt>kpm.platforms.kubernetes<import_stmt>kpm.formats<import_from_stmt>kpm.commands.command_base CommandBase LoadVariables<class_stmt>DeployCmd(CommandBase)<block_start>name='deploy'<line_sep>help_message="deploy a package on kubernetes"<def_stmt>__init__ self options<block_start>super(DeployCmd self).__init__(options)<line_sep>self.package=options.package<line_sep>self.registry_host=options.registry_host<line_sep>self.shards=options.shards<line_sep>self.force=options.force<line_sep>self.dry_run=options.dry_run<line_sep>self.namespace=options.namespace<line_sep>self.api_proxy=options.api_proxy<line_sep>self.version=options.version<line_sep>self.version_parts=options.version_parts<line_sep>self.tmpdir=options.tmpdir<line_sep>self.variables=options.variables<line_sep>self.target=options.platform<line_sep>self.format=options.media_type<line_sep>self.status=<none><line_sep>self._kub=<none><block_end>@classmethod<def_stmt>_add_arguments cls parser<block_start>cls._add_registryhost_option(parser)<line_sep>cls._add_mediatype_option(parser default='kpm')<line_sep>cls._add_packagename_option(parser)<line_sep>cls._add_packageversion_option(parser)<line_sep>parser.add_argument("--tmpdir" default="/tmp/" help="directory used to extract resources")<line_sep>parser.add_argument("--dry-run" action='store_true' default=<false> help="do not create the resources on kubernetes")<line_sep>parser.add_argument("--namespace" help="kubernetes namespace" default=<none>)<line_sep>parser.add_argument("--api-proxy" help="kubectl proxy url" nargs="?" const="http://localhost:8001")<line_sep>parser.add_argument("-x" "--variables" help="variables" default={} action=LoadVariables)<line_sep>parser.add_argument("--shards" help=("Shards list/dict/count: eg. --shards=5 ;"<concat>"--shards='[{\"name\": 1, \"name\": 2}]'") default=<none>)<line_sep>parser.add_argument("--force" action='store_true' default=<false> help="force upgrade, delete and recreate resources")<line_sep>parser.add_argument("--platform" default=<none> help=("[experimental] target platform to deploy"<concat>"the package: [kubernetes, docker-compose]"))<block_end><def_stmt>kub self<block_start><if_stmt>self._kub<is><none><block_start>self._kub=kpm.formats.kub_factory(self.format self.package convert_to=self.target endpoint=self.registry_host variables=self.variables namespace=self.namespace shards=self.shards version=self.version_parts)<block_end><return>self._kub<block_end><def_stmt>_call self<block_start>self.status=self.kub().deploy(dest=self.tmpdir force=self.force dry=self.dry_run proxy=self.api_proxy fmt=self.output)<block_end><def_stmt>_render_dict self<block_start><return>self.status<block_end><def_stmt>_render_console self<block_start>""" Handled by deploy """<if_stmt>self.kub().target<eq>"docker-compose"<block_start><return>self.status<block_end><return>''<block_end><block_end>
<import_stmt>PikaStdLib<line_sep>print('hello PikaScript @TC264')<line_sep>
# -*- coding: utf-8 -*- # Generated by Django 1.10.1 on 2017-06-25 17:50 <import_from_future_stmt> unicode_literals<import_from_stmt>decimal Decimal<import_stmt>django.core.validators<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>initial=<true><line_sep>dependencies=[('cadastro' '0001_initial') ('estoque' '0001_initial') ]<line_sep>operations=[migrations.CreateModel(name='CondicaoPagamento' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('descricao' models.CharField(max_length=255)) ('forma' models.CharField(choices=[('01' 'Dinheiro') ('02' 'Cheque') ('03' 'Cartão de Crédito') ('04' 'Cartão de Débito') ('05' 'Crédito Loja') ('10' 'Vale Alimentação') ('11' 'Vale Refeição') ('12' 'Vale Presente') ('13' 'Vale Combustível') ('99' 'Outros')] default='99' max_length=2)) ('n_parcelas' models.IntegerField()) ('dias_recorrencia' models.IntegerField(default=0)) ('parcela_inicial' models.IntegerField(default=0)) ] ) migrations.CreateModel(name='ItensVenda' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('quantidade' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('valor_unit' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('tipo_desconto' models.CharField(blank=<true> choices=[('0' 'Valor') ('1' 'Percentual')] max_length=1 null=<true>)) ('desconto' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('subtotal' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('inf_ad_prod' models.CharField(blank=<true> max_length=500 null=<true>)) ('valor_rateio_frete' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('valor_rateio_despesas' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('valor_rateio_seguro' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vbc_icms' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vbc_icms_st' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vbc_ipi' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vicms' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vicms_st' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vipi' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vfcp' 
models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vicmsufdest' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vicmsufremet' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vicms_deson' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('p_icms' models.DecimalField(blank=<true> decimal_places=2 max_digits=5 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('p_icmsst' models.DecimalField(blank=<true> decimal_places=2 max_digits=5 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('p_ipi' models.DecimalField(blank=<true> decimal_places=2 max_digits=5 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vq_bcpis' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vq_bccofins' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vpis' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('vcofins' models.DecimalField(blank=<true> decimal_places=2 max_digits=13 null=<true> validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('icms_incluido_preco' models.BooleanField(default=<false>)) ('icmsst_incluido_preco' models.BooleanField(default=<false>)) ('ipi_incluido_preco' models.BooleanField(default=<false>)) ('incluir_bc_icms' models.BooleanField(default=<false>)) ('incluir_bc_icmsst' models.BooleanField(default=<false>)) ('auto_calcular_impostos' models.BooleanField(default=<true>)) ('produto' models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name='venda_produto' to='cadastro.Produto')) ] ) migrations.CreateModel(name='Pagamento' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('indice_parcela' models.IntegerField()) ('vencimento' models.DateField()) ('valor_parcela' models.DecimalField(decimal_places=2 max_digits=13 validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ] ) migrations.CreateModel(name='Venda' fields=[('id' models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name='ID')) ('ind_final' models.BooleanField(default=<false>)) ('mod_frete' models.CharField(choices=[('0' 'Por conta do emitente') ('1' 'Por conta do destinatário/remetente') ('2' 'Por conta de terceiros') ('9' 'Sem frete')] default='9' max_length=1)) ('movimentar_estoque' models.BooleanField(default=<true>)) ('data_emissao' models.DateField(blank=<true> null=<true>)) ('vendedor' models.CharField(blank=<true> max_length=255 null=<true>)) ('valor_total' models.DecimalField(decimal_places=2 default=Decimal('0.00') max_digits=13 validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('tipo_desconto' models.CharField(choices=[('0' 'Valor') ('1' 'Percentual')] default='0' max_length=1)) ('desconto' models.DecimalField(decimal_places=4 
default=Decimal('0.00') max_digits=15 validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('despesas' models.DecimalField(decimal_places=2 default=Decimal('0.00') max_digits=13 validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('frete' models.DecimalField(decimal_places=2 default=Decimal('0.00') max_digits=13 validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('seguro' models.DecimalField(decimal_places=2 default=Decimal('0.00') max_digits=13 validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('impostos' models.DecimalField(decimal_places=2 default=Decimal('0.00') max_digits=13 validators=[django.core.validators.MinValueValidator(Decimal('0.00'))])) ('observacoes' models.CharField(blank=<true> max_length=1055 null=<true>)) ] ) migrations.CreateModel(name='OrcamentoVenda' fields=[('venda_ptr' models.OneToOneField(auto_created=<true> on_delete=django.db.models.deletion.CASCADE parent_link=<true> primary_key=<true> serialize=<false> to='vendas.Venda')) ('data_vencimento' models.DateField(blank=<true> null=<true>)) ('status' models.CharField(choices=[('0' 'Aberto') ('1' 'Baixado') ('2' 'Cancelado')] default='0' max_length=1)) ] bases=('vendas.venda' ) ) migrations.CreateModel(name='PedidoVenda' fields=[('venda_ptr' models.OneToOneField(auto_created=<true> on_delete=django.db.models.deletion.CASCADE parent_link=<true> primary_key=<true> serialize=<false> to='vendas.Venda')) ('data_entrega' models.DateField(blank=<true> null=<true>)) ('status' models.CharField(choices=[('0' 'Aberto') ('1' 'Faturado') ('2' 'Cancelado') ('3' 'Importado por XML')] default='0' max_length=1)) ('orcamento' models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='orcamento_pedido' to='vendas.OrcamentoVenda')) ] bases=('vendas.venda' ) ) migrations.AddField(model_name='venda' name='cliente' field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name='venda_cliente' to='cadastro.Cliente') ) migrations.AddField(model_name='venda' name='cond_pagamento' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='venda_pagamento' to='vendas.CondicaoPagamento') ) migrations.AddField(model_name='venda' name='local_orig' field=models.ForeignKey(default=1 on_delete=django.db.models.deletion.CASCADE related_name='venda_local_estoque' to='estoque.LocalEstoque') ) migrations.AddField(model_name='venda' name='transportadora' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.CASCADE related_name='venda_transportadora' to='cadastro.Transportadora') ) migrations.AddField(model_name='venda' name='veiculo' field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL related_name='venda_veiculo' to='cadastro.Veiculo') ) migrations.AddField(model_name='pagamento' name='venda_id' field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name='parcela_pagamento' to='vendas.Venda') ) migrations.AddField(model_name='itensvenda' name='venda_id' field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE related_name='itens_venda' to='vendas.Venda') ) ]<block_end>
""" nuScenes dev-kit. Code written by <NAME>, <NAME> and <NAME>, 2019. This code is based on: py-motmetrics at: https://github.com/cheind/py-motmetrics """<import_from_stmt>collections OrderedDict<import_from_stmt>itertools count<import_stmt>motmetrics<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<class_stmt>MOTAccumulatorCustom(motmetrics.mot.MOTAccumulator)<block_start><def_stmt>__init__ self<block_start>super().__init__()<block_end>@staticmethod<def_stmt>new_event_dataframe_with_data indices events<block_start>""" Create a new DataFrame filled with data. This version overwrites the original in MOTAccumulator achieves about 2x speedups. Params ------ indices: list list of tuples (frameid, eventid) events: list list of events where each event is a list containing 'Type', 'OId', HId', 'D' """<line_sep>idx=pd.MultiIndex.from_tuples(indices names=['FrameId' 'Event'])<line_sep>df=pd.DataFrame(events index=idx columns=['Type' 'OId' 'HId' 'D'])<line_sep><return>df<block_end>@staticmethod<def_stmt>new_event_dataframe <block_start>""" Create a new DataFrame for event tracking. """<line_sep>idx=pd.MultiIndex(levels=[[] []] codes=[[] []] names=['FrameId' 'Event'])<line_sep>cats=pd.Categorical([] categories=['RAW' 'FP' 'MISS' 'SWITCH' 'MATCH'])<line_sep>df=pd.DataFrame(OrderedDict([('Type' pd.Series(cats)) # Type of event. One of FP (false positive), MISS, SWITCH, MATCH ('OId' pd.Series(dtype=object)) # Object ID or -1 if FP. Using float as missing values will be converted to NaN anyways. ('HId' pd.Series(dtype=object)) # Hypothesis ID or NaN if MISS. Using float as missing values will be converted to NaN anyways. ('D' pd.Series(dtype=float)) # Distance or NaN when FP or MISS ]) index=idx)<line_sep><return>df<block_end>@property<def_stmt>events self<block_start><if_stmt>self.dirty_events<block_start>self.cached_events_df=MOTAccumulatorCustom.new_event_dataframe_with_data(self._indices self._events)<line_sep>self.dirty_events=<false><block_end><return>self.cached_events_df<block_end>@staticmethod<def_stmt>merge_event_dataframes dfs update_frame_indices=<true> update_oids=<true> update_hids=<true> return_mappings=<false><block_start>"""Merge dataframes. 
Params ------ dfs : list of pandas.DataFrame or MotAccumulator A list of event containers to merge Kwargs ------ update_frame_indices : boolean, optional Ensure that frame indices are unique in the merged container update_oids : boolean, unique Ensure that object ids are unique in the merged container update_hids : boolean, unique Ensure that hypothesis ids are unique in the merged container return_mappings : boolean, unique Whether or not to return mapping information Returns ------- df : pandas.DataFrame Merged event data frame """<line_sep>mapping_infos=[]<line_sep>new_oid=count()<line_sep>new_hid=count()<line_sep>r=MOTAccumulatorCustom.new_event_dataframe()<for_stmt>df dfs<block_start><if_stmt>isinstance(df MOTAccumulatorCustom)<block_start>df=df.events<block_end>copy=df.copy()<line_sep>infos={}<line_sep># Update index <if_stmt>update_frame_indices<block_start>next_frame_id=max(r.index.get_level_values(0).max()+1 r.index.get_level_values(0).unique().shape[0])<if_stmt>np.isnan(next_frame_id)<block_start>next_frame_id=0<block_end>copy.index=copy.index.map(<lambda>x:(x[0]+next_frame_id x[1]))<line_sep>infos['frame_offset']=next_frame_id<block_end># Update object / hypothesis ids <if_stmt>update_oids<block_start>oid_map=dict([oid str(next(new_oid))]<for>oid copy['OId'].dropna().unique())<line_sep>copy['OId']=copy['OId'].map(<lambda>x:oid_map[x] na_action='ignore')<line_sep>infos['oid_map']=oid_map<block_end><if_stmt>update_hids<block_start>hid_map=dict([hid str(next(new_hid))]<for>hid copy['HId'].dropna().unique())<line_sep>copy['HId']=copy['HId'].map(<lambda>x:hid_map[x] na_action='ignore')<line_sep>infos['hid_map']=hid_map<block_end>r=r.append(copy)<line_sep>mapping_infos.append(infos)<block_end><if_stmt>return_mappings<block_start><return>r mapping_infos<block_end><else_stmt><block_start><return>r<block_end><block_end><block_end>
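A small sketch of exercising MOTAccumulatorCustom. The events property and merge_event_dataframes come from the class above; update(oids, hids, dists) is the regular motmetrics accumulator API, and the frame data here is made up for illustration.

import numpy as np

acc1 = MOTAccumulatorCustom()
acc1.update(['gt_a'], ['hyp_1'], [[0.3]])                      # frame 0: one matched pair
acc1.update(['gt_a', 'gt_b'], ['hyp_1'], [[0.2], [np.nan]])    # frame 1: gt_b has no hypothesis

acc2 = MOTAccumulatorCustom()
acc2.update(['gt_c'], ['hyp_9'], [[0.1]])

print(acc1.events)   # rebuilt lazily only when dirty_events is set
merged = MOTAccumulatorCustom.merge_event_dataframes([acc1, acc2])
print(merged)        # frame indices and object/hypothesis ids remapped so they stay unique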
# Copyright 2013 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>socket<import_stmt>unittest<import_from_stmt>telemetry.core.backends.chrome websocket<class_stmt>TestWebSocket(unittest.TestCase)<block_start><def_stmt>testExports self<block_start>self.assertNotEqual(websocket.create_connection <none>)<line_sep>self.assertNotEqual(websocket.WebSocketException <none>)<line_sep>self.assertNotEqual(websocket.WebSocketTimeoutException <none>)<block_end><def_stmt>testSockOpts self<block_start>ws=websocket.create_connection('ws://echo.websocket.org')<line_sep>self.assertNotEquals(ws.sock.getsockopt(socket.SOL_SOCKET socket.SO_REUSEADDR) 0)<line_sep>ws=websocket.create_connection('ws://echo.websocket.org' sockopt=[(socket.IPPROTO_TCP socket.TCP_NODELAY 1)])<line_sep>self.assertNotEquals(ws.sock.getsockopt(socket.SOL_SOCKET socket.SO_REUSEADDR) 0)<line_sep>self.assertNotEquals(ws.sock.getsockopt(socket.IPPROTO_TCP socket.TCP_NODELAY) 0)<block_end><block_end>
# terrascript/resource/NetApp/netapp_cloudmanager.py # Automatically generated by tools/makecode.py (24-Sep-2021 15:22:08 UTC) <import_stmt>terrascript<class_stmt>netapp_cloudmanager_aggregate(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_anf_volume(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_aws_fsx(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_cifs_server(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_connector_aws(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_connector_azure(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_connector_gcp(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_cvo_aws(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_cvo_azure(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_cvo_gcp(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_cvs_gcp_volume(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_nss_account(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_snapmirror(terrascript.Resource)<block_start><pass><block_end><class_stmt>netapp_cloudmanager_volume(terrascript.Resource)<block_start><pass><block_end>__all__=["netapp_cloudmanager_aggregate" "netapp_cloudmanager_anf_volume" "netapp_cloudmanager_aws_fsx" "netapp_cloudmanager_cifs_server" "netapp_cloudmanager_connector_aws" "netapp_cloudmanager_connector_azure" "netapp_cloudmanager_connector_gcp" "netapp_cloudmanager_cvo_aws" "netapp_cloudmanager_cvo_azure" "netapp_cloudmanager_cvo_gcp" "netapp_cloudmanager_cvs_gcp_volume" "netapp_cloudmanager_nss_account" "netapp_cloudmanager_snapmirror" "netapp_cloudmanager_volume" ]<line_sep>
''' Function: Implementation of FastSCNN Author: <NAME> '''<import_stmt>os<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torch.utils.model_zoo<as>model_zoo<import_from_stmt>.bricks BuildNormalization BuildActivation DepthwiseSeparableConv2d InvertedResidual<line_sep>'''model urls'''<line_sep>model_urls={}<line_sep>'''Pooling Pyramid Module used in PSPNet'''<class_stmt>PoolingPyramidModule(nn.ModuleList)<block_start><def_stmt>__init__ self pool_scales in_channels out_channels norm_cfg act_cfg align_corners **kwargs<block_start>super(PoolingPyramidModule self).__init__()<line_sep>self.pool_scales=pool_scales<line_sep>self.in_channels=in_channels<line_sep>self.out_channels=out_channels<line_sep>self.norm_cfg=norm_cfg<line_sep>self.act_cfg=act_cfg<line_sep>self.align_corners=align_corners<for_stmt>pool_scale pool_scales<block_start>self.append(nn.Sequential(nn.AdaptiveAvgPool2d(pool_scale) nn.Conv2d(in_channels out_channels kernel_size=1 stride=1 padding=0 bias=<false>) BuildNormalization(norm_cfg['type'] (out_channels norm_cfg['opts'])) BuildActivation(act_cfg['type'] **act_cfg['opts']) ))<block_end><block_end>'''forward'''<def_stmt>forward self x<block_start>ppm_outs=[]<for_stmt>ppm self<block_start>ppm_out=ppm(x)<line_sep>upsampled_ppm_out=F.interpolate(input=ppm_out size=x.shape[2:] mode='bilinear' align_corners=self.align_corners)<line_sep>ppm_outs.append(upsampled_ppm_out)<block_end><return>ppm_outs<block_end><block_end>'''Learning to downsample module'''<class_stmt>LearningToDownsample(nn.Module)<block_start><def_stmt>__init__ self in_channels dw_channels out_channels norm_cfg=<none> act_cfg=<none> dw_act_cfg=<none><block_start>super(LearningToDownsample self).__init__()<line_sep>self.norm_cfg=norm_cfg<line_sep>self.act_cfg=act_cfg<line_sep>self.dw_act_cfg=dw_act_cfg<line_sep>dw_channels1,dw_channels2=dw_channels<line_sep>self.conv=nn.Sequential(nn.Conv2d(in_channels dw_channels1 kernel_size=3 stride=2 padding=1 bias=<false>) BuildNormalization(norm_cfg['type'] (dw_channels1 norm_cfg['opts'])) BuildActivation(act_cfg['type'] **act_cfg['opts']) )<line_sep>self.dsconv1=DepthwiseSeparableConv2d(in_channels=dw_channels1 out_channels=dw_channels2 kernel_size=3 stride=2 padding=1 norm_cfg=self.norm_cfg act_cfg=self.act_cfg dw_act_cfg=self.dw_act_cfg )<line_sep>self.dsconv2=DepthwiseSeparableConv2d(in_channels=dw_channels2 out_channels=out_channels kernel_size=3 stride=2 padding=1 norm_cfg=self.norm_cfg act_cfg=self.act_cfg dw_act_cfg=self.dw_act_cfg )<block_end>'''forward'''<def_stmt>forward self x<block_start>x=self.conv(x)<line_sep>x=self.dsconv1(x)<line_sep>x=self.dsconv2(x)<line_sep><return>x<block_end><block_end>'''Global feature extractor module'''<class_stmt>GlobalFeatureExtractor(nn.Module)<block_start><def_stmt>__init__ self in_channels=64 block_channels=(64 96 128) out_channels=128 expand_ratio=6 num_blocks=(3 3 3) strides=(2 2 1) pool_scales=(1 2 3 6) norm_cfg=<none> act_cfg=<none> align_corners=<false><block_start>super(GlobalFeatureExtractor self).__init__()<line_sep># set attrs <assert_stmt>len(block_channels)<eq>len(num_blocks)<eq>3<line_sep>self.act_cfg=act_cfg<line_sep>self.norm_cfg=norm_cfg<line_sep># define modules self.bottleneck1=self.makelayer(in_channels block_channels[0] num_blocks[0] strides[0] expand_ratio)<line_sep>self.bottleneck2=self.makelayer(block_channels[0] block_channels[1] num_blocks[1] strides[1] expand_ratio)<line_sep>self.bottleneck3=self.makelayer(block_channels[1] block_channels[2] num_blocks[2] 
strides[2] expand_ratio)<line_sep>self.ppm=PoolingPyramidModule(pool_scales block_channels[2] block_channels[2]<floordiv>4 norm_cfg=self.norm_cfg act_cfg=self.act_cfg align_corners=align_corners)<line_sep>self.out=nn.Sequential(nn.Conv2d(block_channels[2]<times>2 out_channels kernel_size=3 stride=1 padding=1 bias=<false>) BuildNormalization(norm_cfg['type'] (out_channels norm_cfg['opts'])) BuildActivation(act_cfg['type'] **act_cfg['opts']) )<block_end>'''make layer'''<def_stmt>makelayer self in_channels out_channels blocks stride=1 expand_ratio=6<block_start>layers=[InvertedResidual(in_channels out_channels stride expand_ratio norm_cfg=self.norm_cfg act_cfg=self.act_cfg)]<for_stmt>i range(1 blocks)<block_start>layers.append(InvertedResidual(out_channels out_channels 1 expand_ratio norm_cfg=self.norm_cfg act_cfg=self.act_cfg))<block_end><return>nn.Sequential(*layers)<block_end>'''forward'''<def_stmt>forward self x<block_start>x=self.bottleneck1(x)<line_sep>x=self.bottleneck2(x)<line_sep>x=self.bottleneck3(x)<line_sep>x=torch.cat([x *self.ppm(x)] dim=1)<line_sep>x=self.out(x)<line_sep><return>x<block_end><block_end>'''Feature fusion module'''<class_stmt>FeatureFusionModule(nn.Module)<block_start><def_stmt>__init__ self higher_in_channels lower_in_channels out_channels norm_cfg=<none> dwconv_act_cfg=<none> conv_act_cfg=<none> align_corners=<false><block_start>super(FeatureFusionModule self).__init__()<line_sep># set attrs self.norm_cfg=norm_cfg<line_sep>self.dwconv_act_cfg=dwconv_act_cfg<line_sep>self.conv_act_cfg=conv_act_cfg<line_sep>self.align_corners=align_corners<line_sep># define modules self.dwconv=nn.Sequential(nn.Conv2d(lower_in_channels out_channels kernel_size=3 stride=1 padding=1 groups=out_channels bias=<false>) BuildNormalization(norm_cfg['type'] (out_channels norm_cfg['opts'])) BuildActivation(dwconv_act_cfg['type'] **dwconv_act_cfg['opts']) )<line_sep>self.conv_lower_res=nn.Sequential(nn.Conv2d(out_channels out_channels kernel_size=1 stride=1 padding=0 bias=<false>) BuildNormalization(norm_cfg['type'] (out_channels norm_cfg['opts'])) )<line_sep>self.conv_higher_res=nn.Sequential(nn.Conv2d(higher_in_channels out_channels kernel_size=1 stride=1 padding=0 bias=<false>) BuildNormalization(norm_cfg['type'] (out_channels norm_cfg['opts'])) )<line_sep>self.act=BuildActivation(conv_act_cfg['type'] **conv_act_cfg['opts'])<block_end>'''forward'''<def_stmt>forward self higher_res_feature lower_res_feature<block_start>lower_res_feature=F.interpolate(lower_res_feature size=higher_res_feature.size()[2:] mode='bilinear' align_corners=self.align_corners)<line_sep>lower_res_feature=self.dwconv(lower_res_feature)<line_sep>lower_res_feature=self.conv_lower_res(lower_res_feature)<line_sep>higher_res_feature=self.conv_higher_res(higher_res_feature)<line_sep>out=higher_res_feature+lower_res_feature<line_sep><return>self.act(out)<block_end><block_end>'''FastSCNN'''<class_stmt>FastSCNN(nn.Module)<block_start><def_stmt>__init__ self in_channels=3 downsample_dw_channels=(32 48) global_in_channels=64 global_block_channels=(64 96 128) global_block_strides=(2 2 1) global_out_channels=128 higher_in_channels=64 lower_in_channels=128 fusion_out_channels=128 out_indices=(0 1 2) norm_cfg=<none> act_cfg=<none> align_corners=<false> dw_act_cfg=<none> **kwargs<block_start>super(FastSCNN self).__init__()<assert_stmt>global_in_channels<eq>higher_in_channels 'Global Input Channels must be the same with Higher Input Channels...'<assert_stmt>global_out_channels<eq>lower_in_channels 'Global Output Channels must be the 
same with Lower Input Channels...'<line_sep># set attrs self.in_channels=in_channels<line_sep>self.downsample_dw_channels1=downsample_dw_channels[0]<line_sep>self.downsample_dw_channels2=downsample_dw_channels[1]<line_sep>self.global_in_channels=global_in_channels<line_sep>self.global_block_channels=global_block_channels<line_sep>self.global_block_strides=global_block_strides<line_sep>self.global_out_channels=global_out_channels<line_sep>self.higher_in_channels=higher_in_channels<line_sep>self.lower_in_channels=lower_in_channels<line_sep>self.fusion_out_channels=fusion_out_channels<line_sep>self.out_indices=out_indices<line_sep>self.norm_cfg=norm_cfg<line_sep>self.act_cfg=act_cfg<line_sep>self.align_corners=align_corners<line_sep>self.dw_act_cfg=dw_act_cfg<line_sep># define modules self.learning_to_downsample=LearningToDownsample(in_channels=in_channels dw_channels=downsample_dw_channels out_channels=global_in_channels norm_cfg=self.norm_cfg act_cfg=self.act_cfg dw_act_cfg=self.dw_act_cfg)<line_sep>self.global_feature_extractor=GlobalFeatureExtractor(in_channels=global_in_channels block_channels=global_block_channels out_channels=global_out_channels strides=self.global_block_strides norm_cfg=self.norm_cfg act_cfg=self.act_cfg align_corners=self.align_corners )<line_sep>self.feature_fusion=FeatureFusionModule(higher_in_channels=higher_in_channels lower_in_channels=lower_in_channels out_channels=fusion_out_channels norm_cfg=self.norm_cfg dwconv_act_cfg=self.act_cfg conv_act_cfg=self.act_cfg align_corners=self.align_corners )<block_end>'''forward'''<def_stmt>forward self x<block_start>higher_res_features=self.learning_to_downsample(x)<line_sep>lower_res_features=self.global_feature_extractor(higher_res_features)<line_sep>fusion_output=self.feature_fusion(higher_res_features lower_res_features)<line_sep>outs=[higher_res_features lower_res_features fusion_output]<line_sep>outs=[outs[i]<for>i self.out_indices]<line_sep><return>tuple(outs)<block_end><block_end>'''build fastscnn'''<def_stmt>BuildFastSCNN fastscnn_type=<none> **kwargs# assert whether support <block_start><assert_stmt>fastscnn_type<is><none><line_sep># parse args default_args={'in_channels':3 'downsample_dw_channels':(32 48) 'global_in_channels':64 'global_block_channels':(64 96 128) 'global_block_strides':(2 2 1) 'global_out_channels':128 'higher_in_channels':64 'lower_in_channels':128 'fusion_out_channels':128 'out_indices':(0 1 2) 'norm_cfg':<none> 'act_cfg':{'type':'relu' 'opts':{'inplace':<true>}} 'align_corners':<false> 'dw_act_cfg':{'type':'relu' 'opts':{'inplace':<true>}} 'pretrained':<false> 'pretrained_model_path':'' }<for_stmt>key,value kwargs.items()<block_start><if_stmt>key<in>default_args<block_start>default_args.update({key:value})<block_end><block_end># obtain args for instanced fastscnn fastscnn_args=default_args.copy()<line_sep># obtain the instanced fastscnn model=FastSCNN(**fastscnn_args)<line_sep># load weights of pretrained model <if_stmt>default_args['pretrained']<and>os.path.exists(default_args['pretrained_model_path'])<block_start>checkpoint=torch.load(default_args['pretrained_model_path'])<if_stmt>'state_dict'<in>checkpoint<block_start>state_dict=checkpoint['state_dict']<block_end><else_stmt><block_start>state_dict=checkpoint<block_end>model.load_state_dict(state_dict 
strict=<false>)<block_end><elif_stmt>default_args['pretrained']<block_start>checkpoint=model_zoo.load_url(model_urls[fastscnn_type])<if_stmt>'state_dict'<in>checkpoint<block_start>state_dict=checkpoint['state_dict']<block_end><else_stmt><block_start>state_dict=checkpoint<block_end>model.load_state_dict(state_dict strict=<false>)<block_end># return the model <return>model<block_end>
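# Hedged usage sketch (added for illustration; not part of the original file).
# It assumes the FastSCNN module above is importable together with the repo's BuildNormalization/BuildActivation helpers;
# the 'batchnorm2d' type string and the {'type': ..., 'opts': ...} config layout are assumptions about that registry, while the relu act config just restates the builder's default.
import torch

model = BuildFastSCNN(norm_cfg={'type': 'batchnorm2d', 'opts': {}})  # fastscnn_type stays None, as required by the assert above
model.eval()
with torch.no_grad():
    feats = model(torch.randn(1, 3, 512, 1024))  # NCHW dummy image; spatial dims divisible by 32
for f in feats:  # three feature maps, per out_indices=(0, 1, 2)
    print(tuple(f.shape))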
# # The MIT License (MIT) # Copyright (c) 2007-2017 <NAME>, <NAME>, and contributors. # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_from_future_stmt> print_function<import_stmt>sys<import_stmt>re<import_stmt>copy<import_from_stmt>jsbeautifier.__version__ __version__<import_from_stmt>cssbeautifier.css.options BeautifierOptions<import_from_stmt>cssbeautifier.css.beautifier Beautifier<def_stmt>default_options <block_start><return>BeautifierOptions()<block_end><def_stmt>beautify string opts=default_options()<block_start>b=Beautifier(string opts)<line_sep><return>b.beautify()<block_end><def_stmt>beautify_file file_name opts=default_options()<block_start><if_stmt>file_name<eq>'-'# stdin <block_start>stream=sys.stdin<block_end><else_stmt><block_start>stream=open(file_name)<block_end>content=''.join(stream.readlines())<line_sep>b=Beautifier(content opts)<line_sep><return>b.beautify()<block_end><def_stmt>usage stream=sys.stdout<block_start>print("cssbeautifier.py@"+__version__+""" CSS beautifier (http://jsbeautifier.org/) """ file=stream)<if_stmt>stream<eq>sys.stderr<block_start><return>1<block_end><else_stmt><block_start><return>0<block_end><block_end>
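# Hedged usage sketch (illustrative; not part of the package source): beautify an inline CSS string with the functions defined above.
# `indent_size` is assumed to be a BeautifierOptions attribute; everything else uses only names defined in this module.
opts = default_options()
opts.indent_size = 2  # assumed option name
print(beautify(".nav li{color:#333;margin:0}", opts))
# beautify_file('-') would read the CSS from stdin instead of a string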
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. #-*- coding: utf-8 -*- <import_stmt>copy<import_stmt>paddle.fluid<as>fluid<import_stmt>parl<import_from_stmt>parl layers<class_stmt>DQN(parl.Algorithm)<block_start><def_stmt>__init__ self model act_dim=<none> gamma=<none> lr=<none><block_start>""" DQN algorithm Args: model (parl.Model): forward network that defines the Q function act_dim (int): dimension of the action space, i.e. the number of actions gamma (float): discount factor for the reward lr (float): learning rate. """<line_sep>self.model=model<line_sep>self.target_model=copy.deepcopy(model)<assert_stmt>isinstance(act_dim int)<assert_stmt>isinstance(gamma float)<assert_stmt>isinstance(lr float)<line_sep>self.act_dim=act_dim<line_sep>self.gamma=gamma<line_sep>self.lr=lr<block_end><def_stmt>predict self obs<block_start>""" Use the value network of self.model to get [Q(s,a1),Q(s,a2),...] """<line_sep><return>self.model.value(obs)<block_end><def_stmt>learn self obs action reward next_obs terminal<block_start>""" Update the value network of self.model with the DQN algorithm """<line_sep># get max Q' from the target_model, used to compute target_Q next_pred_value=self.target_model.value(next_obs)<line_sep>best_v=layers.reduce_max(next_pred_value dim=1)<line_sep>best_v.stop_gradient=<true># stop gradient propagation terminal=layers.cast(terminal dtype='float32')<line_sep>target=reward+(1.0-terminal)<times>self.gamma<times>best_v<line_sep>pred_value=self.model.value(obs)# get the predicted Q values # convert action to a one-hot vector, e.g. 3 => [0,0,0,1,0] action_onehot=layers.one_hot(action self.act_dim)<line_sep>action_onehot=layers.cast(action_onehot dtype='float32')<line_sep># the next line multiplies element-wise to pick out Q(s,a) of the taken action # e.g. pred_value = [[2.3, 5.7, 1.2, 3.9, 1.4]], action_onehot = [[0,0,0,1,0]] # ==> pred_action_value = [[3.9]] pred_action_value=layers.reduce_sum(layers.elementwise_mul(action_onehot pred_value) dim=1)<line_sep># compute the mean squared error between Q(s,a) and target_Q as the loss cost=layers.square_error_cost(pred_action_value target)<line_sep>cost=layers.reduce_mean(cost)<line_sep>optimizer=fluid.optimizer.Adam(learning_rate=self.lr)# use the Adam optimizer optimizer.minimize(cost)<line_sep><return>cost<block_end><def_stmt>sync_target self<block_start>""" Sync the parameter values of self.model to self.target_model """<line_sep>self.model.sync_weights_to(self.target_model)<block_end><block_end>
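# Hedged illustration (not part of PARL): the target-Q computation from learn(), re-expressed with NumPy so the arithmetic is easy to follow; all values below are made-up example numbers.
import numpy as np

gamma = 0.99
reward = np.array([1.0, 0.0])                       # batch of rewards
terminal = np.array([0.0, 1.0])                     # 1.0 where the episode ended
next_q = np.array([[0.2, 0.7, 0.1],                 # Q'(s', a) from the target network
                   [0.5, 0.4, 0.9]])
best_v = next_q.max(axis=1)                         # max_a' Q'(s', a')
target = reward + (1.0 - terminal) * gamma * best_v

pred_q = np.array([[2.3, 5.7, 1.2],                 # Q(s, a) from the online network
                   [0.3, 0.1, 0.8]])
action = np.array([1, 2])
action_onehot = np.eye(3)[action]                   # e.g. 1 -> [0., 1., 0.]
pred_action_value = (action_onehot * pred_q).sum(axis=1)
loss = np.mean((pred_action_value - target) ** 2)   # same squared-error objective as learn()
print(loss)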