#########################################
# Servo01_stop.py
# categories: intro
# more info @: http://myrobotlab.org/service/Intro
#########################################

# uncomment for virtual hardware
# Platform.setVirtual(True)

# Settings such as limits / port number / controller are saved after initial use,
# so you can share them between different scripts.
# servoPin01 = 4
# port = "/dev/ttyUSB0"
# port = "COM15"

# release a servo controller and a servo
Runtime.releaseService("arduino")
Runtime.releaseService("servo01")

# tell the service what is going on
# intro.isServoActivated = False  ## FIXME: this raises an error (attribute is read-only)
intro.broadcastState()
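# For context, a minimal companion sketch of how such a servo is usually brought
# up in MyRobotLab before this stop script runs. This is a sketch assuming the
# standard MyRobotLab Python API (Runtime.start, Arduino.connect, Servo.attach);
# the pin and port values simply echo the commented defaults above.
#
# arduino = Runtime.start("arduino", "Arduino")
# servo01 = Runtime.start("servo01", "Servo")
# arduino.connect("/dev/ttyUSB0")   # use "COM15" on Windows
# servo01.attach(arduino, 4)        # servoPin01 = 4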
import os

import pytest


@pytest.fixture(autouse=True)
def setup_hellworld_env(tmpdir):
    os.environ['HW_WORKDIR'] = str(tmpdir)
    yield
    os.environ.pop("HW_WORKDIR")
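# A minimal sketch of a test relying on the autouse fixture above: every test in
# the module sees HW_WORKDIR pointing at a fresh temporary directory, with no
# explicit fixture argument needed. (The test name is illustrative only.)
def test_workdir_points_at_tmpdir():
    workdir = os.environ['HW_WORKDIR']
    assert os.path.isdir(workdir)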
import os
import unittest

from approvaltests.approvals import verify_all
from approvaltests.reporters.generic_diff_reporter_factory import (
    GenericDiffReporterFactory,
)
from robot.utils import WINDOWS
from selenium import webdriver

from SeleniumLibrary.keywords import WebDriverCreator


class FireFoxProfileParsingTests(unittest.TestCase):

    @classmethod
    def setUpClass(cls):
        cls.log_dir = "/log/dir"
        cls.creator = WebDriverCreator(cls.log_dir)
        path = os.path.dirname(__file__)
        reporter_json = os.path.abspath(
            os.path.join(path, "..", "approvals_reporters.json")
        )
        factory = GenericDiffReporterFactory()
        factory.load(reporter_json)
        cls.reporter = factory.get_first_working()

    def setUp(self):
        self.results = []

    @unittest.skipIf(WINDOWS, reason="ApprovalTest do not support different line feeds")
    def test_single_method(self):
        self._parse_result(
            self.creator._get_ff_profile('set_preference("key1", "arg1")')
        )
        self._parse_result(
            self.creator._get_ff_profile(
                'set_preference("key1", "arg1");set_preference("key1", "arg1")'
            )
        )
        self._parse_result(
            self.creator._get_ff_profile(
                'set_preference("key1", "arg1") ; set_preference("key2", "arg2")'
            )
        )
        profile = self.creator._get_ff_profile("update_preferences()")
        self.results.append(isinstance(profile, webdriver.FirefoxProfile))
        try:
            self.creator._get_ff_profile('wrong_name("key1", "arg1")')
        except AttributeError as error:
            self.results.append(error)
        try:
            self.creator._get_ff_profile('set_proxy("foo")')
        except Exception as error:
            self.results.append(str(error))
        verify_all("Firefox profile parsing", self.results, reporter=self.reporter)

    def _parse_result(self, result):
        to_str = ""
        if "key1" in result.default_preferences:
            to_str = f"{to_str} key1 {result.default_preferences['key1']}"
        if "key2" in result.default_preferences:
            to_str = f"{to_str} key2 {result.default_preferences['key2']}"
        self.results.append(to_str)
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT license.

import codecs
import json
import os
import tempfile
import random
import string
import copy
import torch
import logging
import shutil

from losses.BaseLossConf import BaseLossConf
# import traceback
from settings import LanguageTypes, ProblemTypes, TaggingSchemes, SupportedMetrics, PredictionTypes, DefaultPredictionFields, ConstantStatic
from utils.common_utils import log_set, prepare_dir, md5, load_from_json, dump_to_json
from utils.exceptions import ConfigurationError
import numpy as np


class ConstantStaticItems(ConstantStatic):
    @staticmethod
    def concat_key_desc(key_prefix_desc, key):
        return key_prefix_desc + '.' + key

    @staticmethod
    def get_value_by_key(json, key, key_prefix='', use_default=False, default=None):
        """
        Args:
            json: a json object
            key: the key pointing to the value to acquire
            use_default: set use_default=True to fall back to a default value when the key cannot be found in the json object
            default: if the key is not found and default is None, an exception is raised unless use_default is True
        Returns:
            value:
        """
        try:
            value = json[key]
        except Exception:
            if not use_default:
                raise ConfigurationError("key[%s] can not be found in configuration file" % (key_prefix + key))
            else:
                value = default
        return value

    @staticmethod
    def add_item(item_name, use_default=False, default=None):
        def add_item_loading_func(use_default, default, func_get_value_by_key):
            @classmethod
            def load_data(cls, obj, json, key_prefix_desc='', use_default=use_default, default=default, func_get_value_by_key=func_get_value_by_key):
                obj.__dict__[cls.__name__] = func_get_value_by_key(json, cls.__name__, key_prefix_desc, use_default, default)
                return obj
            return load_data
        return type(item_name, (ConstantStatic,), dict(load_data=add_item_loading_func(use_default, default, __class__.get_value_by_key)))

    @classmethod
    def load_data(cls, obj, json, key_prefix_desc=''):
        if cls.__name__ in json.keys():
            json = json[cls.__name__]
        for key in cls.__dict__.keys():
            if not hasattr(cls.__dict__[key], 'load_data'):
                continue
            item = cls.__dict__[key]
            obj = item.load_data(obj, json, cls.concat_key_desc(key_prefix_desc, item.__name__))
        return obj


class ModelConf(object):
    def __init__(self, phase, conf_path, nb_version, params=None, mode='normal'):
        """ Load configuration from the configuration file and argparse parameters.

        Args:
            phase: train/test/predict/cache
                specially, the 'cache' phase is used for verifying an old cache
            conf_path:
            params:
            mode: 'normal' or 'philly'
        """
        self.phase = phase
        assert self.phase in set(['train', 'test', 'predict', 'cache'])
        self.conf_path = conf_path
        self.params = params
        self.mode = mode.lower()
        assert self.mode in set(['normal', 'philly']), 'Your mode %s is illegal, supported modes are: normal and philly!' % self.mode
        self.load_from_file(conf_path)
        self.check_version_compat(nb_version, self.tool_version)
        if phase != 'cache':
            self.check_conf()
        logging.debug('Print ModelConf below:')
        logging.debug('=' * 80)
        # print ModelConf
        for name, value in vars(self).items():
            if name.startswith("__") is False:
                logging.debug('%s: %s' % (str(name), str(value)))
        logging.debug('=' * 80)

    class Conf(ConstantStaticItems):
        license = ConstantStaticItems.add_item('license')
        tool_version = ConstantStaticItems.add_item('tool_version')
        model_description = ConstantStaticItems.add_item('model_description')
        language = ConstantStaticItems.add_item('language', use_default=True, default='english')

        class inputs(ConstantStaticItems):
            use_cache = ConstantStaticItems.add_item('use_cache', use_default=True, default=True)
            dataset_type = ConstantStaticItems.add_item('dataset_type')
            tagging_scheme = ConstantStaticItems.add_item('tagging_scheme', use_default=True, default=None)

            class data_paths(ConstantStaticItems):
                train_data_path = ConstantStaticItems.add_item('train_data_path', use_default=True, default=None)
                valid_data_path = ConstantStaticItems.add_item('valid_data_path', use_default=True, default=None)
                test_data_path = ConstantStaticItems.add_item('test_data_path', use_default=True, default=None)
                predict_data_path = ConstantStaticItems.add_item('predict_data_path', use_default=True, default=None)
                pre_trained_emb = ConstantStaticItems.add_item('pre_trained_emb', use_default=True, default=None)
                pretrained_model_path = ConstantStaticItems.add_item('pretrained_model_path', use_default=True, default=None)

            file_with_col_header = ConstantStaticItems.add_item('file_with_col_header', use_default=True, default=False)
            pretrained_emb_type = ConstantStaticItems.add_item('pretrained_emb_type', use_default=True, default='glove')
            pretrained_emb_binary_or_text = ConstantStaticItems.add_item('pretrained_emb_binary_or_text', use_default=True, default='text')
            involve_all_words_in_pretrained_emb = ConstantStaticItems.add_item('involve_all_words_in_pretrained_emb', use_default=True, default=False)
            add_start_end_for_seq = ConstantStaticItems.add_item('add_start_end_for_seq', use_default=True, default=False)
            file_header = ConstantStaticItems.add_item('file_header', use_default=True, default=None)
            predict_file_header = ConstantStaticItems.add_item('predict_file_header', use_default=True, default=None)
            model_inputs = ConstantStaticItems.add_item('model_inputs')
            target = ConstantStaticItems.add_item('target', use_default=True, default=None)
            positive_label = ConstantStaticItems.add_item('positive_label', use_default=True, default=None)

        class outputs(ConstantStaticItems):
            save_base_dir = ConstantStaticItems.add_item('save_base_dir', use_default=True, default=None)
            model_name = ConstantStaticItems.add_item('model_name')
            train_log_name = ConstantStaticItems.add_item('train_log_name', use_default=True, default=None)
            test_log_name = ConstantStaticItems.add_item('test_log_name', use_default=True, default=None)
            predict_log_name = ConstantStaticItems.add_item('predict_log_name', use_default=True, default=None)
            predict_fields = ConstantStaticItems.add_item('predict_fields', use_default=True, default=None)
            predict_output_name = ConstantStaticItems.add_item('predict_output_name', use_default=True, default='predict.tsv')
            cache_dir = ConstantStaticItems.add_item('cache_dir', use_default=True, default=None)

        class training_params(ConstantStaticItems):
            class vocabulary(ConstantStaticItems):
                min_word_frequency = ConstantStaticItems.add_item('min_word_frequency', use_default=True, default=3)
                max_vocabulary = ConstantStaticItems.add_item('max_vocabulary', use_default=True, default=800 * 1000)
                max_building_lines = ConstantStaticItems.add_item('max_building_lines', use_default=True, default=1000 * 1000)

            optimizer = ConstantStaticItems.add_item('optimizer', use_default=True, default=None)
            clip_grad_norm_max_norm = ConstantStaticItems.add_item('clip_grad_norm_max_norm', use_default=True, default=-1)
            chunk_size = ConstantStaticItems.add_item('chunk_size', use_default=True, default=1000 * 1000)
            lr_decay = ConstantStaticItems.add_item('lr_decay', use_default=True, default=1)
            minimum_lr = ConstantStaticItems.add_item('minimum_lr', use_default=True, default=0)
            epoch_start_lr_decay = ConstantStaticItems.add_item('epoch_start_lr_decay', use_default=True, default=1)
            use_gpu = ConstantStaticItems.add_item('use_gpu', use_default=True, default=False)
            cpu_num_workers = ConstantStaticItems.add_item('cpu_num_workers', use_default=True, default=-1)  # by default, use all the workers the cpu supports
            batch_size = ConstantStaticItems.add_item('batch_size', use_default=True, default=1)
            batch_num_to_show_results = ConstantStaticItems.add_item('batch_num_to_show_results', use_default=True, default=10)
            max_epoch = ConstantStaticItems.add_item('max_epoch', use_default=True, default=float('inf'))
            valid_times_per_epoch = ConstantStaticItems.add_item('valid_times_per_epoch', use_default=True, default=None)
            steps_per_validation = ConstantStaticItems.add_item('steps_per_validation', use_default=True, default=10)
            text_preprocessing = ConstantStaticItems.add_item('text_preprocessing', use_default=True, default=list())
            max_lengths = ConstantStaticItems.add_item('max_lengths', use_default=True, default=None)
            fixed_lengths = ConstantStaticItems.add_item('fixed_lengths', use_default=True, default=None)
            tokenizer = ConstantStaticItems.add_item('tokenizer', use_default=True, default=None)

        architecture = ConstantStaticItems.add_item('architecture')
        loss = ConstantStaticItems.add_item('loss', use_default=True, default=None)
        metrics = ConstantStaticItems.add_item('metrics', use_default=True, default=None)

    def raise_configuration_error(self, key):
        raise ConfigurationError("The configuration file %s is illegal. The item [%s] is not found." % (self.conf_path, key))

    def load_from_file(self, conf_path):
        # load file
        self.conf = load_from_json(conf_path, debug=False)
        self = self.Conf.load_data(self, {'Conf': self.conf}, key_prefix_desc='Conf')
        self.language = self.language.lower()
        self.configurate_outputs()
        self.configurate_inputs()
        self.configurate_training_params()
        self.configurate_architecture()
        self.configurate_loss()
        self.configurate_cache()

    def configurate_outputs(self):
        def configurate_logger(self):
            if self.phase == 'cache':
                return
            # dir
            if hasattr(self.params, 'log_dir') and self.params.log_dir:
                self.log_dir = self.params.log_dir
                prepare_dir(self.log_dir, True, allow_overwrite=True)
            else:
                self.log_dir = self.save_base_dir
            # path
            self.train_log_path = os.path.join(self.log_dir, self.train_log_name)
            self.test_log_path = os.path.join(self.log_dir, self.test_log_name)
            self.predict_log_path = os.path.join(self.log_dir, self.predict_log_name)
            if self.phase == 'train':
                log_path = self.train_log_path
            elif self.phase == 'test':
                log_path = self.test_log_path
            elif self.phase == 'predict':
                log_path = self.predict_log_path
            if log_path is None:
                self.raise_configuration_error(self.phase + '_log_name')
            # log level
            if self.mode == 'philly' or self.params.debug:
                log_set(log_path, console_level='DEBUG', console_detailed=True, disable_log_file=self.params.disable_log_file)
            else:
                log_set(log_path, disable_log_file=self.params.disable_log_file)

        # save base dir
        if hasattr(self.params, 'model_save_dir') and self.params.model_save_dir:
            self.save_base_dir = self.params.model_save_dir
        elif self.save_base_dir is None:
            self.raise_configuration_error('save_base_dir')

        # prepare save base dir
        if self.phase != 'cache':
            prepare_dir(self.save_base_dir, True, allow_overwrite=self.params.force or self.mode == 'philly',
                        extra_info='will overwrite model file and train.log' if self.phase == 'train' else 'will add %s.log and predict file' % self.phase)

        # logger
        configurate_logger(self)

        # predict output path
        if self.phase != 'cache':
            if self.params.predict_output_path:
                self.predict_output_path = self.params.predict_output_path
            else:
                self.predict_output_path = os.path.join(self.save_base_dir, self.predict_output_name)
            logging.debug('Prepare dir for: %s' % self.predict_output_path)
            prepare_dir(self.predict_output_path, False, allow_overwrite=self.params.force or self.mode == 'philly')

        if self.predict_fields is None:
            self.predict_fields = DefaultPredictionFields[ProblemTypes[self.problem_type]]

        self.model_save_path = os.path.join(self.save_base_dir, self.model_name)

    def configurate_inputs(self):
        def configurate_data_path(self):
            self.pretrained_emb_path = self.pre_trained_emb
            if self.mode != "normal":
                self.train_data_path = None
                self.valid_data_path = None
                self.test_data_path = None
                self.predict_data_path = None
                self.pretrained_emb_path = None
            if hasattr(self.params, 'train_data_path') and self.params.train_data_path:
                self.train_data_path = self.params.train_data_path
            if hasattr(self.params, 'valid_data_path') and self.params.valid_data_path:
                self.valid_data_path = self.params.valid_data_path
            if hasattr(self.params, 'test_data_path') and self.params.test_data_path:
                self.test_data_path = self.params.test_data_path
            if hasattr(self.params, 'predict_data_path') and self.params.predict_data_path:
                self.predict_data_path = self.params.predict_data_path
            if hasattr(self.params, 'pretrained_emb_path') and self.params.pretrained_emb_path:
                self.pretrained_emb_path = self.params.pretrained_emb_path

            if self.phase == 'train' or self.phase == 'cache':
                if self.valid_data_path is None and self.test_data_path is not None:
                    # We support test_data_path == None. If someone sets valid_data_path to None
                    # while test_data_path is not None, swap valid_data_path and test_data_path.
                    self.valid_data_path = self.test_data_path
                    self.test_data_path = None
            elif self.phase == 'predict':
                if self.predict_data_path is None and self.test_data_path is not None:
                    self.predict_data_path = self.test_data_path
                    self.test_data_path = None
            return self

        def configurate_data_format(self):
            # file columns
            if self.phase == 'train' or self.phase == 'test' or self.phase == 'cache':
                self.file_columns = self.file_header
                if self.file_columns is None:
                    self.raise_configuration_error('file_columns')
            if self.phase == 'predict':
                self.file_columns, self.predict_file_columns = self.file_header, self.predict_file_header
                if self.file_columns is None and self.predict_file_columns is None:
                    self.raise_configuration_error('predict_file_columns')
                if self.file_columns and self.predict_file_columns is None:
                    self.predict_file_columns = self.file_columns

            # target
            if self.phase != 'predict':
                self.answer_column_name = self.target
                if self.target is None and self.phase != 'cache':
                    self.raise_configuration_error('target')
            if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging and self.add_start_end_for_seq is None:
                self.add_start_end_for_seq = True

            # pretrained embedding
            if 'word' in self.architecture[0]['conf'] and self.pretrained_emb_path:
                if hasattr(self.params, 'involve_all_words_in_pretrained_emb') and self.params.involve_all_words_in_pretrained_emb:
                    self.involve_all_words_in_pretrained_emb = self.params.involve_all_words_in_pretrained_emb
                if hasattr(self.params, 'pretrained_emb_type') and self.params.pretrained_emb_type:
                    self.pretrained_emb_type = self.params.pretrained_emb_type
                if hasattr(self.params, 'pretrained_emb_binary_or_text') and self.params.pretrained_emb_binary_or_text:
                    self.pretrained_emb_binary_or_text = self.params.pretrained_emb_binary_or_text
                self.pretrained_emb_dim = self.architecture[0]['conf']['word']['dim']
            else:
                self.pretrained_emb_path = None
                self.involve_all_words_in_pretrained_emb = None
                self.pretrained_emb_type = None
                self.pretrained_emb_binary_or_text = None
                self.pretrained_emb_dim = None
            return self

        def configurate_model_input(self):
            self.object_inputs = self.model_inputs
            self.object_inputs_names = [name for name in self.object_inputs]
            return self

        self.problem_type = self.dataset_type.lower()

        # previous model path
        if hasattr(self.params, 'previous_model_path') and self.params.previous_model_path:
            self.previous_model_path = self.params.previous_model_path
        else:
            self.previous_model_path = os.path.join(self.save_base_dir, self.model_name)

        # pretrained model path
        if hasattr(self.params, 'pretrained_model_path') and self.params.pretrained_model_path:
            self.pretrained_model_path = self.params.pretrained_model_path

        # saved problem path
        model_path = None
        if self.phase == 'train':
            model_path = self.pretrained_model_path
        elif self.phase == 'test' or self.phase == 'predict':
            model_path = self.previous_model_path
        if model_path:
            model_path_dir = os.path.dirname(model_path)
            self.saved_problem_path = os.path.join(model_path_dir, '.necessary_cache', 'problem.pkl')
            if not os.path.isfile(self.saved_problem_path):
                self.saved_problem_path = os.path.join(model_path_dir, 'necessary_cache', 'problem.pkl')
            if not (os.path.isfile(model_path) and os.path.isfile(self.saved_problem_path)):
                raise Exception('Previous trained model %s or its dictionaries %s does not exist!' % (model_path, self.saved_problem_path))

        configurate_data_path(self)
        configurate_data_format(self)
        configurate_model_input(self)

    def configurate_training_params(self):
        # optimizer
        if self.phase == 'train':
            if self.optimizer is None:
                self.raise_configuration_error('training_params.optimizer')
            if 'name' not in self.optimizer.keys():
                self.raise_configuration_error('training_params.optimizer.name')
            self.optimizer_name = self.optimizer['name']
            if 'params' not in self.optimizer.keys():
                self.raise_configuration_error('training_params.optimizer.params')
            self.optimizer_params = self.optimizer['params']
            if hasattr(self.params, 'learning_rate') and self.params.learning_rate:
                self.optimizer_params['lr'] = self.params.learning_rate

        # batch size
        self.batch_size_each_gpu = self.batch_size  # the batch_size in the conf file is the batch_size on each GPU
        if hasattr(self.params, 'batch_size') and self.params.batch_size:
            self.batch_size_each_gpu = self.params.batch_size
        if self.batch_size_each_gpu is None:
            self.raise_configuration_error('training_params.batch_size')
        self.batch_size_total = self.batch_size_each_gpu
        if torch.cuda.device_count() > 1:
            self.batch_size_total = torch.cuda.device_count() * self.batch_size_each_gpu
            self.batch_num_to_show_results = self.batch_num_to_show_results // torch.cuda.device_count()

        if hasattr(self.params, 'max_epoch') and self.params.max_epoch:
            self.max_epoch = self.params.max_epoch

        if self.valid_times_per_epoch is not None:
            logging.info("configuration[training_params][valid_times_per_epoch] is deprecated, please use configuration[training_params][steps_per_validation] instead")

        # sequence length
        if self.fixed_lengths:
            self.max_lengths = None
        if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging:
            self.fixed_lengths = None
            self.max_lengths = None

        # text preprocessing
        self.__text_preprocessing = self.text_preprocessing
        self.DBC2SBC = True if 'DBC2SBC' in self.__text_preprocessing else False
        self.unicode_fix = True if 'unicode_fix' in self.__text_preprocessing else False
        self.remove_stopwords = True if 'remove_stopwords' in self.__text_preprocessing else False

        # tokenizer
        if self.tokenizer is None:
            self.tokenizer = 'jieba' if self.language == 'chinese' else 'nltk'

        # GPU/CPU
        if self.phase != 'cache':
            if torch.cuda.is_available() and torch.cuda.device_count() > 0 and self.use_gpu:
                logging.info("Activating GPU mode, there are %d GPUs available" % torch.cuda.device_count())
            else:
                self.use_gpu = False
                logging.info("Activating CPU mode")

    def configurate_architecture(self):
        self.input_types = self.architecture[0]['conf']

        # extra feature
        feature_all = set([_.lower() for _ in self.input_types.keys()])
        formal_feature = set(['word', 'char'])
        extra_feature_num = feature_all - formal_feature
        self.extra_feature = len(extra_feature_num) != 0
        if self.extra_feature:
            if self.DBC2SBC:
                logging.warning("Detected the extra feature %s; DBC2SBC is set to False." % ''.join(list(extra_feature_num)))
            if self.unicode_fix:
                logging.warning("Detected the extra feature %s; unicode_fix is set to False." % ''.join(list(extra_feature_num)))
            if self.remove_stopwords:
                logging.warning("Detected the extra feature %s; remove_stopwords is set to False." % ''.join(list(extra_feature_num)))

        # output layer
        self.output_layer_id = []
        for single_layer in self.architecture:
            if 'output_layer_flag' in single_layer and single_layer['output_layer_flag']:
                self.output_layer_id.append(single_layer['layer_id'])

        # check CNN layer & change min sentence length
        cnn_rele_layers = ['Conv', 'ConvPooling']
        self.min_sentence_len = 0
        for layer_index, single_layer in enumerate(self.architecture):
            if layer_index == 0:
                continue
            if sum([_ == single_layer['layer'] for _ in cnn_rele_layers]):
                # get window_size conf: the type may be int or list
                for single_conf, single_conf_value in single_layer['conf'].items():
                    if 'window' in single_conf.lower():
                        self.min_sentence_len = max(self.min_sentence_len, np.max(np.array([single_conf_value])))
                        break

    def configurate_loss(self):
        if self.phase != 'train' and self.phase != 'test':
            return
        if self.loss is None or self.metrics is None:
            self.raise_configuration_error('loss/metrics')
        self.loss = BaseLossConf.get_conf(**self.loss)
        if 'auc' in self.metrics and ProblemTypes[self.problem_type] == ProblemTypes.classification:
            self.pos_label = self.positive_label

    def configurate_cache(self):
        # whether to use the cache
        if self.mode == 'philly':
            self.use_cache = True

        # cache dir
        if self.phase == 'train':
            if hasattr(self.params, 'cache_dir') and self.params.cache_dir:
                self.cache_dir = self.params.cache_dir
            else:
                if self.mode == 'normal':
                    if self.use_cache is False:
                        self.cache_dir = os.path.join(tempfile.gettempdir(), 'neuron_blocks', ''.join(random.sample(string.ascii_letters + string.digits, 16)))
                else:
                    # for philly mode, we can only save files in model_path or scratch_path
                    self.cache_dir = os.path.join(self.save_base_dir, 'cache')

            self.problem_path = os.path.join(self.cache_dir, 'problem.pkl')
            if self.pretrained_emb_path is not None:
                self.emb_pkl_path = os.path.join(self.cache_dir, 'emb.pkl')
            else:
                self.emb_pkl_path = None
        else:
            tmp_problem_path = os.path.join(self.save_base_dir, '.necessary_cache', 'problem.pkl')
            self.problem_path = tmp_problem_path if os.path.isfile(tmp_problem_path) else os.path.join(self.save_base_dir, 'necessary_cache', 'problem.pkl')

        # md5 of training data and problem
        self.train_data_md5 = None
        if self.phase == 'train' and self.train_data_path:
            logging.info("Calculating the md5 of training data ...")
            self.train_data_md5 = md5([self.train_data_path])
            logging.info("the md5 of training data is %s" % (self.train_data_md5))
        self.problem_md5 = None

        # encoding
        self.encoding_cache_dir = None
        self.encoding_cache_index_file_path = None
        self.encoding_cache_index_file_md5_path = None
        self.encoding_file_index = None
        self.encoding_cache_legal_line_cnt = 0
        self.encoding_cache_illegal_line_cnt = 0
        self.load_encoding_cache_generator = None

    def check_conf(self):
        """ Verify whether the configuration is legal.

        Returns:

        """
        # In philly mode, ensure the data, model, etc. are not the local paths defined in the configuration file.
        if self.mode == 'philly':
            assert not (hasattr(self.params, 'train_data_path') and self.params.train_data_path is None and hasattr(self, 'train_data_path') and self.train_data_path), 'In philly mode, but you define a local train_data_path:%s in your configuration file' % self.train_data_path
            assert not (hasattr(self.params, 'valid_data_path') and self.params.valid_data_path is None and hasattr(self, 'valid_data_path') and self.valid_data_path), 'In philly mode, but you define a local valid_data_path:%s in your configuration file' % self.valid_data_path
            assert not (hasattr(self.params, 'test_data_path') and self.params.test_data_path is None and hasattr(self, 'test_data_path') and self.test_data_path), 'In philly mode, but you define a local test_data_path:%s in your configuration file' % self.test_data_path
            if self.phase == 'train':
                assert hasattr(self.params, 'model_save_dir') and self.params.model_save_dir, 'In philly mode, you must define a model save dir through the training params'
                assert not (self.params.pretrained_model_path is None and self.pretrained_model_path), 'In philly mode, but you define a local pretrained model path:%s in your configuration file' % self.pretrained_model_path
                assert not (self.pretrained_model_path is None and self.params.pretrained_emb_path is None and self.pretrained_emb_path), 'In philly mode, but you define a local pretrained embedding:%s in your configuration file' % self.pretrained_emb_path
            elif self.phase == 'test' or self.phase == 'predict':
                assert not (self.params.previous_model_path is None and self.previous_model_path), 'In philly mode, but you define a local model trained previously %s in your configuration file' % self.previous_model_path

        # check inputs
        # it seems that os.path.isfile cannot detect hdfs files
        if self.phase == 'train':
            assert self.train_data_path is not None, "Please define train_data_path"
            assert os.path.isfile(self.train_data_path), "Training data %s does not exist!" % self.train_data_path
            assert self.valid_data_path is not None, "Please define valid_data_path"
            assert os.path.isfile(self.valid_data_path), "Valid data %s does not exist!" % self.valid_data_path
            if hasattr(self, 'pretrained_emb_type') and self.pretrained_emb_type:
                assert self.pretrained_emb_type in set(['glove', 'word2vec', 'fasttext']), 'Embedding type %s is not supported! We support glove, word2vec, fasttext now.' % self.pretrained_emb_type
            if hasattr(self, 'pretrained_emb_binary_or_text') and self.pretrained_emb_binary_or_text:
                assert self.pretrained_emb_binary_or_text in set(['text', 'binary']), 'Embedding file type %s is not supported! We support text and binary.' % self.pretrained_emb_binary_or_text
        elif self.phase == 'test':
            assert self.test_data_path is not None, "Please define test_data_path"
            assert os.path.isfile(self.test_data_path), "Test data %s does not exist!" % self.test_data_path
        elif self.phase == 'predict':
            assert self.predict_data_path is not None, "Please define predict_data_path"
            assert os.path.isfile(self.predict_data_path), "Predict data %s does not exist!" % self.predict_data_path

        # check language types
        SUPPORTED_LANGUAGES = set(LanguageTypes._member_names_)
        assert self.language in SUPPORTED_LANGUAGES, "Language type %s is not supported now. Supported types: %s" % (self.language, ",".join(SUPPORTED_LANGUAGES))

        # check problem types
        SUPPORTED_PROBLEMS = set(ProblemTypes._member_names_)
        assert self.problem_type in SUPPORTED_PROBLEMS, "Data type %s is not supported now. Supported types: %s" % (self.problem_type, ",".join(SUPPORTED_PROBLEMS))

        if ProblemTypes[self.problem_type] == ProblemTypes.sequence_tagging:
            SUPPORTED_TAGGING_SCHEMES = set(TaggingSchemes._member_names_)
            assert self.tagging_scheme is not None, "For sequence tagging problems, a tagging scheme must be defined at configuration['inputs']['tagging_scheme']!"
            assert self.tagging_scheme in SUPPORTED_TAGGING_SCHEMES, "Tagging scheme %s is not supported now. Supported schemes: %s" % (self.tagging_scheme, ",".join(SUPPORTED_TAGGING_SCHEMES))

            # the max_lengths of all the inputs and targets should be consistent
            if self.max_lengths:
                max_lengths = list(self.max_lengths.values())
                for i in range(len(max_lengths) - 1):
                    assert max_lengths[i] == max_lengths[i + 1], "For sequence tagging tasks, the max_lengths of all the inputs and targets should be consistent!"

        # check applicable metrics
        if self.phase == 'train' or self.phase == 'test':
            self.metrics_post_check = set()  # saved to check later
            diff = set(self.metrics) - SupportedMetrics[ProblemTypes[self.problem_type]]
            illegal_metrics = []
            for diff_metric in diff:
                if diff_metric.find('@') != -1:
                    field, target = diff_metric.split('@')
                    # if not field in PredictionTypes[ProblemTypes[self.problem_type]]:
                    if field != 'auc':
                        illegal_metrics.append(diff_metric)
                    else:
                        if target != 'average':
                            self.metrics_post_check.add(diff_metric)
            if len(illegal_metrics) > 0:
                raise Exception("Metrics %s are not supported for %s tasks!" % (",".join(list(illegal_metrics)), self.problem_type))

        # check predict fields
        if self.phase == 'predict':
            self.predict_fields_post_check = set()  # saved to check later
            diff = set(self.predict_fields) - PredictionTypes[ProblemTypes[self.problem_type]]
            illegal_fields = []
            for diff_field in diff:
                if diff_field.find('@') != -1 and diff_field.startswith('confidence'):
                    field, target = diff_field.split('@')
                    # if not field in PredictionTypes[ProblemTypes[self.problem_type]]:
                    if field != 'confidence':
                        illegal_fields.append(diff_field)
                    else:
                        # we don't know yet whether the target exists in the output dictionary; check after the problem is loaded
                        self.predict_fields_post_check.add(diff_field)
                else:
                    illegal_fields.append(diff_field)
            if len(illegal_fields) > 0:
                raise Exception("The prediction fields %s is/are not supported!" % ",".join(illegal_fields))

    def check_version_compat(self, nb_version, conf_version):
        """ Check whether the versions of the toolkit and the configuration file are compatible.

        Args:
            nb_version: x.y.z
            conf_version: x.y.z

        Returns:
            If the x field and y field are both the same, return True, else return False

        """
        nb_version_split = nb_version.split('.')
        conf_version_split = conf_version.split('.')
        if len(nb_version_split) != len(conf_version_split):
            raise ConfigurationError('The tool_version field of your configuration is illegal!')
        if not (nb_version_split[0] == conf_version_split[0] and nb_version_split[1] == conf_version_split[1]):
            raise ConfigurationError('The NeuronBlocks version is %s, but the configuration version is %s, please update your configuration to %s.%s.X' % (nb_version, conf_version, nb_version_split[0], nb_version_split[1]))

    def back_up(self, params):
        shutil.copy(params.conf_path, self.save_base_dir)
        logging.info('Configuration file is backed up to %s' % (self.save_base_dir))
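# A minimal usage sketch for ModelConf (the config path and version string are
# hypothetical, and `args` stands in for the argparse namespace NeuronBlocks
# normally passes in as `params`):
#
# conf = ModelConf('train', 'model_zoo/demo/conf.json', nb_version='1.1.0',
#                  params=args, mode='normal')
# logging.info('model will be saved to %s' % conf.model_save_path)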
from django.db import models
from django.conf import settings
from django.utils.timezone import make_naive
import pytz


class BaseModel(models.Model):
    class Meta:
        abstract = True


class Item(BaseModel):
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    image = models.ImageField(upload_to='items')
    source_url = models.TextField()
    message = models.TextField(blank=True, null=True)
    pin_count = models.IntegerField(default=0)

    # class Meta:
    #     db_table = 'pinterest_example_item'


class Board(BaseModel):
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    name = models.CharField(max_length=255)
    description = models.TextField(blank=True, null=True)
    slug = models.SlugField()


class Pin(BaseModel):
    user = models.ForeignKey(settings.AUTH_USER_MODEL)
    item = models.ForeignKey(Item)
    board = models.ForeignKey(Board)
    influencer = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='influenced_pins')
    message = models.TextField(blank=True, null=True)
    created_at = models.DateTimeField(auto_now_add=True)

    def create_activity(self):
        from stream_framework.activity import Activity
        from core.verbs import Pin as PinVerb
        activity = Activity(
            self.user_id,
            PinVerb,
            self.id,
            self.influencer_id,
            time=make_naive(self.created_at, pytz.utc),
            extra_context=dict(item_id=self.item_id),
        )
        return activity


class Follow(BaseModel):
    '''
    A simple table mapping who a user is following.
    For example, if user is Kyle and Kyle is following Alex,
    the target would be Alex.
    '''
    user = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='following_set')
    target = models.ForeignKey(settings.AUTH_USER_MODEL, related_name='follower_set')
    created_at = models.DateTimeField(auto_now_add=True)


from core import verbs
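# A minimal sketch of how create_activity is typically used to push a pin into
# a stream_framework feed. The feed-manager call is an assumption and not part
# of this models file; only Pin.create_activity above is given.
#
# pin = Pin.objects.create(user=user, item=item, board=board, influencer=user)
# activity = pin.create_activity()   # hand this to your feed manager, e.g. manager.add_pin(pin)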
import os
import subprocess
import re
import shutil
import logging
import tempfile
from pathlib import Path

logger = logging.getLogger('blendtorch')

script = r'''
import zmq
'''


def discover_blender(additional_blender_paths=None):
    '''Return Blender info as dict with keys `path`, `major`, `minor`.'''
    my_env = os.environ.copy()
    if additional_blender_paths is not None:
        my_env['PATH'] = additional_blender_paths + os.pathsep + my_env['PATH']

    # Determine path
    bpath = shutil.which('blender', path=my_env['PATH'])
    if bpath is None:
        logger.warning('Could not find Blender.')
        return None
    else:
        logger.debug(f'Discovered Blender in {bpath}')
    bpath = Path(bpath).resolve()

    p = subprocess.Popen(f'"{bpath}" --version',
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=my_env)
    out, err = p.communicate()
    errcode = p.returncode

    # Determine version
    r = re.compile(r'Blender\s(\d+)\.(\d+)', re.IGNORECASE)
    g = re.search(r, str(out))
    version = (None, None)
    if errcode == 0 and g is not None:
        version = (int(g[1]), int(g[2]))
    else:
        logger.warning('Failed to parse Blender version.')
        return None

    # Check whether a minimal Python script works
    with tempfile.NamedTemporaryFile(mode='w', delete=False) as fp:
        fp.write(script)

    p = subprocess.Popen(f'"{bpath}" --background --python-use-system-env --python-exit-code 255 --python {fp.name}',
                         shell=True,
                         stdout=subprocess.PIPE,
                         stderr=subprocess.PIPE,
                         env=my_env)
    out, err = p.communicate()
    errcode = p.returncode
    os.remove(fp.name)

    if errcode != 0:
        logger.warning('Failed to run minimal Blender script; ensure Python requirements are installed.')
        return None

    return {'path': bpath, 'major': version[0], 'minor': version[1]}


def _main():
    print(discover_blender())


if __name__ == '__main__':
    _main()
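# A small follow-on sketch (hypothetical, building on discover_blender above):
# once discovery succeeds, the returned path can launch a headless Blender
# session using the same flags the probe already uses.
#
# info = discover_blender()
# if info is not None and (info['major'], info['minor']) >= (2, 80):
#     subprocess.run(f'"{info["path"]}" --background', shell=True)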
# killroy was here
#   Copyright 2017-2019 typed_python Authors
#
#   Licensed under the Apache License, Version 2.0 (the "License");
#   you may not use this file except in compliance with the License.
#   You may obtain a copy of the License at
#
#       http://www.apache.org/licenses/LICENSE-2.0
#
#   Unless required by applicable law or agreed to in writing, software
#   distributed under the License is distributed on an "AS IS" BASIS,
#   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#   See the License for the specific language governing permissions and
#   limitations under the License.

from typed_python import (
    TypeFunction, Int16, UInt64, Float32, Alternative, Forward, Dict,
    ConstDict, ListOf, Compiled, OneOf
)
import typed_python._types as _types
from typed_python import Entrypoint
import unittest
import pytest
import time
import psutil
from math import trunc, floor, ceil


class TestAlternativeCompilation(unittest.TestCase):
    def test_default_constructor(self):
        @Entrypoint
        def setIt(d, x):
            return d.setdefault(x)

        Simple = Alternative("Simple", A={}, B={}, C={})
        Complex = Alternative("Complex", A=dict(x=str), B=dict(x=str, y=float), C={})

        assert setIt(Dict(int, Simple)(), 10).matches.A
        assert setIt(Dict(int, Complex)(), 10).matches.A

    def test_simple_alternative_passing(self):
        Simple = Alternative("Simple", A={}, B={}, C={})

        @Compiled
        def f(s: Simple):
            y = s
            return y

        self.assertEqual(f(Simple.A()), Simple.A())
        self.assertEqual(f(Simple.B()), Simple.B())
        self.assertEqual(f(Simple.C()), Simple.C())

    def test_complex_alternative_passing(self):
        Complex = Forward("Complex")
        Complex = Complex.define(Alternative(
            "Complex",
            A={'a': str, 'b': int},
            B={'a': str, 'c': int},
            C={'a': str, 'd': Complex}
        ))

        c = Complex.A(a="hi", b=20)
        c2 = Complex.C(a="hi", d=c)

        @Compiled
        def f(c: Complex):
            y = c
            return y

        self.assertEqual(f(c), c)
        self.assertEqual(f(c2), c2)
        self.assertEqual(_types.refcount(c), 2)
        self.assertEqual(_types.refcount(c2), 1)

    def test_construct_alternative(self):
        A = Alternative("A", X={'x': int})

        @Compiled
        def f():
            return A.X(x=10)

        self.assertTrue(f().matches.X)
        self.assertEqual(f().x, 10)

    def test_alternative_matches(self):
        A = Alternative("A", X={'x': int}, Y={'x': int})

        @Compiled
        def f(x: A):
            return x.matches.X

        self.assertTrue(f(A.X()))
        self.assertFalse(f(A.Y()))

    def test_alternative_member_homogenous(self):
        A = Alternative("A", X={'x': int}, Y={'x': int})

        @Compiled
        def f(x: A):
            return x.x

        self.assertEqual(f(A.X(x=10)), 10)
        self.assertEqual(f(A.Y(x=10)), 10)

    def test_alternative_member_diverse(self):
        A = Alternative("A", X={'x': int}, Y={'x': float})

        @Compiled
        def f(x: A):
            return x.x

        self.assertEqual(f(A.X(x=10)), 10)
        self.assertEqual(f(A.Y(x=10.5)), 10.5)

    def test_alternative_member_distinct(self):
        A = Alternative("A", X={'x': int}, Y={'y': float})

        @Compiled
        def f(x: A):
            if x.matches.X:
                return x.x
            if x.matches.Y:
                return x.y

        self.assertEqual(f(A.X(x=10)), 10)
        self.assertEqual(f(A.Y(y=10.5)), 10.5)

    def test_matching_recursively(self):
        @TypeFunction
        def Tree(T):
            TreeType = Forward("TreeType")
            TreeType = TreeType.define(Alternative(
                "Tree",
                Leaf={'value': T},
                Node={'left': TreeType, 'right': TreeType}
            ))
            return TreeType

        def treeSum(x: Tree(int)):
            matches = x.matches.Leaf
            if matches:
                return x.value
            if x.matches.Node:
                return treeSum(x.left) + treeSum(x.right)
            return 0

        def buildTree(depth: int, offset: int) -> Tree(int):
            if depth > 0:
                return Tree(int).Node(
                    left=buildTree(depth - 1, offset),
                    right=buildTree(depth - 1, offset + 1),
                )
            return Tree(int).Leaf(value=offset)

        aTree = Compiled(buildTree)(15, 0)
        treeSumCompiled = Compiled(treeSum)

        t0 = time.time()
        sum = treeSum(aTree)
        t1 = time.time()
        sumCompiled = treeSumCompiled(aTree)
        t2 = time.time()

        self.assertEqual(sum, sumCompiled)
        speedup = (t1 - t0) / (t2 - t1)
        self.assertGreater(speedup, 20)  # I get about 50

    def test_compile_alternative_magic_methods(self):
        A = Alternative(
            "A", a={'a': int}, b={'b': str},
            __bool__=lambda self: False,
            __str__=lambda self: "my str",
            __repr__=lambda self: "my repr",
            __call__=lambda self, i: "my call",
            __len__=lambda self: 42,
            __contains__=lambda self, item: item == 1,
            __bytes__=lambda self: b'my bytes',
            __format__=lambda self, spec: "my format",
            __int__=lambda self: 43,
            __float__=lambda self: 44.44,
            __complex__=lambda self: 3 + 4j,
            __add__=lambda self, other: A.b("add"),
            __sub__=lambda self, other: A.b("sub"),
            __mul__=lambda self, other: A.b("mul"),
            __matmul__=lambda self, other: A.b("matmul"),
            __truediv__=lambda self, other: A.b("truediv"),
            __floordiv__=lambda self, other: A.b("floordiv"),
            __divmod__=lambda self, other: A.b("divmod"),
            __mod__=lambda self, other: A.b("mod"),
            __pow__=lambda self, other: A.b("pow"),
            __lshift__=lambda self, other: A.b("lshift"),
            __rshift__=lambda self, other: A.b("rshift"),
            __and__=lambda self, other: A.b("and"),
            __or__=lambda self, other: A.b("or"),
            __xor__=lambda self, other: A.b("xor"),
            __iadd__=lambda self, other: A.b("iadd"),
            __isub__=lambda self, other: A.b("isub"),
            __imul__=lambda self, other: A.b("imul"),
            __imatmul__=lambda self, other: A.b("imatmul"),
            __itruediv__=lambda self, other: A.b("itruediv"),
            __ifloordiv__=lambda self, other: A.b("ifloordiv"),
            __imod__=lambda self, other: A.b("imod"),
            __ipow__=lambda self, other: A.b("ipow"),
            __ilshift__=lambda self, other: A.b("ilshift"),
            __irshift__=lambda self, other: A.b("irshift"),
            __iand__=lambda self, other: A.b("iand"),
            __ior__=lambda self, other: A.b("ior"),
            __ixor__=lambda self, other: A.b("ixor"),
            __neg__=lambda self: A.b("neg"),
            __pos__=lambda self: A.b("pos"),
            __invert__=lambda self: A.b("invert"),
            __abs__=lambda self: A.b("abs"),
        )

        def f_bool(x: A):
            return bool(x)

        def f_str(x: A):
            return str(x)

        def f_repr(x: A):
            return repr(x)

        def f_call(x: A):
            return x(1)

        def f_1in(x: A):
            return 1 in x

        def f_0in(x: A):
            return 0 in x

        def f_len(x: A):
            return len(x)

        def f_int(x: A):
            return int(x)

        def f_float(x: A):
            return float(x)

        def f_add(x: A):
            return x + A.a()

        def f_sub(x: A):
            return x - A.a()

        def f_mul(x: A):
            return x * A.a()

        def f_div(x: A):
            return x / A.a()

        def f_floordiv(x: A):
            return x // A.a()

        def f_matmul(x: A):
            return x @ A.a()

        def f_mod(x: A):
            return x % A.a()

        def f_and(x: A):
            return x & A.a()

        def f_or(x: A):
            return x | A.a()

        def f_xor(x: A):
            return x ^ A.a()

        def f_rshift(x: A):
            return x >> A.a()

        def f_lshift(x: A):
            return x << A.a()

        def f_pow(x: A):
            return x ** A.a()

        def f_neg(x: A):
            return -x

        def f_pos(x: A):
            return +x

        def f_invert(x: A):
            return ~x

        def f_abs(x: A):
            return abs(x)

        def f_iadd(x: A):
            x += A.a()
            return x

        def f_isub(x: A):
            x -= A.a()
            return x

        def f_imul(x: A):
            x *= A.a()
            return x

        def f_idiv(x: A):
            x /= A.a()
            return x

        def f_ifloordiv(x: A):
            x //= A.a()
            return x

        def f_imatmul(x: A):
            x @= A.a()
            return x

        def f_imod(x: A):
            x %= A.a()
            return x

        def f_iand(x: A):
            x &= A.a()
            return x

        def f_ior(x: A):
            x |= A.a()
            return x

        def f_ixor(x: A):
            x ^= A.a()
            return x

        def f_irshift(x: A):
            x >>= A.a()
            return x

        def f_ilshift(x: A):
            x <<= A.a()
            return x

        def f_ipow(x: A):
            x **= A.a()
            return x

        test_cases = [f_int, f_float, f_bool, f_str, f_repr, f_call, f_0in, f_1in, f_len,
                      f_add, f_sub, f_mul, f_div, f_floordiv, f_matmul, f_mod, f_and, f_or,
                      f_xor, f_rshift, f_lshift, f_pow, f_neg, f_pos, f_invert, f_abs,
                      f_iadd, f_isub, f_imul, f_idiv, f_ifloordiv, f_imatmul, f_imod,
                      f_iand, f_ior, f_ixor, f_irshift, f_ilshift, f_ipow]

        for f in test_cases:
            compiled_f = Compiled(f)
            r1 = f(A.a())
            r2 = compiled_f(A.a())
            if r1 != r2:
                print("mismatch")
            self.assertEqual(r1, r2)

    def test_compile_alternative_reverse_methods(self):
        A = Alternative(
            "A", a={'a': int}, b={'b': str},
            __radd__=lambda self, other: "radd" + repr(other),
            __rsub__=lambda self, other: "rsub" + repr(other),
            __rmul__=lambda self, other: "rmul" + repr(other),
            __rmatmul__=lambda self, other: "rmatmul" + repr(other),
            __rtruediv__=lambda self, other: "rtruediv" + repr(other),
            __rfloordiv__=lambda self, other: "rfloordiv" + repr(other),
            __rmod__=lambda self, other: "rmod" + repr(other),
            __rpow__=lambda self, other: "rpow" + repr(other),
            __rlshift__=lambda self, other: "rlshift" + repr(other),
            __rrshift__=lambda self, other: "rrshift" + repr(other),
            __rand__=lambda self, other: "rand" + repr(other),
            __rxor__=lambda self, other: "rxor" + repr(other),
            __ror__=lambda self, other: "ror" + repr(other),
        )

        values = [1, Int16(1), UInt64(1), 1.234, Float32(1.234), True, "abc",
                  ListOf(int)((1, 2)), ConstDict(str, str)({"a": "1"})]
        for v in values:
            T = type(v)

            # exercise each reverse (r-)operator with a non-Alternative left operand
            def f_radd(v: T, x: A):
                return v + x

            def f_rsub(v: T, x: A):
                return v - x

            def f_rmul(v: T, x: A):
                return v * x

            def f_rmatmul(v: T, x: A):
                return v @ x

            def f_rtruediv(v: T, x: A):
                return v / x

            def f_rfloordiv(v: T, x: A):
                return v // x

            def f_rmod(v: T, x: A):
                return v % x

            def f_rpow(v: T, x: A):
                return v ** x

            def f_rlshift(v: T, x: A):
                return v << x

            def f_rrshift(v: T, x: A):
                return v >> x

            def f_rand(v: T, x: A):
                return v & x

            def f_rxor(v: T, x: A):
                return v ^ x

            def f_ror(v: T, x: A):
                return v | x

            for f in [f_radd, f_rsub, f_rmul, f_rmatmul, f_rtruediv, f_rfloordiv, f_rmod,
                      f_rpow, f_rlshift, f_rrshift, f_rand, f_rxor, f_ror]:
                r1 = f(v, A.a())
                compiled_f = Compiled(f)
                r2 = compiled_f(v, A.a())
                self.assertEqual(r1, r2)

    def test_compile_alternative_format(self):
        A1 = Alternative("A1", a={'a': int}, b={'b': str})
        A2 = Alternative("A2", a={'a': int}, b={'b': str},
                         __str__=lambda self: "my str")
        A3 = Alternative("A3", a={'a': int}, b={'b': str},
                         __format__=lambda self, spec: "my format " + spec)

        def a1_format(x: A1):
            return format(x)

        def a2_format(x: A2):
            return format(x)

        def a3_format(x: A3):
            return format(x)

        def a3_format_spec(x: A3):
            return format(x, "spec")

        r1 = a1_format(A1.a())
        c1_format = Compiled(a1_format)
        r2 = c1_format(A1.a())
        self.assertEqual(r1, r2)

        r1 = a2_format(A2.a())
        c2_format = Compiled(a2_format)
        r2 = c2_format(A2.a())
        self.assertEqual(r1, r2)

        r1 = a3_format(A3.a())
        c3_format = Compiled(a3_format)
        r2 = c3_format(A3.a())
        self.assertEqual(r1, r2)

        r1 = a3_format_spec(A3.a())
        c3_format_spec = Compiled(a3_format_spec)
        r2 = c3_format_spec(A3.a())
        self.assertEqual(r1, r2)

        # This failed when I forgot to support ConcreteAlternativeWrappers
        @Entrypoint
        def specialized_format(x):
            return format(x)

        test_values = [A1.a(), A1.b(), A2.a(), A2.b(), A3.a(), A3.b()]
        for v in test_values:
            r1 = format(v)
            r2 = specialized_format(v)
            self.assertEqual(r1, r2, type(v))

    def test_compile_alternative_bytes(self):
        A = Alternative("A", a={'a': int}, b={'b': str},
                        __bytes__=lambda self: b'my bytes')

        def f_bytes(x: A):
            return bytes(x)

        v = A.a()
        r1 = f_bytes(v)
        c_f = Compiled(f_bytes)
        r2 = c_f(v)
        self.assertEqual(r1, r2)

    def test_compile_alternative_attr(self):
        def A_getattr(self, n):
            return self.d[n]

        def A_setattr(self, n, v):
            self.d[n] = v

        def A_delattr(self, n):
            del self.d[n]

        A = Alternative("A", a={'d': Dict(str, str), 'i': int},
                        __getattr__=A_getattr,
                        __setattr__=A_setattr,
                        __delattr__=A_delattr)

        def f_getattr1(x: A):
            return x.q

        def f_getattr2(x: A):
            return x.z

        def f_setattr1(x: A, s: str):
            x.q = s

        def f_setattr2(x: A, s: str):
            x.z = s

        def f_delattr1(x: A):
            del x.q

        def f_delattr2(x: A):
            del x.z

        c_getattr1 = Compiled(f_getattr1)
        c_getattr2 = Compiled(f_getattr2)
        c_setattr1 = Compiled(f_setattr1)
        c_setattr2 = Compiled(f_setattr2)
        c_delattr1 = Compiled(f_delattr1)
        c_delattr2 = Compiled(f_delattr2)

        for v in [A.a()]:
            f_setattr1(v, "0")
            f_setattr2(v, "0")
            self.assertEqual(f_getattr1(v), "0")
            self.assertEqual(f_getattr1(v), c_getattr1(v))
            self.assertEqual(f_getattr2(v), "0")
            self.assertEqual(f_getattr2(v), c_getattr2(v))
            f_setattr1(v, "1")
            self.assertEqual(f_getattr1(v), "1")
            self.assertEqual(f_getattr1(v), c_getattr1(v))
            self.assertEqual(f_getattr2(v), "0")
            self.assertEqual(f_getattr2(v), c_getattr2(v))
            c_setattr1(v, "2")
            self.assertEqual(f_getattr1(v), "2")
            self.assertEqual(f_getattr1(v), c_getattr1(v))
            self.assertEqual(f_getattr2(v), "0")
            self.assertEqual(f_getattr2(v), c_getattr2(v))
            f_setattr2(v, "3")
            self.assertEqual(f_getattr1(v), "2")
            self.assertEqual(f_getattr1(v), c_getattr1(v))
            self.assertEqual(f_getattr2(v), "3")
            self.assertEqual(f_getattr2(v), c_getattr2(v))
            c_setattr2(v, "4")
            self.assertEqual(f_getattr1(v), "2")
            self.assertEqual(f_getattr1(v), c_getattr1(v))
            self.assertEqual(f_getattr2(v), "4")
            self.assertEqual(f_getattr2(v), c_getattr2(v))
            f_delattr1(v)
            with self.assertRaises(KeyError):
                f_getattr1(v)
            with self.assertRaises(KeyError):
                c_getattr1(v)
            self.assertEqual(f_getattr2(v), "4")
            self.assertEqual(f_getattr2(v), c_getattr2(v))
            f_delattr2(v)
            with self.assertRaises(KeyError):
                f_getattr1(v)
            with self.assertRaises(KeyError):
                c_getattr1(v)
            with self.assertRaises(KeyError):
                f_getattr2(v)
            with self.assertRaises(KeyError):
                c_getattr2(v)
            f_setattr1(v, "5")
            f_setattr2(v, "6")
            c_delattr1(v)
            with self.assertRaises(KeyError):
                f_getattr1(v)
            with self.assertRaises(KeyError):
                c_getattr1(v)
            self.assertEqual(f_getattr2(v), "6")
            self.assertEqual(f_getattr2(v), c_getattr2(v))
            c_delattr2(v)
            with self.assertRaises(KeyError):
                f_getattr1(v)
            with self.assertRaises(KeyError):
                c_getattr1(v)
            with self.assertRaises(KeyError):
                f_getattr2(v)
            with self.assertRaises(KeyError):
                c_getattr2(v)

    def test_compile_alternative_float_methods(self):
        # if __float__ is defined, then floor() and ceil() are based off this conversion,
        # when __floor__ and __ceil__ are not defined
        A = Alternative("A", a={'a': int}, b={'b': str},
                        __float__=lambda self: 1234.5)

        def f_floor(x: A):
            return floor(x)

        def f_ceil(x: A):
            return ceil(x)

        test_cases = [f_floor, f_ceil]
        for f in test_cases:
            r1 = f(A.a())
            compiled_f = Compiled(f)
            r2 = compiled_f(A.a())
            self.assertEqual(r1, r2)

        B = Alternative("B", a={'a': int}, b={'b': str},
                        __round__=lambda self, n: 1234 + n,
                        __trunc__=lambda self: 1,
                        __floor__=lambda self: 2,
                        __ceil__=lambda self: 3)

        def f_round0(x: B):
            return round(x, 0)

        def f_round1(x: B):
            return round(x, 1)

        def f_round2(x: B):
            return round(x, 2)

        def f_round_1(x: B):
            return round(x, -1)

        def f_round_2(x: B):
            return round(x, -2)

        def f_trunc(x: B):
            return trunc(x)

        def f_floor(x: B):
            return floor(x)

        def f_ceil(x: B):
            return ceil(x)

        test_cases = [f_round0, f_round1, f_round2, f_round_1, f_round_2, f_trunc, f_floor, f_ceil]
        for f in test_cases:
            r1 = f(B.a())
            compiled_f = Compiled(f)
            r2 = compiled_f(B.a())
            self.assertEqual(r1, r2)

    def test_compile_alternative_dir(self):
        # The interpreted dir() calls __dir__() and sorts the result.
        # I expected the compiled dir() to do the same thing, but it doesn't sort.
        # So if you append these elements out of order, the test will fail.
        A0 = Alternative("A", a={'a': int}, b={'b': str})

        def A_dir(self):
            x = ListOf(str)()
            x.append("x")
            x.append("y")
            x.append("z")
            return x

        A = Alternative("A", a={'a': int}, b={'b': str},
                        __dir__=A_dir,
                        )

        def f_dir0(x: A0):
            return dir(x)

        def f_dir(x: A):
            return dir(x)

        for f in [f_dir0]:
            compiled_f = Compiled(f)
            r1 = f(A0.a())
            r2 = compiled_f(A0.a())
            self.assertEqual(r1, r2)

        for f in [f_dir]:
            compiled_f = Compiled(f)
            r1 = f(A.a())
            r2 = compiled_f(A.a())
            self.assertEqual(r1, r2)

        c0 = Compiled(f_dir0)
        c = Compiled(f_dir)

        initMem = psutil.Process().memory_info().rss / 1024 ** 2
        for i in range(10000):
            c0(A0.a(i))
            c(A.a(i))
        finalMem = psutil.Process().memory_info().rss / 1024 ** 2
        self.assertTrue(finalMem < initMem + 2)

    def test_compile_alternative_comparison_defaults(self):
        B = Alternative("B", a={'a': int}, b={'b': str})

        def f_eq(x: B, y: B):
            return x == y

        def f_ne(x: B, y: B):
            return x != y

        def f_lt(x: B, y: B):
            return x < y

        def f_gt(x: B, y: B):
            return x > y

        def f_le(x: B, y: B):
            return x <= y

        def f_ge(x: B, y: B):
            return x >= y

        def f_hash(x: B):
            return hash(x)

        values = [B.a(0), B.a(1), B.b("a"), B.b("b")]
        test_cases = [f_eq, f_ne, f_lt, f_gt, f_le, f_ge]
        for f in test_cases:
            for v1 in values:
                for v2 in values:
                    compiled_f = Compiled(f)
                    r1 = f(v1, v2)
                    r2 = compiled_f(v1, v2)
                    self.assertEqual(r1, r2)

        test_cases = [f_hash]
        for f in test_cases:
            for v in values:
                compiled_f = Compiled(f)
                r1 = f(v)
                r2 = compiled_f(v)
                self.assertEqual(r1, r2)

    def test_compile_alternative_comparison_methods(self):
        C = Alternative("C", a={'a': int}, b={'b': str},
                        __eq__=lambda self, other: True,
                        __ne__=lambda self, other: False,
                        __lt__=lambda self, other: True,
                        __gt__=lambda self, other: False,
                        __le__=lambda self, other: True,
                        __ge__=lambda self, other: False,
                        __hash__=lambda self: 123,
                        )

        def f_eq(x: C):
            return x == C.a()

        def f_ne(x: C):
            return x != C.a()

        def f_lt(x: C):
            return x < C.a()

        def f_gt(x: C):
            return x > C.a()

        def f_le(x: C):
            return x <= C.a()

        def f_ge(x: C):
            return x >= C.a()

        def f_hash(x: C):
            return hash(x)

        test_cases = [f_eq, f_ne, f_lt, f_gt, f_le, f_ge, f_hash]
        for f in test_cases:
            compiled_f = Compiled(f)
            r1 = f(C.a())
            r2 = compiled_f(C.a())
            self.assertEqual(r1, r2)

    def test_compile_alternative_getsetitem(self):
        def A2_getitem(self, i):
            if i not in self.d:
                return i
            return self.d[i]

        def A2_setitem(self, i, v):
            self.d[i] = v

        A2 = Alternative("A2", d={'d': Dict(int, int)},
                         __getitem__=A2_getitem,
                         __setitem__=A2_setitem)

        def f_getitem(a: A2, i: int) -> int:
            return a[i]

        def f_setitem(a: A2, i: int, v: int):
            a[i] = v

        c_getitem = Compiled(f_getitem)
        c_setitem = Compiled(f_setitem)

        a = A2.d()
        a[123] = 7
        self.assertEqual(a[123], 7)
        for i in range(10, 20):
            self.assertEqual(f_getitem(a, i), i)
            self.assertEqual(c_getitem(a, i), i)
            f_setitem(a, i, i + 100)
            self.assertEqual(f_getitem(a, i), i + 100)
            self.assertEqual(c_getitem(a, i), i + 100)
            c_setitem(a, i, i + 200)
            self.assertEqual(f_getitem(a, i), i + 200)
            self.assertEqual(c_getitem(a, i), i + 200)

    def test_compile_simple_alternative_magic_methods(self):
        A = Alternative(
            "A", a={}, b={},
            __bool__=lambda self: False,
            __str__=lambda self: "my str",
            __repr__=lambda self: "my repr",
            __call__=lambda self, i: "my call",
            __len__=lambda self: 42,
            __contains__=lambda self, item: item == 1,
            __bytes__=lambda self: b'my bytes',
            __format__=lambda self, spec: "my format",
            __int__=lambda self: 43,
            __float__=lambda self: 44.44,
            __complex__=lambda self: 3 + 4j,
            __add__=lambda self, other: "add",
            __sub__=lambda self, other: "sub",
            __mul__=lambda self, other: "mul",
            __matmul__=lambda self, other: "matmul",
            __truediv__=lambda self, other: "truediv",
            __floordiv__=lambda self, other: "floordiv",
            __divmod__=lambda self, other: "divmod",
            __mod__=lambda self, other: "mod",
            __pow__=lambda self, other: "pow",
            __lshift__=lambda self, other: "lshift",
            __rshift__=lambda self, other: "rshift",
            __and__=lambda self, other: "and",
            __or__=lambda self, other: "or",
            __xor__=lambda self, other: "xor",
            __iadd__=lambda self, other: "iadd",
            __isub__=lambda self, other: "isub",
            __imul__=lambda self, other: "imul",
            __imatmul__=lambda self, other: "imatmul",
            __itruediv__=lambda self, other: "itruediv",
            __ifloordiv__=lambda self, other: "ifloordiv",
            __imod__=lambda self, other: "imod",
            __ipow__=lambda self, other: "ipow",
            __ilshift__=lambda self, other: "ilshift",
            __irshift__=lambda self, other: "irshift",
            __iand__=lambda self, other: "iand",
            __ior__=lambda self, other: "ior",
            __ixor__=lambda self, other: "ixor",
            __neg__=lambda self: "neg",
            __pos__=lambda self: "pos",
            __invert__=lambda self: "invert",
            __abs__=lambda self: "abs",
        )

        def f_bool(x: A):
            return bool(x)

        def f_str(x: A):
            return str(x)

        def f_repr(x: A):
            return repr(x)

        def f_call(x: A):
            return x(1)

        def f_1in(x: A):
            return 1 in x

        def f_0in(x: A):
            return 0 in x

        def f_len(x: A):
            return len(x)

        def f_int(x: A):
            return int(x)

        def f_float(x: A):
            return float(x)

        def f_add(x: A):
            return x + A.a()

        def f_sub(x: A):
            return x - A.a()
x:A<block_start><return>x-A.a()<block_end><def_stmt>f_mul x:A<block_start><return>x<times>A.a()<block_end><def_stmt>f_div x:A<block_start><return>x/A.a()<block_end><def_stmt>f_floordiv x:A<block_start><return>x<floordiv>A.a()<block_end><def_stmt>f_matmul x:A<block_start><return>[email protected]()<block_end><def_stmt>f_mod x:A<block_start><return>x%A.a()<block_end><def_stmt>f_and x:A<block_start><return>x&A.a()<block_end><def_stmt>f_or x:A<block_start><return>x|A.a()<block_end><def_stmt>f_xor x:A<block_start><return>x^A.a()<block_end><def_stmt>f_rshift x:A<block_start><return>x<rshift>A.a()<block_end><def_stmt>f_lshift x:A<block_start><return>x<lshift>A.a()<block_end><def_stmt>f_pow x:A<block_start><return>x<power>A.a()<block_end><def_stmt>f_neg x:A<block_start><return>-x<block_end><def_stmt>f_pos x:A<block_start><return>+x<block_end><def_stmt>f_invert x:A<block_start><return>~x<block_end><def_stmt>f_abs x:A<block_start><return>abs(x)<block_end><def_stmt>f_iadd x:A<block_start>x<augadd>A.a()<line_sep><return>x<block_end><def_stmt>f_isub x:A<block_start>x<augsub>A.a()<line_sep><return>x<block_end><def_stmt>f_imul x:A<block_start>x<augmul>A.a()<line_sep><return>x<block_end><def_stmt>f_idiv x:A<block_start>x<augdiv>A.a()<line_sep><return>x<block_end><def_stmt>f_ifloordiv x:A<block_start>x<augfloordiv>A.a()<line_sep><return>x<block_end><def_stmt>f_imatmul x:A<block_start>x<augmatmul>A.a()<line_sep><return>x<block_end><def_stmt>f_imod x:A<block_start>x<augmod>A.a()<line_sep><return>x<block_end><def_stmt>f_iand x:A<block_start>x<augand>A.a()<line_sep><return>x<block_end><def_stmt>f_ior x:A<block_start>x<augor>A.a()<line_sep><return>x<block_end><def_stmt>f_ixor x:A<block_start>x<augxor>A.a()<line_sep><return>x<block_end><def_stmt>f_irshift x:A<block_start>x<augrshift>A.a()<line_sep><return>x<block_end><def_stmt>f_ilshift x:A<block_start>x<auglshift>A.a()<line_sep><return>x<block_end><def_stmt>f_ipow x:A<block_start>x<augpow>A.a()<line_sep><return>x<block_end>test_cases=[f_int f_float f_bool f_str f_repr f_call f_0in f_1in f_len f_add f_sub f_mul f_div f_floordiv f_matmul f_mod f_and f_or f_xor f_rshift f_lshift f_pow f_neg f_pos f_invert f_abs]<line_sep># not supported: # [f_iadd, f_isub, f_imul, f_idiv, f_ifloordiv, f_imatmul, # f_imod, f_iand, f_ior, f_ixor, f_irshift, f_ilshift, f_ipow] <for_stmt>f test_cases<block_start>compiled_f=Compiled(f)<line_sep>r1=f(A.a())<line_sep>r2=compiled_f(A.a())<line_sep>self.assertEqual(r1 r2)<block_end><block_end><def_stmt>test_compile_simple_alternative_reverse_methods self<block_start>A=Alternative("A" a={} b={} __radd__=<lambda>self other:"radd"+repr(other) __rsub__=<lambda>self other:"rsub"+repr(other) __rmul__=<lambda>self other:"rmul"+repr(other) __rmatmul__=<lambda>self other:"rmatmul"+repr(other) __rtruediv__=<lambda>self other:"rtruediv"+repr(other) __rfloordiv__=<lambda>self other:"rfloordiv"+repr(other) __rmod__=<lambda>self other:"rmod"+repr(other) __rpow__=<lambda>self other:"rpow"+repr(other) __rlshift__=<lambda>self other:"rlshift"+repr(other) __rrshift__=<lambda>self other:"rrshift"+repr(other) __rand__=<lambda>self other:"rand"+repr(other) __rxor__=<lambda>self other:"rxor"+repr(other) __ror__=<lambda>self other:"ror"+repr(other) )<line_sep>values=[1 Int16(1) UInt64(1) 1.234 Float32(1.234) <true> "abc" ListOf(int)((1 2)) ConstDict(str str)({"a":"1"})]<for_stmt>v values<block_start>T=type(v)<def_stmt>f_radd v:T x:A<block_start><return>v+x<block_end><def_stmt>f_rsub v:T x:A<block_start><return>v-x<block_end><def_stmt>f_rmul v:T 
x:A<block_start><return>v<times>x<block_end><def_stmt>f_rmatmul v:T x:A<block_start><return>v@x<block_end><def_stmt>f_rtruediv v:T x:A<block_start><return>v/x<block_end><def_stmt>f_rfloordiv v:T x:A<block_start><return>v<floordiv>x<block_end><def_stmt>f_rmod v:T x:A<block_start><return>v%x<block_end><def_stmt>f_rpow v:T x:A<block_start><return>v<power>x<block_end><def_stmt>f_rlshift v:T x:A<block_start><return>v<lshift>x<block_end><def_stmt>f_rrshift v:T x:A<block_start><return>v<rshift>x<block_end><def_stmt>f_rand v:T x:A<block_start><return>v&x<block_end><def_stmt>f_rxor v:T x:A<block_start><return>v^x<block_end><def_stmt>f_ror v:T x:A<block_start><return>v|x<block_end><for_stmt>f [f_radd f_rsub f_rmul f_rmatmul f_rtruediv f_rfloordiv f_rmod f_rpow f_rlshift f_rrshift f_rand f_rxor f_ror]<block_start>r1=f(v A.a())<line_sep>compiled_f=Compiled(f)<line_sep>r2=compiled_f(v A.a())<line_sep>self.assertEqual(r1 r2)<block_end><block_end><block_end><def_stmt>test_compile_simple_alternative_format self<block_start>A1=Alternative("A1" a={} b={})<line_sep>A2=Alternative("A2" a={} b={} __str__=<lambda>self:"my str")<line_sep>A3=Alternative("A3" a={} b={} __format__=<lambda>self spec:"my format "+spec)<def_stmt>a1_format x:A1<block_start><return>format(x)<block_end><def_stmt>a2_format x:A2<block_start><return>format(x)<block_end><def_stmt>a3_format x:A3<block_start><return>format(x)<block_end><def_stmt>a3_format_spec x:A3<block_start><return>format(x "spec")<block_end>r1=a1_format(A1.a())<line_sep>c1_format=Compiled(a1_format)<line_sep>r2=c1_format(A1.a())<line_sep>self.assertEqual(r1 r2)<line_sep>r1=a2_format(A2.a())<line_sep>c2_format=Compiled(a2_format)<line_sep>r2=c2_format(A2.a())<line_sep>self.assertEqual(r1 r2)<line_sep>r1=a3_format(A3.a())<line_sep>c3_format=Compiled(a3_format)<line_sep>r2=c3_format(A3.a())<line_sep>self.assertEqual(r1 r2)<line_sep>r1=a3_format_spec(A3.a())<line_sep>c3_format_spec=Compiled(a3_format_spec)<line_sep>r2=c3_format_spec(A3.a())<line_sep>self.assertEqual(r1 r2)<line_sep># This failed when I forgot to support ConcreteAlternativeWrappers @Entrypoint<def_stmt>specialized_format x<block_start><return>format(x)<block_end>test_values=[A1.a() A1.b() A2.a() A2.b() A3.a() A3.b()]<for_stmt>v test_values<block_start>r1=format(v)<line_sep>r2=specialized_format(v)<line_sep>self.assertEqual(r1 r2)<block_end><block_end><def_stmt>test_compile_simple_alternative_bytes self<block_start>A=Alternative("A" a={} b={} __bytes__=<lambda>self:b'my bytes')<def_stmt>f_bytes x:A<block_start><return>bytes(x)<block_end>v=A.a()<line_sep>r1=f_bytes(v)<line_sep>c_f=Compiled(f_bytes)<line_sep>r2=c_f(v)<line_sep>self.assertEqual(r1 r2)<block_end># I think this would require nonlocal data @pytest.mark.skip(reason="not supported")<def_stmt>test_compile_simple_alternative_attr self<block_start><def_stmt>A_getattr self n<block_start><return>self.d[n]<block_end><def_stmt>A_setattr self n v<block_start>self.d[n]=v<block_end><def_stmt>A_delattr self n<block_start><del_stmt>self.d[n]<block_end>A=Alternative("A" a={} b={} __getattr__=A_getattr __setattr__=A_setattr __delattr__=A_delattr)<def_stmt>f_getattr1 x:A<block_start><return>x.q<block_end><def_stmt>f_getattr2 x:A<block_start><return>x.z<block_end><def_stmt>f_setattr1 x:A s:str<block_start>x.q=s<block_end><def_stmt>f_setattr2 x:A s:str<block_start>x.z=s<block_end><def_stmt>f_delattr1 x:A<block_start><del_stmt>x.q<block_end><def_stmt>f_delattr2 
x:A<block_start><del_stmt>x.z<block_end>c_getattr1=Compiled(f_getattr1)<line_sep>c_getattr2=Compiled(f_getattr2)<line_sep>c_setattr1=Compiled(f_setattr1)<line_sep>c_setattr2=Compiled(f_setattr2)<line_sep>c_delattr1=Compiled(f_delattr1)<line_sep>c_delattr2=Compiled(f_delattr2)<for_stmt>v [A.a()]<block_start>f_setattr1(v "0")<line_sep>f_setattr2(v "0")<line_sep>self.assertEqual(f_getattr1(v) "0")<line_sep>self.assertEqual(f_getattr1(v) c_getattr1(v))<line_sep>self.assertEqual(f_getattr2(v) "0")<line_sep>self.assertEqual(f_getattr2(v) c_getattr2(v))<line_sep>f_setattr1(v "1")<line_sep>self.assertEqual(f_getattr1(v) "1")<line_sep>self.assertEqual(f_getattr1(v) c_getattr1(v))<line_sep>self.assertEqual(f_getattr2(v) "0")<line_sep>self.assertEqual(f_getattr2(v) c_getattr2(v))<line_sep>c_setattr1(v "2")<line_sep>self.assertEqual(f_getattr1(v) "2")<line_sep>self.assertEqual(f_getattr1(v) c_getattr1(v))<line_sep>self.assertEqual(f_getattr2(v) "0")<line_sep>self.assertEqual(f_getattr2(v) c_getattr2(v))<line_sep>f_setattr2(v "3")<line_sep>self.assertEqual(f_getattr1(v) "2")<line_sep>self.assertEqual(f_getattr1(v) c_getattr1(v))<line_sep>self.assertEqual(f_getattr2(v) "3")<line_sep>self.assertEqual(f_getattr2(v) c_getattr2(v))<line_sep>c_setattr2(v "4")<line_sep>self.assertEqual(f_getattr1(v) "2")<line_sep>self.assertEqual(f_getattr1(v) c_getattr1(v))<line_sep>self.assertEqual(f_getattr2(v) "4")<line_sep>self.assertEqual(f_getattr2(v) c_getattr2(v))<line_sep>f_delattr1(v)<with_stmt>self.assertRaises(KeyError)<block_start>f_getattr1(v)<block_end><with_stmt>self.assertRaises(KeyError)<block_start>c_getattr1(v)<block_end>self.assertEqual(f_getattr2(v) "4")<line_sep>self.assertEqual(f_getattr2(v) c_getattr2(v))<line_sep>f_delattr2(v)<with_stmt>self.assertRaises(KeyError)<block_start>f_getattr1(v)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>c_getattr1(v)<block_end><with_stmt>self.assertRaises(KeyError)<block_start>f_getattr2(v)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>c_getattr2(v)<block_end>f_setattr1(v "5")<line_sep>f_setattr2(v "6")<line_sep>c_delattr1(v)<with_stmt>self.assertRaises(KeyError)<block_start>f_getattr1(v)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>c_getattr1(v)<block_end>self.assertEqual(f_getattr2(v) "6")<line_sep>self.assertEqual(f_getattr2(v) c_getattr2(v))<line_sep>c_delattr2(v)<with_stmt>self.assertRaises(KeyError)<block_start>f_getattr1(v)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>c_getattr1(v)<block_end><with_stmt>self.assertRaises(KeyError)<block_start>f_getattr2(v)<block_end><with_stmt>self.assertRaises(TypeError)<block_start>c_getattr2(v)<block_end><block_end><block_end><def_stmt>test_compile_simple_alternative_float_methods self# if __float__ is defined, then floor() and ceil() are based off this conversion, # when __floor__ and __ceil__ are not defined <block_start>A=Alternative("A" a={} b={} __float__=<lambda>self:1234.5)<def_stmt>f_floor x:A<block_start><return>floor(x)<block_end><def_stmt>f_ceil x:A<block_start><return>ceil(x)<block_end>test_cases=[f_floor f_ceil]<for_stmt>f test_cases<block_start>r1=f(A.a())<line_sep>compiled_f=Compiled(f)<line_sep>r2=compiled_f(A.a())<line_sep>self.assertEqual(r1 r2)<block_end>B=Alternative("B" a={} b={} __round__=<lambda>self n:1234+n __trunc__=<lambda>self:1 __floor__=<lambda>self:2 __ceil__=<lambda>self:3)<def_stmt>f_round0 x:B<block_start><return>round(x 0)<block_end><def_stmt>f_round1 x:B<block_start><return>round(x 1)<block_end><def_stmt>f_round2 
x:B<block_start><return>round(x 2)<block_end><def_stmt>f_round_1 x:B<block_start><return>round(x -1)<block_end><def_stmt>f_round_2 x:B<block_start><return>round(x -2)<block_end><def_stmt>f_trunc x:B<block_start><return>trunc(x)<block_end><def_stmt>f_floor x:B<block_start><return>floor(x)<block_end><def_stmt>f_ceil x:B<block_start><return>ceil(x)<block_end>test_cases=[f_round0 f_round1 f_round2 f_round_1 f_round_2 f_trunc f_floor f_ceil]<for_stmt>f test_cases<block_start>r1=f(B.a())<line_sep>compiled_f=Compiled(f)<line_sep>r2=compiled_f(B.a())<line_sep>self.assertEqual(r1 r2)<block_end><block_end><def_stmt>test_compile_simple_dir self# The interpreted dir() calls __dir__() and sorts the result. # I expected the compiled dir() to do the same thing, but it doesn't sort. # So if you append these elements out of order, the test will fail. <block_start>A0=Alternative("A" a={} b={})<def_stmt>A_dir self<block_start>x=ListOf(str)()<line_sep>x.append("x")<line_sep>x.append("y")<line_sep>x.append("z")<line_sep><return>x<block_end>A=Alternative("A" a={} b={} __dir__=A_dir )<def_stmt>f_dir0 x:A0<block_start><return>dir(x)<block_end><def_stmt>f_dir x:A<block_start><return>dir(x)<block_end><for_stmt>f [f_dir0]<block_start>compiled_f=Compiled(f)<line_sep>r1=f(A0.a())<line_sep>r2=compiled_f(A0.a())<line_sep>self.assertEqual(r1 r2)<block_end><for_stmt>f [f_dir]<block_start>compiled_f=Compiled(f)<line_sep>r1=f(A.a())<line_sep>r2=compiled_f(A.a())<line_sep>self.assertEqual(r1 r2)<block_end>c0=Compiled(f_dir0)<line_sep>c=Compiled(f_dir)<line_sep>initMem=psutil.Process().memory_info().rss/1024<power>2<for_stmt>i range(10000)<block_start>c0(A0.a())<line_sep>c(A.a())<block_end>finalMem=psutil.Process().memory_info().rss/1024<power>2<line_sep>self.assertTrue(finalMem<l>initMem+2)<block_end><def_stmt>test_compile_simple_alternative_comparison_defaults self<block_start>B=Alternative("B" a={} b={})<def_stmt>f_eq x:B y:B<block_start><return>x<eq>y<block_end><def_stmt>f_ne x:B y:B<block_start><return>x<ne>y<block_end><def_stmt>f_lt x:B y:B<block_start><return>x<l>y<block_end><def_stmt>f_gt x:B y:B<block_start><return>x<g>y<block_end><def_stmt>f_le x:B y:B<block_start><return>x<le>y<block_end><def_stmt>f_ge x:B y:B<block_start><return>x<ge>y<block_end><def_stmt>f_hash x:B<block_start><return>hash(x)<block_end>values=[B.a() B.b()]<line_sep>test_cases=[f_eq f_ne f_lt f_gt f_le f_ge]<for_stmt>f test_cases<block_start><for_stmt>v1 values<block_start><for_stmt>v2 values<block_start>compiled_f=Compiled(f)<line_sep>r1=f(v1 v2)<line_sep>r2=compiled_f(v1 v2)<line_sep>self.assertEqual(r1 r2)<block_end><block_end><block_end>test_cases=[f_hash]<for_stmt>f test_cases<block_start><for_stmt>v values<block_start>compiled_f=Compiled(f)<line_sep>r1=f(v)<line_sep>r2=compiled_f(v)<line_sep>self.assertEqual(r1 r2)<block_end><block_end><block_end><def_stmt>test_compile_simple_alternative_comparison_methods self<block_start>C=Alternative("C" a={} b={} __eq__=<lambda>self other:<true> __ne__=<lambda>self other:<false> __lt__=<lambda>self other:<true> __gt__=<lambda>self other:<false> __le__=<lambda>self other:<true> __ge__=<lambda>self other:<false> __hash__=<lambda>self:123 )<def_stmt>f_eq x:C<block_start><return>x<eq>C.a()<block_end><def_stmt>f_ne x:C<block_start><return>x<ne>C.a()<block_end><def_stmt>f_lt x:C<block_start><return>x<l>C.a()<block_end><def_stmt>f_gt x:C<block_start><return>x<g>C.a()<block_end><def_stmt>f_le x:C<block_start><return>x<le>C.a()<block_end><def_stmt>f_ge x:C<block_start><return>x<ge>C.a()<block_end><def_stmt>f_hash 
x:C<block_start><return>hash(x)<block_end>test_cases=[f_eq f_ne f_lt f_gt f_le f_ge f_hash]<for_stmt>f test_cases<block_start>compiled_f=Compiled(f)<line_sep>r1=f(C.a())<line_sep>r2=compiled_f(C.a())<line_sep>self.assertEqual(r1 r2)<block_end><block_end><def_stmt>test_compile_alternative_float_conv self<block_start>A0=Alternative("A0" a={} b={} __int__=<lambda>self:123 __float__=<lambda>self:1234.5)<line_sep>A=Alternative("A" a={'a':int} b={'b':str} __int__=<lambda>self:123 __float__=<lambda>self:1234.5)<def_stmt>f x:float<block_start><return>x<block_end><def_stmt>g x:int<block_start><return>x<block_end>c_f=Compiled(f)<line_sep>c_g=Compiled(g)<with_stmt>self.assertRaises(TypeError)<block_start>c_f(A.a())<block_end><with_stmt>self.assertRaises(TypeError)<block_start>c_f(A0.a())<block_end><with_stmt>self.assertRaises(TypeError)<block_start>c_g(A.a())<block_end><with_stmt>self.assertRaises(TypeError)<block_start>c_g(A0.a())<block_end><block_end><def_stmt>test_compile_alternative_missing_inplace_fallback self<block_start><def_stmt>A_add self other<block_start><return>A.b(" add"+other.b)<block_end><def_stmt>A_sub self other<block_start><return>A.b(" sub"+other.b)<block_end><def_stmt>A_mul self other<block_start>self.s<augadd>" mul"+other.s<line_sep><return>self<block_end><def_stmt>A_matmul self other<block_start>self.s<augadd>" matmul"+other.s<line_sep><return>self<block_end><def_stmt>A_truediv self other<block_start>self.s<augadd>" truediv"+other.s<line_sep><return>self<block_end><def_stmt>A_floordiv self other<block_start>self.s<augadd>" floordiv"+other.s<line_sep><return>self<block_end><def_stmt>A_mod self other<block_start>self.s<augadd>" mod"+other.s<line_sep><return>self<block_end><def_stmt>A_pow self other<block_start>self.s<augadd>" pow"+other.s<line_sep><return>self<block_end><def_stmt>A_lshift self other<block_start>self.s<augadd>" lshift"+other.s<line_sep><return>self<block_end><def_stmt>A_rshift self other<block_start>self.s<augadd>" rshift"+other.s<line_sep><return>self<block_end><def_stmt>A_and self other<block_start>self.s<augadd>" and"+other.s<line_sep><return>self<block_end><def_stmt>A_or self other<block_start>self.s<augadd>" or"+other.s<line_sep><return>self<block_end><def_stmt>A_xor self other<block_start>self.s<augadd>" xor"+other.s<line_sep><return>self<block_end>A=Alternative("A" a={'a':int} b={'b':str} __add__=<lambda>x y:A.b(x.b+" add"+y.b) __sub__=<lambda>x y:A.b(x.b+" sub"+y.b) __mul__=<lambda>x y:A.b(x.b+" mul"+y.b) __matmul__=<lambda>x y:A.b(x.b+" matmul"+y.b) __truediv__=<lambda>x y:A.b(x.b+" truediv"+y.b) __floordiv__=<lambda>x y:A.b(x.b+" floordiv"+y.b) __mod__=<lambda>x y:A.b(x.b+" mod"+y.b) __pow__=<lambda>x y:A.b(x.b+" pow"+y.b) __lshift__=<lambda>x y:A.b(x.b+" lshift"+y.b) __rshift__=<lambda>x y:A.b(x.b+" rshift"+y.b) __and__=<lambda>x y:A.b(x.b+" and"+y.b) __or__=<lambda>x y:A.b(x.b+" or"+y.b) __xor__=<lambda>x y:A.b(x.b+" xor"+y.b))<def_stmt>inplace x:A<block_start>x<augadd>A.b()<line_sep>x<augsub>A.b()<line_sep>x<augmul>A.b()<line_sep>x<augmatmul>A.b()<line_sep>x<augdiv>A.b()<line_sep>x<augfloordiv>A.b()<line_sep>x<augmod>A.b()<line_sep>x<augpow>A.b()<line_sep>x<auglshift>A.b()<line_sep>x<augrshift>A.b()<line_sep>x<augand>A.b()<line_sep>x<augor>A.b()<line_sep>x<augxor>A.b()<line_sep><return>x<block_end>expected=A.b("start add sub mul matmul truediv floordiv mod pow lshift rshift and or xor")<line_sep>v=A.b("start")<line_sep>r1=inplace(v)<line_sep>self.assertEqual(r1 
expected)<line_sep>v=A.b("start")<line_sep>r2=Compiled(inplace)(v)<line_sep>self.assertEqual(r2 expected)<block_end><def_stmt>test_compile_alternative_methods self<block_start><def_stmt>method self x<block_start><return>self.y+x<block_end>A=Alternative("A" Y=dict(y=int) method=method )<def_stmt>callMethod a:A x<block_start><return>a.method(x)<block_end>self.assertEqual(callMethod(A.Y(y=20) 10) Entrypoint(callMethod)(A.Y(y=20) 10))<def_stmt>callMethod2 a:A.Y x<block_start><return>a.method(x)<block_end>self.assertEqual(callMethod2(A.Y(y=20) 10) Entrypoint(callMethod2)(A.Y(y=20) 10))<block_end>@pytest.mark.skip(reason="not supported")<def_stmt>test_context_manager self<block_start><def_stmt>A_enter self<block_start>self.a.append("enter")<line_sep><return>"target"<block_end><def_stmt>A_exit self exc_type exc_val exc_tb<block_start>self.a.append("exit")<line_sep>self.a.append(str(exc_type))<line_sep><return><true><block_end>A=Alternative("A" a={'a':ListOf(str)} __enter__=A_enter __exit__=A_exit)<def_stmt>f0 x:int<arrow>ListOf(str)<block_start>context_manager=A.a()<with_stmt>context_manager<block_start>context_manager.a.append(str(1/x))<block_end><return>context_manager.a<block_end><def_stmt>f x:int<arrow>ListOf(str)<block_start>context_manager=A.a()<with_stmt>context_manager<as>v<block_start>context_manager.a.append(v)<line_sep>context_manager.a.append(str(1/x))<block_end><return>context_manager.a<block_end><class_stmt>ConMan()<block_start><def_stmt>__init__ self<block_start>self.a=[]<block_end><def_stmt>__enter__ self<block_start>self.a.append("Enter")<line_sep><return>"Target"<block_end><def_stmt>__exit__ self exc_type exc_val exc_tb<block_start>self.a.append("Exit")<if_stmt>exc_type<is><not><none><block_start>self.a.append(str(exc_type))<block_end><if_stmt>exc_val<is><not><none><block_start>self.a.append(str(exc_val))<block_end><if_stmt>exc_tb<is><not><none><block_start>self.a.append(str(exc_tb))<block_end><return><true><block_end><block_end><def_stmt>g x:int<arrow>ListOf(str)<block_start>context_manager=ConMan()<with_stmt>context_manager<as>v<block_start>context_manager.a.append(v)<line_sep>context_manager.a.append(str(1/x))<block_end><return>context_manager.a<block_end><for_stmt>fn [f g]<block_start>c_fn=Compiled(fn)<for_stmt>v [0 1]<block_start>r0=fn(v)<line_sep>r1=c_fn(v)<line_sep>self.assertEqual(r0 r1)<block_end><block_end><block_end><def_stmt>test_matches_on_alternative self<block_start>A=Alternative("A" X=dict(x=int))<line_sep>@Entrypoint<def_stmt>checkMatchesX x<block_start><return>x.matches.X<block_end><assert_stmt>checkMatchesX(A.X())<block_end><def_stmt>test_matches_on_oneof_alternative self<block_start>A=Alternative("A" X=dict(x=int))<line_sep>B=Alternative("B" Y=dict(y=int))<line_sep>@Entrypoint<def_stmt>checkMatchesX x:OneOf(A B int)<block_start><return>x.matches.X<block_end><assert_stmt>checkMatchesX(A.X())<assert_stmt><not>checkMatchesX(B.Y())<block_end><block_end>
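# Note on the test pattern above: each test builds an Alternative whose dunder
# methods are plain Python callables, runs every helper twice -- once
# interpreted, once through Compiled()/Entrypoint() -- and asserts both paths
# agree, so any divergence in typed_python's compiled dispatch fails the test.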
"""Test that the following CLI command returns the expected outputs label-maker labels -d integration-od -c test/fixtures/integration/config.integration.object_detection.json"""<import_stmt>unittest<import_from_stmt>os makedirs<import_from_stmt>shutil copyfile rmtree<import_stmt>subprocess<import_stmt>numpy<as>np<class_stmt>TestObjectDetectionLabel(unittest.TestCase)<block_start>"""Tests for object detection label creation"""<line_sep>@classmethod<def_stmt>setUpClass cls<block_start>makedirs('integration-od')<line_sep>copyfile('test/fixtures/integration/portugal-z17.mbtiles' 'integration-od/portugal-z17.mbtiles')<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>rmtree('integration-od')<block_end><def_stmt>test_cli self<block_start>"""Verify labels.npz produced by CLI"""<line_sep>cmd='label-maker labels -d integration-od -c test/fixtures/integration/config.integration.object_detection.json'<line_sep>cmd=cmd.split(' ')<line_sep>subprocess.run(cmd universal_newlines=<true>)<line_sep>labels=np.load("integration-od/labels.npz")<line_sep>expected_bboxes=dict()<line_sep>expected_bboxes['62092-50162-17']=np.empty((0 5))<line_sep>expected_bboxes['62092-50163-17']=np.array([[209 192 255 255 6] [253 251 255 255 6]])<line_sep>expected_bboxes['62092-50164-17']=np.array([[209 0 250 28 6] [242 0 255 28 6] [222 13 235 66 6] [87 20 250 255 6]])<line_sep>expected_bboxes['62093-50162-17']=np.array([[81 145 128 255 6] [124 0 218 255 6] [207 0 247 153 6] [140 108 193 255 6] [125 236 152 255 6] [162 177 176 216 6] [170 151 214 179 6] [141 166 244 255 6] [203 88 255 186 6]])<line_sep>expected_bboxes['62093-50163-17']=np.array([[81 0 125 15 6] [117 0 133 17 6] [119 0 151 36 6] [125 0 140 7 6] [141 0 187 7 6] [64 32 91 60 4] [84 50 106 64 6] [111 9 127 26 6] [111 18 127 35 6] [84 15 119 52 6] [74 6 129 69 5] [93 24 123 46 6] [88 27 127 93 6] [0 85 96 213 6] [0 85 96 255 6] [115 38 255 100 6]])<line_sep>expected_bboxes['62094-50162-17']=np.array([[67 0 172 248 6] [0 172 90 255 6] [91 170 255 227 6]])<line_sep>expected_bboxes['62093-50164-17']=np.array([[0 0 12 22 6] [207 158 255 195 6]])<line_sep>expected_bboxes['62094-50163-17']=np.array([[73 0 255 78 6] [30 166 60 196 1] [30 166 60 196 2] [203 129 255 255 6] [0 90 255 138 6]])<line_sep>expected_bboxes['62094-50164-17']=np.array([[158 0 216 82 6] [0 108 147 173 6] [139 74 254 143 6] [240 90 255 232 6]])<line_sep>self.assertEqual(len(labels.files) len(expected_bboxes.keys()))# First check the number of tiles <for_stmt>tile labels.files<block_start>self.assertTrue(np.array_equal(expected_bboxes[tile] labels[tile]))<block_end><block_end><block_end># Now, bboxes
<import_from_stmt>dependencies random np torch os<import_from_stmt>settings *<line_sep>print("Fixing random seed for reproducibility...")<line_sep>SEED=35202# alternative seeds: 123, int(time.time()) random.seed(SEED)<line_sep>np.random.seed(SEED)<line_sep>torch.manual_seed(SEED)<line_sep>torch.cuda.manual_seed_all(SEED)<line_sep>print('\tSetting random seed to {}.'.format(SEED))<line_sep>print('')<line_sep>torch.backends.cudnn.benchmark=<true># use the built-in cudnn auto-tuner to find the fastest convolution algorithms torch.backends.cudnn.enabled=<true><line_sep>print('Setting CUDA environment...')<line_sep>print('\ttorch.__version__ =' torch.__version__)<line_sep>print('\ttorch.version.cuda =' torch.version.cuda)<line_sep>print('\ttorch.backends.cudnn.version() =' torch.backends.cudnn.version())<line_sep>os.environ['CUDA_VISIBLE_DEVICES']=CUDA_DEVICES<if_stmt>MODE<eq>'cpu'<block_start>print("Warning: you've set the mode to CPU; \nthe code won't run on an NVIDIA GPU even if the CUDA and CUDNN queries succeed.")<block_end><try_stmt><block_start>print('\tos[\'CUDA_VISIBLE_DEVICES\'] =' os.environ['CUDA_VISIBLE_DEVICES'])<line_sep>NUM_CUDA_DEVICES=len(os.environ['CUDA_VISIBLE_DEVICES'].split(','))<block_end><except_stmt>Exception<block_start>print('\tos[\'CUDA_VISIBLE_DEVICES\'] =' 'None')<line_sep>NUM_CUDA_DEVICES=1<block_end>print('\ttorch.cuda.device_count() =' torch.cuda.device_count())<line_sep>print('')<line_sep>
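# How the FPN fusion below works: in a standard Detectron2 FPN the levels
# p2/p3/p4/p5 have strides 4/8/16/32, so each scale head applies
# max(1, log2(stride) - log2(4)) conv(+2x upsample) stages until its map
# reaches stride 4, then the four stride-4 maps are summed. Rough shape sketch
# (illustrative, assuming output_dim=32, FPN channels 256, 1x3x256x256 input):
#   p2 1x256x64x64 -> 1x32x64x64 (conv only)
#   p3 1x256x32x32 -> 1x32x64x64 (conv + 2x up)
#   p4 1x256x16x16 -> 1x32x64x64 (2 x (conv + 2x up))
#   p5 1x256x8x8   -> 1x32x64x64 (3 x (conv + 2x up))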
# from Detectron2: (https://github.com/facebookresearch/detectron2) <import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch nn<import_from_stmt>torch.nn functional<as>F<import_from_stmt>detectron2.layers Conv2d get_norm<import_from_stmt>detectron2.modeling.backbone build_backbone<as>d2_build_backbone<import_stmt>fvcore.nn.weight_init<as>weight_init<def_stmt>build_backbone2d cfg<block_start>""" Builds a 2D feature extractor backbone network from Detectron2."""<line_sep>output_dim=cfg.MODEL.BACKBONE3D.CHANNELS[0]<line_sep>norm=cfg.MODEL.FPN.NORM<line_sep>output_stride=4# TODO: make configurable backbone=d2_build_backbone(cfg)<line_sep>feature_extractor=FPNFeature(backbone.output_shape() output_dim output_stride norm)<line_sep># load pretrained backbone <if_stmt>cfg.MODEL.BACKBONE.WEIGHTS<block_start>state_dict=torch.load(cfg.MODEL.BACKBONE.WEIGHTS)<line_sep>backbone.load_state_dict(state_dict)<block_end><return>nn.Sequential(backbone feature_extractor) output_stride<block_end><class_stmt>FPNFeature(nn.Module)<block_start>""" Converts a feature pyramid to a single feature map (from Detectron2)"""<def_stmt>__init__ self input_shape output_dim=32 output_stride=4 norm='BN'<block_start>super().__init__()<line_sep># fmt: off self.in_features=["p2" "p3" "p4" "p5"]<line_sep>feature_strides={k:v.stride<for>k,v input_shape.items()}<line_sep>feature_channels={k:v.channels<for>k,v input_shape.items()}<line_sep># fmt: on self.scale_heads=[]<for_stmt>in_feature self.in_features<block_start>head_ops=[]<line_sep>head_length=max(1 int(np.log2(feature_strides[in_feature])-np.log2(output_stride)))<for_stmt>k range(head_length)<block_start>conv=Conv2d(feature_channels[in_feature]<if>k<eq>0<else>output_dim output_dim kernel_size=3 stride=1 padding=1 bias=<not>norm norm=get_norm(norm output_dim) activation=F.relu )<line_sep>weight_init.c2_msra_fill(conv)<line_sep>head_ops.append(conv)<if_stmt>feature_strides[in_feature]<ne>output_stride<block_start>head_ops.append(nn.Upsample(scale_factor=2 mode="bilinear" align_corners=<false>))<block_end><block_end>self.scale_heads.append(nn.Sequential(*head_ops))<line_sep>self.add_module(in_feature self.scale_heads[-1])<block_end><block_end><def_stmt>forward self features<block_start><for_stmt>i,f enumerate(self.in_features)<block_start><if_stmt>i<eq>0<block_start>x=self.scale_heads[i](features[f])<block_end><else_stmt><block_start>x=x+self.scale_heads[i](features[f])<block_end><block_end><return>x<block_end><block_end>
<import_stmt>octohatrack<line_sep>octohatrack.main()<line_sep>
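# A minimal usage sketch for the EventForm below, e.g. in a Django view
# (the view wiring is hypothetical, not part of this module):
#   form = EventForm(request.POST)
#   if form.is_valid():          # populates form.cleaned_data
#       form.send_email(creator=request.user)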
<import_from_stmt>django forms<import_from_stmt>django.conf settings<import_from_stmt>django.contrib.sites.models Site<import_from_stmt>django.core.mail send_mail<import_from_stmt>django.template loader<def_stmt>set_placeholder value<block_start><return>forms.TextInput(attrs={'placeholder':value 'required':'required'})<block_end><class_stmt>EventForm(forms.Form)<block_start>event_name=forms.CharField(widget=set_placeholder('Name of the event (including the user group name for '<concat>'user group events)'))<line_sep>event_type=forms.CharField(widget=set_placeholder('conference, bar camp, sprint, user group meeting, etc.'))<line_sep>python_focus=forms.CharField(widget=set_placeholder('Data analytics, Web Development, Country-wide conference, etc...'))<line_sep>expected_attendees=forms.CharField(widget=set_placeholder('300+'))<line_sep>location=forms.CharField(widget=set_placeholder('IFEMA building, Madrid, Spain'))<line_sep>date_from=forms.DateField(widget=forms.SelectDateWidget())<line_sep>date_to=forms.DateField(widget=forms.SelectDateWidget())<line_sep>recurrence=forms.CharField(widget=set_placeholder('None, every second Thursday, monthly, etc.'))<line_sep>link=forms.URLField(label='Website URL')<line_sep>description=forms.CharField(widget=forms.Textarea)<def_stmt>send_email self creator<block_start>context={'event':self.cleaned_data 'creator':creator 'site':Site.objects.get_current() }<line_sep>text_message_template=loader.get_template('events/email/new_event.txt')<line_sep>text_message=text_message_template.render(context)<line_sep>send_mail(subject='New event submission: "{}"'.format(self.cleaned_data['event_name']) message=text_message from_email=creator.email recipient_list=[settings.EVENTS_TO_EMAIL] )<block_end><block_end>
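# Worked example for the helpers below (values are what the code produces):
#   list(generate_ngrams("abcd", 2)) -> [('a','b'), ('b','c'), ('c','d')]
#   counter_random(Counter({'x': 3, 'y': 1})) -> 'x' with probability 3/4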
<import_stmt>argparse<import_stmt>bisect<import_stmt>functools<import_stmt>itertools<import_stmt>operator<as>op<import_stmt>pickle<import_stmt>random<import_stmt>string<import_stmt>sys<import_stmt>unicodedata<import_from_stmt>collections Counter<line_sep># Simple Markov n-gram based generator. <def_stmt>generate_ngrams iterable n<block_start>"""Generator that yields n-grams from a sequence."""<line_sep><return>zip(*[itertools.islice(it i <none>)<for>i,it enumerate(itertools.tee(iterable n))])<block_end><def_stmt>counter_random counter filter=<none><block_start>"""Return a single random element from the Counter collection, weighted by count."""<if_stmt>filter<is><not><none><block_start>counter={k:v<for>k,v counter.items()<if>filter(k)}<block_end><if_stmt>len(counter)<eq>0<block_start><raise>Exception("No matching elements in Counter collection")<block_end>seq=list(counter.keys())<line_sep>cum=list(itertools.accumulate(list(counter.values()) op.add))<line_sep><return>seq[bisect.bisect_left(cum random.uniform(0 cum[-1]))]<block_end><def_stmt>latin_normalise i letters=string.ascii_letters+" " lowercase=<true><block_start>"""Example normalisation function that strips everything apart from letters and spaces (even accents)."""<line_sep><return>(nc<for>c i<for>cc (c.lower()<if>lowercase<else>c)<for>nc (cc<if>cc<in>letters<else>unicodedata.normalize("NFKD" cc))<if>nc<in>letters)<block_end><class_stmt>MarkovGenerator(object)<block_start>"""Markov Chain n-gram-based generator for arbitrary iterables."""<def_stmt>__init__ self order<block_start>"""Initialise generator for a given n-gram order."""<line_sep>self.n=order<line_sep>self.markov_dict={}<line_sep>self.prob_dict=Counter()<block_end><def_stmt>reset self<block_start>"""Reset generator."""<line_sep>self.__init__(self.n)<block_end><def_stmt>train self iterable<block_start>"""Train generator on an iterable."""<for_stmt>ngram generate_ngrams(iterable self.n+1)<block_start>self.markov_dict.setdefault(ngram[:self.n] Counter()).update([ngram[self.n]])<line_sep>self.prob_dict.update([ngram[:self.n]])<block_end><block_end><def_stmt>train_file self filename encoding="utf-8" convert=itertools.chain.from_iterable normalise=<lambda>i:i<block_start>"""Train generator on a file. Accepts optional convert function (defaults to reading characters) and normalise function (defaults to the identity)."""<with_stmt>open(filename "r" encoding=encoding)<as>f<block_start>self.train(normalise(convert(f)))<block_end><block_end><def_stmt>render self stop_when start_ngram=<none><block_start>"""Return a tuple using the trained probabilities. Stop condition can be a maximum length or function."""<line_sep>stop_fn=stop_when<if>callable(stop_when)<else><lambda>o:len(o)<ge>stop_when<line_sep>start_fn=start_ngram<if>(callable(start_ngram)<or>start_ngram<is><none>)<else><lambda>n:n<eq>tuple(start_ngram)<line_sep>ngram=counter_random(self.prob_dict filter=start_fn)<line_sep>output=ngram<while_stmt><true><block_start><if_stmt>stop_fn(output)<block_start><break><block_end><elif_stmt>ngram<in>self.markov_dict<block_start>v=counter_random(self.markov_dict[ngram])<line_sep>output<augadd>(v )<line_sep>ngram=ngram[1:]+(v )<block_end><else_stmt><block_start>ngram=counter_random(self.prob_dict)<block_end><block_end><return>output<block_end><def_stmt>render_word self min_length=3 max_length=12<block_start>"""Generates a word. Assumes training on characters including spaces. 
Doesn't filter out real words."""<while_stmt><true><block_start>word="".join(self.render(<lambda>o:len(o)<g>1<and>o[-1]<eq>" " <lambda>n:n[0]<eq>" "))<if_stmt>min_length<le>len(word.strip())<le>max_length<block_start><return>word.strip()<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>parser=argparse.ArgumentParser(description="Generate pseudowords using Markov chains")<line_sep>parser.add_argument("corpus" type=str help="text corpus name")<line_sep>parser.add_argument("number" type=int help="number of words to generate")<line_sep>parser.add_argument("-n" "--order" type=int help="n-gram order [2]" default=2)<line_sep>parser.add_argument("-l" "--letters" type=str help="letters to keep [a-z/A-Z]" default=string.ascii_letters)<line_sep>parser.add_argument("-c" "--casesensitive" action="store_true" help="case sensitive generator [False]")<line_sep>parser.add_argument("-r" "--regenerate" action="store_true" help="always regenerate generator [False]")<line_sep>args=parser.parse_args()<line_sep>pickled_dict="{}_{}.p".format(args.corpus args.order)<try_stmt><block_start><if_stmt>args.regenerate<block_start><raise>FileNotFoundError<block_end>print("Checking for cached generator at {}".format(pickled_dict) file=sys.stderr)<with_stmt>open(pickled_dict "rb")<as>f<block_start>mk=pickle.load(f)<block_end><block_end><except_stmt>FileNotFoundError<block_start>print("Training from corpus (may take a while)." file=sys.stderr)<line_sep>mk=MarkovGenerator(order=args.order)<line_sep>mk.train_file(args.corpus normalise=functools.partial(latin_normalise letters=args.letters+" " lowercase=<not>args.casesensitive))<line_sep>print("Saving generated generator to {}".format(pickled_dict) file=sys.stderr)<with_stmt>open(pickled_dict "wb")<as>f<block_start>pickle.dump(mk f pickle.HIGHEST_PROTOCOL)<block_end><block_end><for_stmt>i range(args.number)<block_start>print(mk.render_word())<block_end><block_end>
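# Example invocation of the script above (the file names are hypothetical):
#   python markov_words.py corpus.txt 20 --order 3
# trains on corpus.txt (or loads the cached corpus.txt_3.p) and prints
# 20 pseudowords built from character 3-grams.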
<import_stmt>json<import_stmt>plotly.graph_objects<as>go<import_stmt>pandas<as>pd<import_stmt>os<import_from_stmt>jsonmerge merge<def_stmt>get_pooling_metrics _data<block_start>count_of_multi_passenger_pool_trips=0<line_sep>count_of_one_passenger_pool_trips=0<line_sep>count_of_solo_trips=0<line_sep>count_of_unmatched_pool_requests=0<line_sep>count_of_unmatched_solo_requests=0<line_sep>sum_deadheading_distance_traveled=0.0<line_sep>sum_ride_hail_distance_traveled=0.0<line_sep>mode_choice_attempt={}<line_sep>person_has_shared_a_trip={}<line_sep>passengers_per_veh={}<line_sep>person_in_veh={}<line_sep>ct_nb_requests={}<line_sep>chained_trips_requests=0<line_sep>chained_trips_count=0<for_stmt>row _data.itertuples()<block_start>person=row.person<line_sep>vehicle=row.vehicle<line_sep>mode=row.mode<line_sep>event=row.type<line_sep>passengers=row.numPassengers<line_sep>distance=row.length<if_stmt>event<eq>"ModeChoice"<block_start><if_stmt>str(person).startswith("rideHailAgent")<block_start>print("ride hail driver with mode choice, does it ever occur !?")<block_end><elif_stmt>mode.startswith("ride_hail")<block_start>mode_choice_attempt[person]=mode<block_end><elif_stmt>person<in>mode_choice_attempt<and><not>mode_choice_attempt[person].endswith("unmatched")<block_start>mode_choice_attempt[person]=mode_choice_attempt[person]+"_unmatched"<block_end><block_end><elif_stmt>event<eq>"PersonEntersVehicle"<block_start><if_stmt>person<not><in>mode_choice_attempt<block_start><continue><block_end>chosen_mode=mode_choice_attempt[person]<if_stmt>chosen_mode.endswith("unmatched")<block_start><if_stmt>chosen_mode.startswith("ride_hail_pooled")<block_start>count_of_unmatched_pool_requests<augadd>1<block_end><else_stmt><block_start>count_of_unmatched_solo_requests<augadd>1<block_end><del_stmt>mode_choice_attempt[person]<block_end><elif_stmt><not>vehicle.startswith("rideHailVehicle")<block_start>i=0<line_sep># agent started walking towards ride hail vehicle <block_end><elif_stmt>chosen_mode<eq>"ride_hail_pooled"<block_start>person_in_veh[person]=vehicle<line_sep>prev_pool=passengers_per_veh[vehicle]<if>vehicle<in>passengers_per_veh<else>0<line_sep>passengers_per_veh[vehicle]=prev_pool+1<for_stmt>p {k:v<for>k,v person_in_veh.items()<if>v<eq>vehicle}<block_start><if_stmt>p<not><in>person_has_shared_a_trip<or><not>person_has_shared_a_trip[p]<block_start>person_has_shared_a_trip[p]=passengers_per_veh[vehicle]<g>1<block_end><block_end># chained trips metrics <if_stmt>prev_pool<eq>0<block_start>ct_nb_requests[vehicle]=0<block_end>ct_nb_requests[vehicle]<augadd>1<block_end><else_stmt><block_start>count_of_solo_trips<augadd>1<block_end><block_end><elif_stmt>event<eq>"PersonLeavesVehicle"<block_start><if_stmt>person<not><in>mode_choice_attempt<block_start><continue><block_end><if_stmt><not>vehicle.startswith("rideHailVehicle")<block_start>i=0<line_sep># agent ended walking towards the ride hail vehicle <block_end><elif_stmt>mode_choice_attempt[person]<eq>"ride_hail_pooled"<block_start><if_stmt>passengers_per_veh[vehicle]<g>1<block_start>person_has_shared_a_trip[person]=<true><block_end><if_stmt>person_has_shared_a_trip[person]<is><true><block_start>count_of_multi_passenger_pool_trips<augadd>1<block_end><else_stmt><block_start>count_of_one_passenger_pool_trips<augadd>1<block_end><del_stmt>person_has_shared_a_trip[person]<del_stmt>person_in_veh[person]<line_sep>passengers_per_veh[vehicle]<augsub>1<line_sep># chained trips metrics 
<if_stmt>passengers_per_veh[vehicle]<eq>0<block_start>chained_trips_requests=(chained_trips_requests<times>chained_trips_count+ct_nb_requests[vehicle])/(chained_trips_count+1)<line_sep>chained_trips_count<augadd>1<block_end><block_end><del_stmt>mode_choice_attempt[person]<block_end><elif_stmt>event<eq>"PathTraversal"<block_start><if_stmt><not>vehicle.startswith("rideHailVehicle")<block_start><continue><block_end><if_stmt>int(passengers)<eq>0<block_start>sum_deadheading_distance_traveled<augadd>float(distance)<block_end>sum_ride_hail_distance_traveled<augadd>float(distance)<block_end><block_end><del_stmt>_data<line_sep>tot_pool_trips=count_of_multi_passenger_pool_trips+count_of_one_passenger_pool_trips+count_of_unmatched_pool_requests<line_sep>tot_solo_trips=count_of_solo_trips+count_of_unmatched_solo_requests<line_sep>tot_rh_trips=tot_pool_trips+tot_solo_trips<line_sep>tot_rh_unmatched=count_of_unmatched_pool_requests+count_of_unmatched_solo_requests<line_sep>multi_passengers_trips_per_pool_trips=0<if>tot_pool_trips<eq>0<else>count_of_multi_passenger_pool_trips/tot_pool_trips<line_sep>multi_passengers_trips_per_ride_hail_trips=0<if>tot_rh_trips<eq>0<else>count_of_multi_passenger_pool_trips/tot_rh_trips<line_sep>unmatched_per_ride_hail_requests=0<if>tot_rh_trips<eq>0<else>tot_rh_unmatched/tot_rh_trips<line_sep>deadheading_per_ride_hail_trips=0<if>sum_ride_hail_distance_traveled<eq>0<else>sum_deadheading_distance_traveled/sum_ride_hail_distance_traveled<line_sep><return>{"ride_hail_requests":tot_rh_trips "ride_hail_solo_requests":count_of_solo_trips+count_of_unmatched_solo_requests "ride_hail_pool_requests":tot_pool_trips+count_of_unmatched_pool_requests "multi_passenger_pool_trips":count_of_multi_passenger_pool_trips "one_passenger_pool_trips":count_of_one_passenger_pool_trips "solo_trips":count_of_solo_trips "unmatched_pool_requests":count_of_unmatched_pool_requests "unmatched_solo_requests":count_of_unmatched_solo_requests "deadheading_distance_traveled":sum_deadheading_distance_traveled "ride_hail_distance_traveled":sum_ride_hail_distance_traveled "multi_passengers_trips_per_pool_trips":multi_passengers_trips_per_pool_trips "multi_passengers_trips_per_ride_hail_trips":multi_passengers_trips_per_ride_hail_trips "unmatched_per_ride_hail_requests":unmatched_per_ride_hail_requests "deadheading_per_ride_hail_trips":deadheading_per_ride_hail_trips "chained_trips_requests":chained_trips_requests "chained_trips_count":chained_trips_count}<block_end><def_stmt>get_all_metrics filename __local_file_path<block_start>metrics_json={}<line_sep>pool_metrics_file_path="{}.pooling-metrics.json".format(__local_file_path)<if_stmt>os.path.exists(pool_metrics_file_path)<block_start><with_stmt>open(pool_metrics_file_path)<as>f<block_start>metrics_json=json.load(f)<block_end><block_end>compression=<none><if_stmt>filename.endswith(".gz")<block_start>compression='gzip'<block_end>data=pd.read_csv(filename sep="," index_col=<none> header=0 compression=compression)<line_sep>modeChoice=data.loc[data['type']<eq>'ModeChoice'].dropna(how='all' axis=1)<line_sep>pathTraversal=data.loc[data['type']<eq>'PathTraversal'].dropna(how='all' axis=1)<line_sep>print("get_all_metrics ...")<if_stmt>len(metrics_json)<eq>0<block_start>ride_hail_mc=modeChoice[modeChoice['mode'].str.startswith('ride_hail')]<line_sep>ride_hail_mc_users=set(ride_hail_mc['person'])<line_sep>data2=data[(data['type'].isin(['PathTraversal'])&data['vehicle'].str.startswith('rideHailVehicle'))|(data['type'].isin(['ModeChoice' 'PersonEntersVehicle' 
'PersonLeavesVehicle'])&data['person'].isin(ride_hail_mc_users))]<del_stmt>data<line_sep>metrics_json=get_pooling_metrics(data2)<with_stmt>open(pool_metrics_file_path 'w')<as>outfile<block_start>json.dump(metrics_json outfile)<line_sep>pooling_sankey_path=__local_file_path.rsplit("/" 1)[0]+"/sankey/"+__local_file_path.rsplit("/" 1)[1]<block_end>#generate_sankey_for_pooling(metrics_json, pooling_sankey_path) <block_end><else_stmt><block_start><del_stmt>data<block_end>pathTraversal['miles']=pathTraversal['length']/1609.34<line_sep>pathTraversal['gallons']=(pathTraversal['primaryFuel']+pathTraversal['secondaryFuel'])<times>8.3141841e-9<line_sep>pathTraversal['mpg']=pathTraversal['miles']/pathTraversal['gallons']<line_sep>pathTraversal['startingPrimaryFuelLevel']=pathTraversal['primaryFuelLevel']+pathTraversal['primaryFuel']<line_sep>pathTraversal['mode_extended']=pathTraversal['mode']<line_sep>pathTraversal['isRH']=pathTraversal['vehicle'].str.contains('rideHail')<line_sep>pathTraversal['isCAV']=pathTraversal['vehicleType'].str.contains('L5')<line_sep>pathTraversal.loc[pathTraversal['isRH'] 'mode_extended']<augadd>'_RH'<line_sep>pathTraversal.loc[pathTraversal['isCAV'] 'mode_extended']<augadd>'_CAV'<line_sep>pathTraversal['trueOccupancy']=pathTraversal['numPassengers']<line_sep>pathTraversal.loc[pathTraversal['mode_extended']<eq>'car' 'trueOccupancy']<augadd>1<line_sep>pathTraversal.loc[pathTraversal['mode_extended']<eq>'walk' 'trueOccupancy']=1<line_sep>pathTraversal.loc[pathTraversal['mode_extended']<eq>'bike' 'trueOccupancy']=1<line_sep>pathTraversal['vehicleMiles']=pathTraversal['length']/1609.34<line_sep>pathTraversal['passengerMiles']=(pathTraversal['length']<times>pathTraversal['trueOccupancy'])/1609.34<line_sep>pathTraversal['vehicleHours']=(pathTraversal['arrivalTime']-pathTraversal['departureTime'])/3600<line_sep>pathTraversal['passengerHours']=pathTraversal['vehicleHours']<times>pathTraversal['trueOccupancy']<line_sep>pathTraversal=pathTraversal.loc[~((pathTraversal['mode']<eq>'walk')&(pathTraversal['vehicleHours']<g>2)) :]<line_sep>lightDutyVehiclePathTraversals=pathTraversal.loc[(pathTraversal['vehicleType'].str.contains("BUS")<eq><false>)&(pathTraversal['vehicleType'].str.contains("BIKE")<eq><false>)&(pathTraversal['vehicleType'].str.contains("BODY")<eq><false>)&(pathTraversal['vehicleType'].str.contains("CABLE")<eq><false>)&(pathTraversal['vehicleType'].str.contains("FERRY")<eq><false>)&(pathTraversal['vehicleType'].str.contains("SUBWAY")<eq><false>)&(pathTraversal['vehicleType'].str.contains("TRAM")<eq><false>)&(pathTraversal['vehicleType'].str.contains("TRAIN")<eq><false>) :]<line_sep>metrics_json['total_VHT_LightDutyVehicles']=lightDutyVehiclePathTraversals['vehicleHours'].sum()<line_sep>modeChoiceTotals=modeChoice.groupby('mode').agg({'person':'count' 'length':'sum'})<for_stmt>mode modeChoiceTotals.index<block_start>metrics_json[mode+'_counts']=int(modeChoiceTotals.loc[mode 'person'])<block_end>pathTraversalModes=pathTraversal.groupby('mode_extended').agg({'vehicleMiles':'sum' 'primaryFuel':'sum' 'secondaryFuel':'sum' 'passengerMiles':'sum' 'vehicleHours':'sum' 'passengerHours':'sum'})<for_stmt>mode pathTraversalModes.index<block_start>metrics_json['VMT_'+mode]=float(pathTraversalModes.loc[mode 'vehicleMiles'])<line_sep>metrics_json['PMT_'+mode]=float(pathTraversalModes.loc[mode 'passengerMiles'])<line_sep>metrics_json['VHT_'+mode]=float(pathTraversalModes.loc[mode 'vehicleHours'])<line_sep>metrics_json['PHT_'+mode]=float(pathTraversalModes.loc[mode 
'passengerHours'])<line_sep>metrics_json['Energy_'+mode]=float(pathTraversalModes.loc[mode 'primaryFuel']+pathTraversalModes.loc[mode 'secondaryFuel'])<block_end><for_stmt>mode pathTraversalModes.index<block_start>metrics_json['VMT_'+mode+"_empty"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>0) 'vehicleMiles'].sum())<line_sep>metrics_json['VMT_'+mode+"_shared"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<g>1) 'vehicleMiles'].sum())<line_sep>metrics_json['VMT_'+mode+"_shared_2p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>2) 'vehicleMiles'].sum())<line_sep>metrics_json['VMT_'+mode+"_shared_3p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>3) 'vehicleMiles'].sum())<line_sep>metrics_json['VMT_'+mode+"_shared_4p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<ge>4) 'vehicleMiles'].sum())<line_sep>metrics_json['PMT_'+mode+"_empty"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>0) 'passengerMiles'].sum())<line_sep>metrics_json['PMT_'+mode+"_shared"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<g>1) 'passengerMiles'].sum())<line_sep>metrics_json['PMT_'+mode+"_shared_2p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>2) 'passengerMiles'].sum())<line_sep>metrics_json['PMT_'+mode+"_shared_3p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>3) 'passengerMiles'].sum())<line_sep>metrics_json['PMT_'+mode+"_shared_4p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<ge>4) 'passengerMiles'].sum())<line_sep>metrics_json['VHT_'+mode+"_empty"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>0) 'vehicleHours'].sum())<line_sep>metrics_json['VHT_'+mode+"_shared"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<g>1) 'vehicleHours'].sum())<line_sep>metrics_json['VHT_'+mode+"_shared_2p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>2) 'vehicleHours'].sum())<line_sep>metrics_json['VHT_'+mode+"_shared_3p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<eq>3) 'vehicleHours'].sum())<line_sep>metrics_json['VHT_'+mode+"_shared_4p"]=float(pathTraversal.loc[(pathTraversal['mode_extended']<eq>mode)&(pathTraversal['trueOccupancy']<ge>4) 'vehicleHours'].sum())<block_end>metrics_json['VMT_L1']=float(pathTraversal.loc[pathTraversal['vehicleType'].str.contains('L1') 'vehicleMiles'].sum())<line_sep>metrics_json['VMT_L3']=float(pathTraversal.loc[pathTraversal['vehicleType'].str.contains('L3') 'vehicleMiles'].sum())<line_sep>metrics_json['VMT_L5']=float(pathTraversal.loc[pathTraversal['vehicleType'].str.contains('L5') 
'vehicleMiles'].sum())<line_sep># scenario-specific factor scaling the simulated sample up to the full population expansion_factor=(7.75/0.315)<times>27.0/21.3<line_sep>transitPathTraversals=pathTraversal.loc[(pathTraversal['vehicleType'].str.contains("BUS")<eq><true>)|(pathTraversal['vehicleType'].str.contains("BIKE")<eq><true>)|(pathTraversal['vehicleType'].str.contains("BODY")<eq><true>)|(pathTraversal['vehicleType'].str.contains("CABLE")<eq><true>)|(pathTraversal['vehicleType'].str.contains("FERRY")<eq><true>)|(pathTraversal['vehicleType'].str.contains("SUBWAY")<eq><true>)|(pathTraversal['vehicleType'].str.contains("TRAM")<eq><true>)|(pathTraversal['vehicleType'].str.contains("TRAIN")<eq><true>) :]<line_sep>transit_primaryFuelTypes=transitPathTraversals.groupby('primaryFuelType').agg({'primaryFuel':'sum'})<line_sep>transit_secondaryFuelTypes=transitPathTraversals.groupby('secondaryFuelType').agg({'secondaryFuel':'sum'})<line_sep>ldv_primaryFuelTypes=lightDutyVehiclePathTraversals.groupby('primaryFuelType').agg({'primaryFuel':'sum'})<line_sep>ldv_secondaryFuelTypes=lightDutyVehiclePathTraversals.groupby('secondaryFuelType').agg({'secondaryFuel':'sum'})<line_sep>primaryFuelTypes=pathTraversal.groupby('primaryFuelType').agg({'primaryFuel':'sum'})<line_sep>secondaryFuelTypes=pathTraversal.groupby('secondaryFuelType').agg({'secondaryFuel':'sum'})<line_sep><for_stmt>fueltype primaryFuelTypes.index<block_start>metrics_json['totalEnergy_'+fueltype]=0<block_end><for_stmt>fuelType secondaryFuelTypes.index<block_start><if_stmt>'None'<not><in>fuelType<block_start>metrics_json['totalEnergy_'+fuelType]=0<block_end><block_end><for_stmt>fueltype transit_primaryFuelTypes.index<block_start>metrics_json['totalEnergy_'+fueltype]<augadd>float(transit_primaryFuelTypes.loc[fueltype 'primaryFuel'])/expansion_factor<block_end><for_stmt>fuelType transit_secondaryFuelTypes.index<block_start><if_stmt>'None'<not><in>fuelType<block_start>metrics_json['totalEnergy_'+fuelType]<augadd>float(transit_secondaryFuelTypes.loc[fuelType 'secondaryFuel'])/expansion_factor<block_end><block_end><for_stmt>fueltype ldv_primaryFuelTypes.index<block_start>metrics_json['totalEnergy_'+fueltype]<augadd>float(ldv_primaryFuelTypes.loc[fueltype 'primaryFuel'])<block_end><for_stmt>fuelType ldv_secondaryFuelTypes.index<block_start><if_stmt>'None'<not><in>fuelType<block_start>metrics_json['totalEnergy_'+fuelType]<augadd>float(ldv_secondaryFuelTypes.loc[fuelType 'secondaryFuel'])<block_end><block_end>print("get_all_metrics done")<line_sep><return>metrics_json<block_end><def_stmt>generate_sankey_for_pooling _df _local_filename_itr _unit=1000.0<block_start>pool_tot_share=_df["multi_passengers_trips_per_ride_hail_trips"]<line_sep>pool_share=_df["multi_passengers_trips_per_pool_trips"]<line_sep>solo_share=(_df["solo_trips"]+_df["one_passenger_pool_trips"])/_df["ride_hail_requests"]<line_sep>unmatched_share=(_df["unmatched_pool_requests"]+_df["unmatched_solo_requests"])/_df["ride_hail_requests"]<line_sep>labels=["pool requests: {:.1f}K".format(_df["ride_hail_pool_requests"]/_unit) "solo requests: {:.1f}K".format(_df["ride_hail_solo_requests"]/_unit) "pool: {:.1%} ({:.1%})".format(pool_tot_share pool_share) "solo: {:.1%}".format(solo_share) "unmatched: {:.1%}".format(unmatched_share)]<line_sep>fig=go.Figure(data=[go.Sankey(# Define nodes node=dict(pad=15 thickness=15 line=dict(color="black" width=0.5) label=labels) # Add links link=dict(source=[0 0 0 1 1] target=[2 3 4 3 4] value=[_df["multi_passenger_pool_trips"] _df["one_passenger_pool_trips"] _df["unmatched_pool_requests"] _df["solo_trips"] 
_df["unmatched_solo_requests"]]))])<line_sep>fig.update_layout(title_text="Sankey Diagram For Pooling" font_size=10)<line_sep>fig.write_image("{}.pooling-sankey.png".format(_local_filename_itr))<block_end>
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. <import_stmt>functools<import_stmt>json<import_stmt>os<import_from_stmt>playhouse.sqlite_ext SqliteExtDatabase<import_from_stmt>nni.common.blob_utils load_or_download_file<import_from_stmt>.constants DB_URLS DATABASE_DIR<line_sep>json_dumps=functools.partial(json.dumps sort_keys=<true>)<line_sep># to prevent repetitive loading of benchmarks _loaded_benchmarks={}<def_stmt>load_benchmark benchmark:str<arrow>SqliteExtDatabase<block_start>""" Load a benchmark as a database. Parameters ---------- benchmark : str Benchmark name like nasbench201. """<if_stmt>benchmark<in>_loaded_benchmarks<block_start><return>_loaded_benchmarks[benchmark]<block_end>url=DB_URLS[benchmark]<line_sep>local_path=os.path.join(DATABASE_DIR os.path.basename(url))<line_sep>load_or_download_file(local_path url)<line_sep>_loaded_benchmarks[benchmark]=SqliteExtDatabase(local_path autoconnect=<true>)<line_sep><return>_loaded_benchmarks[benchmark]<block_end><def_stmt>download_benchmark benchmark:str progress:bool=<true><block_start>""" Download a converted benchmark. Parameters ---------- benchmark : str Benchmark name like nasbench201. """<line_sep>url=DB_URLS[benchmark]<line_sep>local_path=os.path.join(DATABASE_DIR os.path.basename(url))<line_sep>load_or_download_file(local_path url <true> progress)<block_end>
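# Example (assuming 'nasbench201' is a key in DB_URLS): db = load_benchmark('nasbench201') downloads the SQLite file on first use and returns the cached database handle on later calls.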
<import_from_stmt>unittest TestCase<import_from_stmt>pysparkling.utils MonotonicallyIncreasingIDGenerator<class_stmt>MonotonicallyIncreasingIDGeneratorTests(TestCase)<block_start><def_stmt>test_init_ok self<block_start>sut=MonotonicallyIncreasingIDGenerator(0)<line_sep>self.assertEqual(sut.value -1)# Shouldn't we throw an error here? sut=MonotonicallyIncreasingIDGenerator(1)<line_sep>self.assertEqual(sut.value 8589934592-1)# I do it this way so I can easily find/replace the value sut=MonotonicallyIncreasingIDGenerator(2)<line_sep>self.assertEqual(sut.value 2<times>8589934592-1)<block_end><def_stmt>test_next_value_ok self<block_start>sut=MonotonicallyIncreasingIDGenerator(1)<line_sep>self.assertEqual(next(sut) 8589934592)<line_sep>self.assertEqual(next(sut) 8589934593)<line_sep>self.assertEqual(next(sut) 8589934594)<block_end><block_end>
<import_from_stmt>abc abstractmethod ABCMeta<import_from_stmt>.security Permissions require<import_from_stmt>.utils json_response validate_query<class_stmt>AbstractResource(metaclass=ABCMeta)<block_start><def_stmt>__init__ self * primary_key resource_name=<none><block_start>class_name=self.__class__.__name__.lower()<line_sep>self._resource_name=resource_name<or>class_name<line_sep>self._primary_key=primary_key<block_end>@property<def_stmt>primary_key self<block_start><return>self._primary_key<block_end>@abstractmethod<async_keyword><def_stmt>list self request# pragma: no cover <block_start><await>require(request Permissions.view)<line_sep>q=validate_query(request.GET)<assert_stmt>q<line_sep># total number of results should be supplied in a separate header headers={'X-Total-Count':str(0)}<line_sep><return>json_response({} headers=headers)<block_end>@abstractmethod<async_keyword><def_stmt>detail self request# pragma: no cover <block_start><await>require(request Permissions.view)<line_sep>entity_id=request.match_info['entity_id']<assert_stmt>entity_id<line_sep><return>json_response({})<block_end>@abstractmethod<async_keyword><def_stmt>create self request# pragma: no cover <block_start><await>require(request Permissions.add)<line_sep><return>json_response({})<block_end>@abstractmethod<async_keyword><def_stmt>update self request# pragma: no cover <block_start><await>require(request Permissions.edit)<line_sep>entity_id=request.match_info['entity_id']<assert_stmt>entity_id<line_sep><return>json_response({})<block_end>@abstractmethod<async_keyword><def_stmt>delete self request# pragma: no cover <block_start><await>require(request Permissions.delete)<line_sep>entity_id=request.match_info['entity_id']<assert_stmt>entity_id<line_sep><return>json_response({})<block_end><def_stmt>setup self app base_url<block_start>url=str(base_url/self._resource_name)<line_sep>url_id=url+'/{entity_id}'<line_sep>add_route=app.router.add_route<line_sep>add_route('GET' url self.list)<line_sep>add_route('GET' url_id self.detail)<line_sep>add_route('POST' url self.create)<line_sep>add_route('PUT' url_id self.update)<line_sep>add_route('DELETE' url_id self.delete)<block_end><block_end>
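# Concrete resources subclass AbstractResource, implement the five abstract handlers above, and call setup(app, base_url) to register the GET/POST/PUT/DELETE routes for the resource.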
expected_output={"slot":{"lc":{"1":{"16x400G Ethernet Module":{"hardware":"3.1" "mac_address":"bc-4a-56-ff-fa-5b to bc-4a-56-ff-fb-dd" "model":"N9K-X9716D-GX" "online_diag_status":"Pass" "ports":"16" "serial_number":"FOC24322RBW" "slot":"1" "slot/world_wide_name":"LC1" "software":"10.1(0.233)" "status":"ok"}} "2":{"36x40/100G Ethernet Module":{"hardware":"1.1" "mac_address":"90-77-ee-ff-2d-b0 to 90-77-ee-ff-2e-43" "model":"N9K-X9736C-FX" "online_diag_status":"Pass" "ports":"36" "serial_number":"FOC24294DJ8" "slot":"2" "slot/world_wide_name":"LC2" "software":"10.1(0.233)" "status":"ok"}} "22":{"8-slot (100G) Fabric Module":{"hardware":"1.1" "mac_address":"NA" "model":"N9K-C9508-FM-E2" "online_diag_status":"Pass" "ports":"0" "serial_number":"FOC24381TPG" "slot":"22" "slot/world_wide_name":"FM2" "software":"10.1(0.233)" "status":"ok"}} "24":{"8-slot (100G) Fabric Module":{"hardware":"1.1" "mac_address":"NA" "model":"N9K-C9508-FM-E2" "online_diag_status":"Pass" "ports":"0" "serial_number":"FOC24381TX1" "slot":"24" "slot/world_wide_name":"FM4" "software":"10.1(0.233)" "status":"ok"}} "26":{"8-slot (100G) Fabric Module":{"hardware":"1.1" "mac_address":"NA" "model":"N9K-C9508-FM-E2" "online_diag_status":"Pass" "ports":"0" "serial_number":"FOC24381TUV" "slot":"26" "slot/world_wide_name":"FM6" "software":"10.1(0.233)" "status":"ok"}} "29":{"System Controller":{"hardware":"2.0" "mac_address":"NA" "model":"N9K-SC-A" "online_diag_status":"Pass" "ports":"0" "serial_number":"FOC24362EU0" "slot":"29" "slot/world_wide_name":"SC1" "software":"10.1(0.233)" "status":"active"}} "30":{"System Controller":{"hardware":"2.0" "mac_address":"NA" "model":"N9K-SC-A" "online_diag_status":"Pass" "ports":"0" "serial_number":"FOC2435407P" "slot":"30" "slot/world_wide_name":"SC2" "software":"10.1(0.233)" "status":"standby"}} "5":{"36x40G Ethernet":{"model":"Module" "ports":"36" "slot":"5" "status":"pwr-denied"}} "6":{"48x10/25G + 4x40/100G Ethernet Module":{"hardware":"2.3" "mac_address":"24-16-9d-ff-9a-09 to 24-16-9d-ff-9a-4c" "model":"N9K-X97160YC-EX" "online_diag_status":"Pass" "ports":"52" "serial_number":"FOC24021CNU" "slot":"6" "slot/world_wide_name":"LC6" "software":"10.1(0.233)" "status":"ok"}} "7":{"48x10G + 4x40/100G Ethernet":{"model":"Module" "ports":"52" "slot":"7" "status":"pwr-denied"}}} "rp":{"27":{"Supervisor Module":{"hardware":"1.1" "mac_address":"54-88-de-ff-09-2f to 54-88-de-ff-09-40" "model":"N9K-SUP-A+" "online_diag_status":"Pass" "ports":"0" "serial_number":"FOC24362EGB" "slot":"27" "slot/world_wide_name":"SUP1" "software":"10.1(0.233)" "status":"active"}}}}}<line_sep>
# Python implementation of randomized-pivot QuickSort using Lomuto's partition scheme. # Contributed by @ddhira123 # References: GeeksforGeeks <import_stmt>random<line_sep>''' The function which implements QuickSort. array : array to be sorted. start : starting index of the array. stop : ending index of the array. '''<def_stmt>quicksort array start stop<block_start><if_stmt>(start<l>stop)# pivot is the index where the pivot element lies in the array # after random_partition has placed a randomly chosen pivot there <block_start>pivot=random_partition(array start stop)<line_sep># At this stage the array is partially sorted around the pivot. # Separately sort the left part of the array and the # right part of the array that the pivot has partitioned. quicksort(array start pivot-1)<line_sep>quicksort(array pivot+1 stop)<block_end><block_end># This function generates a random pivot, swaps the first element with the pivot # and calls the partition function. <def_stmt>random_partition array start stop# Generate a random index between start (inclusive) and stop (exclusive). <block_start>randpivot=random.randrange(start stop)<line_sep># Swap the starting element of the array and the pivot array[start],array[randpivot]=array[randpivot] array[start]<line_sep><return>partition(array start stop)<block_end>''' This function takes the first element as pivot, places the pivot element at the correct position in the sorted array. All the elements are re-arranged according to the pivot: the elements smaller than the pivot are placed on the left and the elements greater than the pivot are placed to the right of the pivot. '''<def_stmt>partition array start stop<block_start>pivot=start# the first element is used as the pivot # i marks the boundary of the region of elements smaller than or # equal to the pivot; its final position gives quicksort() the # split point it needs to reach the stopping condition. i=start+1<line_sep># the partition scan starts just after the pivot. <for_stmt>j range(start+1 stop+1)# if the current element is smaller than or equal to the pivot, # place it to the left side of the partition. <block_start><if_stmt>array[j]<le>array[pivot]<block_start>array[i],array[j]=array[j] array[i]<line_sep>i=i+1<block_end><block_end>array[pivot],array[i-1]=array[i-1] array[pivot]<line_sep>pivot=i-1<line_sep><return>(pivot)<block_end># Driver Code <if_stmt>__name__<eq>"__main__"<block_start>array=[99 1 4 3 17 21 16 34 29]<line_sep>quicksort(array 0 len(array)-1)<line_sep>print(array)<block_end>
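# Expected output of the driver code: [1, 3, 4, 16, 17, 21, 29, 34, 99]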
""" Test cases for the Comparisons class over the composite types: Layout (the + operator) Overlay (the * operator) HoloMaps are not tested in this file. """<import_from_stmt>holoviews Element<import_from_stmt>holoviews.element.comparison ComparisonTestCase<class_stmt>CompositeComparisonTestCase(ComparisonTestCase)<block_start><def_stmt>setUp self<block_start>self.el1=Element('data1')<line_sep>self.el2=Element('data2')<line_sep>self.el3=Element('data3')<line_sep>self.el4=Element('data5' group='ValB')<line_sep>self.el5=Element('data6' label='LabelA')<block_end>#========================# # Tests for layout trees # #========================# <def_stmt>test_layouttree_comparison_equal self<block_start>t1=self.el1+self.el2<line_sep>t2=self.el1+self.el2<line_sep>self.assertEqual(t1 t2)<block_end><def_stmt>test_layouttree_comparison_equal_large self<block_start>t1=self.el1+self.el2+self.el4+self.el5<line_sep>t2=self.el1+self.el2+self.el4+self.el5<line_sep>self.assertEqual(t1 t2)<block_end><def_stmt>test_layouttree_comparison_unequal_data self<block_start>t1=self.el1+self.el2<line_sep>t2=self.el1+self.el3<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) "'data2' != 'data3'")<block_end><block_end><def_stmt>test_layouttree_comparison_unequal_paths self<block_start>t1=self.el1+self.el2<line_sep>t2=self.el1+self.el2.relabel(group='ValA')<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) 'Layouts have mismatched paths.')<block_end><block_end><def_stmt>test_layouttree_comparison_unequal_sizes self<block_start>t1=self.el1+self.el2<line_sep>t2=self.el1+self.el2+self.el3<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) 'Layouts have mismatched path counts.')<block_end><block_end>#=============================# # Matching tests for Overlays # #=============================# <def_stmt>test_overlay_comparison_equal self<block_start>t1=self.el1<times>self.el2<line_sep>t2=self.el1<times>self.el2<line_sep>self.assertEqual(t1 t2)<block_end><def_stmt>test_overlay_comparison_equal_large self<block_start>t1=self.el1<times>self.el2<times>self.el3<times>self.el4<line_sep>t2=self.el1<times>self.el2<times>self.el3<times>self.el4<line_sep>self.assertEqual(t1 t2)<block_end><def_stmt>test_overlay_comparison_unequal_data self<block_start>t1=self.el1<times>self.el2<line_sep>t2=self.el1<times>self.el3<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) "'data2' != 'data3'")<block_end><block_end><def_stmt>test_overlay_comparison_unequal_paths self<block_start>t1=self.el1<times>self.el2<line_sep>t2=self.el1<times>self.el2.relabel(group='ValA')<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) 'Overlays have mismatched paths.')<block_end><block_end><def_stmt>test_overlay_comparison_unequal_sizes self<block_start>t1=self.el1<times>self.el2<line_sep>t2=self.el1<times>self.el2<times>self.el3<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) 'Overlays have mismatched path counts.')<block_end><block_end>#==================================# # Mixed composite comparison tests # #==================================# <def_stmt>test_composite_comparison_equal 
self<block_start>t1=(self.el1<times>self.el2)+(self.el1<times>self.el2)<line_sep>t2=(self.el1<times>self.el2)+(self.el1<times>self.el2)<line_sep>self.assertEqual(t1 t2)<block_end><def_stmt>test_composite_unequal_data self<block_start>t1=(self.el1<times>self.el2)+(self.el1<times>self.el2)<line_sep>t2=(self.el1<times>self.el2)+(self.el1<times>self.el3)<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) "'data2' != 'data3'")<block_end><block_end><def_stmt>test_composite_unequal_paths_outer self<block_start>t1=(self.el1<times>self.el2)+(self.el1<times>self.el2).relabel(group='ValA')<line_sep>t2=(self.el1<times>self.el2)+(self.el1<times>self.el3)<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) 'Layouts have mismatched paths.')<block_end><block_end><def_stmt>test_composite_unequal_paths_inner self<block_start>t1=(self.el1<times>self.el2)+(self.el1<times>self.el2.relabel(group='ValA'))<line_sep>t2=(self.el1<times>self.el2)+(self.el1<times>self.el3)<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) 'Overlays have mismatched paths.')<block_end><block_end><def_stmt>test_composite_unequal_sizes self<block_start>t1=(self.el1<times>self.el2)+(self.el1<times>self.el2)+self.el3<line_sep>t2=(self.el1<times>self.el2)+(self.el1<times>self.el2)<try_stmt><block_start>self.assertEqual(t1 t2)<block_end><except_stmt>AssertionError<as>e<block_start>self.assertEqual(str(e) 'Layouts have mismatched path counts.')<block_end><block_end><block_end>
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_from_stmt>spack *<class_stmt>Flexflow(CMakePackage)<block_start>"""FlexFlow is a deep learning framework that accelerates distributed DNN training by automatically searching for efficient parallelization strategies. FlexFlow provides a drop-in replacement for TensorFlow Keras and PyTorch. """<line_sep>homepage="http://flexflow.ai"<line_sep>git="https://github.com/flexflow/FlexFlow.git"<line_sep>maintainers=['jiazhihao' 'eddy16112']<line_sep>version('master' branch='master' submodules=<true>)<line_sep>depends_on("[email protected]:" type='build')<line_sep>depends_on('[email protected]:11.9')<line_sep>depends_on('cudnn')<line_sep>depends_on('nccl' when='+nccl')<line_sep>depends_on('[email protected]:3.9' when='+python')<line_sep>depends_on('mpi' when='network=gasnet')<line_sep>depends_on('ucx' when='conduit=ucx')<line_sep>depends_on('mpi' when='conduit=mpi')<line_sep>variant('max_dims' values=int default=4 description="Set max number of dimensions for logical regions.")<line_sep>variant('zlib' default=<true> description="Enable zlib support.")<line_sep>variant('nccl' default=<false> description="Enable NCCL support.")<line_sep>variant('python' default=<true> description="Enable Python support.")<line_sep>variant('examples' default=<false> description="Build all examples.")<line_sep>variant('avx2' default=<false> description="Enable AVX2 support.")<line_sep>variant('gasnet' default=<false> description="Enable GASNet support.")<line_sep>variant('conduit' default='none' values=('aries' 'ibv' 'udp' 'mpi' 'ucx' 'none') description="The gasnet conduit(s) to enable." multi=<false>)<line_sep>conflicts('conduit=none' when='gasnet=True' msg="a conduit must be selected when enabling GASNet")<line_sep># cuda_arch=0 means FlexFlow will automatically detect the cuda arch of the current platform cuda_arch_list=('0' '60' '70' '75' '80')<line_sep>variant('cuda_arch' default='0' values=cuda_arch_list description="GPU/CUDA architecture to build for."
multi=<false>)<def_stmt>cmake_args self<block_start>spec=self.spec<line_sep>cmake_cxx_flags=[]<line_sep>options=['-DCUDA_USE_STATIC_CUDA_RUNTIME=OFF']<if_stmt>'+python'<in>spec<block_start>options.append('-DFF_USE_PYTHON=ON')<block_end><else_stmt><block_start>options.append('-DFF_USE_PYTHON=OFF')<block_end><if_stmt>'+nccl'<in>spec<block_start>options.append('-DFF_USE_NCCL=ON')<block_end><else_stmt><block_start>options.append('-DFF_USE_NCCL=OFF')<block_end><if_stmt>'+examples'<in>spec<block_start>options.append('-DFF_BUILD_ALL_EXAMPLES=ON')<block_end><else_stmt><block_start>options.append('-DFF_BUILD_ALL_EXAMPLES=OFF')<block_end><if_stmt>'+avx2'<in>spec<block_start>options.append('-DFF_USE_AVX2=ON')<block_end><else_stmt><block_start>options.append('-DFF_USE_AVX2=OFF')<block_end><if_stmt>'+gasnet'<in>spec<block_start>options.append('-DFF_USE_GASNET=ON')<line_sep>gasnet_conduit=spec.variants['conduit'].value<line_sep>options.append('-DFF_GASNET_CONDUIT=%s'%gasnet_conduit)<block_end><else_stmt><block_start>options.append('-DFF_USE_GASNET=OFF')<block_end>maxdims=int(spec.variants['max_dims'].value)<line_sep>options.append('-DFF_MAX_DIM=%d'%maxdims)<line_sep>cuda_arch=spec.variants['cuda_arch'].value<if_stmt>cuda_arch<ne>'0'<block_start>options.append('-DFF_CUDA_ARCH=%s'%cuda_arch)<block_end><return>options<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>jack.io.embeddings.embeddings Embeddings load_embeddings<import_from_stmt>jack.io.embeddings.glove load_glove<import_from_stmt>jack.io.embeddings.word2vec load_word2vec get_word2vec_vocabulary<line_sep>__all__=['Embeddings' 'load_embeddings' 'load_word2vec' 'get_word2vec_vocabulary' 'load_glove' ]<line_sep>
""" toad command line application """<import_stmt>argparse<import_from_stmt>.commands get_plugins<def_stmt>add_sub parsers config<block_start>"""add sub parser by config """<line_sep>info=config.get('info' {})<line_sep>args=config.get('args' [])<line_sep>defaults=config.get('defaults' <none>)<line_sep>sub_parser=parsers.add_parser(**info)<for_stmt>detail args<block_start>flag=detail.pop('flag')<line_sep>sub_parser.add_argument(*flag **detail)<block_end><if_stmt>defaults<block_start>sub_parser.set_defaults(**defaults)<block_end><block_end><def_stmt>get_parser <block_start>"""get parser """<line_sep>parser=argparse.ArgumentParser(prog='toad' description='Detect data from a csv file' )<line_sep>subparsers=parser.add_subparsers()<line_sep>plugins=get_plugins()<for_stmt>plug plugins<block_start>add_sub(subparsers plug.ARGS)<block_end><return>parser<block_end><def_stmt>main <block_start>""" """<line_sep>parser=get_parser()<line_sep>args=parser.parse_args()<if_stmt>hasattr(args 'func')<block_start>args.func(args)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>HcalHardcodeGeometryEP=cms.ESProducer("HcalHardcodeGeometryEP" UseOldLoader=cms.bool(<false>))<line_sep>
# Copyright 2017 The TensorFlow Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Sample data exhibiting audio summaries, via a waveform generator."""<import_stmt>inspect<import_stmt>math<import_stmt>os.path<import_from_stmt>absl app<import_from_stmt>absl flags<import_stmt>tensorflow<as>tf<line_sep>FLAGS=flags.FLAGS<line_sep>flags.DEFINE_string("logdir" "/tmp/audio_demo" "Directory into which to write TensorBoard data." )<line_sep>flags.DEFINE_integer("steps" 5 "Number of frequencies of each waveform to generate.")<line_sep># Parameters for the audio output. flags.DEFINE_integer("sample_rate" 44100 "Sample rate, in Hz.")<line_sep>flags.DEFINE_float("duration" 2.0 "Duration of each waveform, in s.")<def_stmt>_samples <block_start>"""Compute how many samples should be included in each waveform."""<line_sep><return>int(FLAGS.sample_rate<times>FLAGS.duration)<block_end><def_stmt>run wave_name wave_constructor step<block_start>"""Generate an audio waveform and write it to summaries. Waves will be generated at frequencies ranging from A4 to A5. Args: wave_name: the name of the wave being generated wave_constructor: a function that accepts a float32 frequency (in Hz) at which to construct a wave, and returns a tensor of shape [1, _samples(), `n`] representing audio data (for some number of channels `n`). step: number step """<line_sep># For the given step, linearly interpolate a frequency between A4 (440 Hz) # and A5 (880 Hz) and create its waveform. f_min=440.0<line_sep>f_max=880.0<line_sep>t=step/(FLAGS.steps-1)<line_sep>frequency=f_min<times>(1.0-t)+f_max<times>t<line_sep>waveform=wave_constructor(frequency)<line_sep># Optionally generate a description that will appear in TensorBoard # next to the audio. This one includes the source code behind the # waveform for context. source="\n".join(" %s"%line.rstrip()<for>line inspect.getsourcelines(wave_constructor)[0])<line_sep>description="A wave of type `%r`, generated via:\n\n%s"%(wave_name source )<line_sep># Write the audio waveform summary. The `waveform` is a # [num_clips, num_frames, num_channels] shaped tensor. tf.summary.audio("waveform" waveform FLAGS.sample_rate step=step description=description )<block_end><def_stmt>sine_wave frequency<block_start>"""Emit a sine wave at the given frequency."""<line_sep>xs=tf.reshape(tf.range(_samples() dtype=tf.float32) [1 _samples() 1])<line_sep>ts=xs/FLAGS.sample_rate<line_sep><return>tf.sin(2<times>math.pi<times>frequency<times>ts)<block_end><def_stmt>square_wave frequency<block_start>"""Emit a square wave at the given frequency."""<line_sep># The square is just the sign of the sine! <return>tf.sign(sine_wave(frequency))<block_end><def_stmt>bisine_wave frequency<block_start>"""Emit two sine waves, in stereo at different octaves."""<line_sep># Generate 2 sine waves, each of which is a [1, _samples(), 1] shaped tensor. 
sine_hi=sine_wave(frequency)<line_sep>sine_lo=sine_wave(frequency/2.0)<line_sep># Concatenating along axis 2 produces a [1, _samples(), 2] shaped tensor, a # stereo (2 channel) audio waveform. sample1=tf.concat([sine_lo sine_hi] axis=2)<line_sep>sample2=tf.concat([sine_hi sine_lo] axis=2)<line_sep># Return [2, _samples(), 2], representing 2 audio clips. <return>tf.concat([sample1 sample2] axis=0)<block_end><def_stmt>run_all base_logdir<block_start>"""Generate waves of the shapes defined above. For each wave, creates a run that contains summaries. Arguments: base_logdir: the directory into which to store all the runs' data """<line_sep>waves=[("sine_wave" sine_wave) ("square_wave" square_wave) ("bisine_wave" bisine_wave) ]<for_stmt>wave_name,wave_constructor waves<block_start>logdir=os.path.join(base_logdir wave_name)<line_sep>writer=tf.summary.create_file_writer(logdir)<with_stmt>writer.as_default()<block_start><for_stmt>step range(FLAGS.steps)<block_start>run(wave_name wave_constructor step)<block_end><block_end><block_end><block_end><def_stmt>main unused_argv<block_start>print("Saving output to %s."%FLAGS.logdir)<line_sep>print("To view results in your browser, run `tensorboard --logdir %s`"%FLAGS.logdir)<line_sep>run_all(FLAGS.logdir)<line_sep>print("Done. Output saved to %s."%FLAGS.logdir)<block_end><if_stmt>__name__<eq>"__main__"<block_start>app.run(main)<block_end>
# utils_ui.py # # MIT License # # Copyright (c) 2020 <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_stmt>logging<import_stmt>random<import_from_stmt>typing Optional Tuple Callable<import_from_stmt>gi.repository GdkPixbuf Granite Gtk Notify<as>notify<import_from_stmt>.config APP_TITLE DEFAULT_NOTIFICATION_TIMEOUT DEFAULT_NOTIFICATION_ERROR_TIMEOUT<import_from_stmt>.config ApplicationSettings<import_from_stmt>.utils call_in_main_thread running_on_main_thread<line_sep>############################################################################### # messages and notifications ############################################################################### <def_stmt>show_notification msg header:str=<none> icon:str=<none> timeout:Optional[int]=<none> action:Optional[Tuple[str Callable]]=<none> threaded:bool=<true> is_error:bool=<false><block_start>""" Show a desktop notification """<line_sep># see https://lazka.github.io/pgi-docs/#Notify-0.7 # maybe we could also use https://notify2.readthedocs.io/en/latest/ <if_stmt><not>header<block_start>header=APP_TITLE<block_end>icon_filename=<none><if_stmt><not>icon<block_start>icon_filename=ApplicationSettings.get_app_icon()<block_end><if_stmt>is_error<block_start>t=timeout<if>timeout<is><not><none><else>DEFAULT_NOTIFICATION_ERROR_TIMEOUT<line_sep>logging.error(msg)<block_end><else_stmt><block_start>t=timeout<if>timeout<is><not><none><else>DEFAULT_NOTIFICATION_TIMEOUT<line_sep>logging.debug(msg)<block_end><def_stmt>do_notify <block_start><assert_stmt>running_on_main_thread()<line_sep>n=notify.Notification.new(header msg icon)<line_sep>n.set_app_name(APP_TITLE)<if_stmt>icon_filename<is><not><none><block_start>pixbuf=GdkPixbuf.Pixbuf.new_from_file(icon_filename)<line_sep>n.set_icon_from_pixbuf(pixbuf)<block_end># Note that the timeout may be ignored by the server.
n.set_timeout(t)<if_stmt>action<is><not><none><block_start>action_str,action_callback=action<line_sep>r=random.randrange(0 10000)<line_sep>n.add_action(f"{r}-{APP_TITLE}-id" action_str action_callback <none>)<block_end>n.show()<if_stmt><not>threaded# important: do not return anything if invoked with `call_in_main_thread` <block_start><return>n<block_end><block_end><if_stmt>threaded<block_start>call_in_main_thread(do_notify)<line_sep><return><none><block_end><else_stmt><block_start><return>do_notify()<block_end><block_end><def_stmt>show_error_dialog msg:str explanation:str icon:str="dialog-error" parent=<none> ok_label:str="Ok"<arrow><none><block_start>""" Show an info/warning dialog, with just a "Close" button """<def_stmt>do_show <block_start>error_diag=Granite.MessageDialog.with_image_from_icon_name(msg "\n\n"+explanation icon Gtk.ButtonsType.CLOSE)<if_stmt>parent<is><not><none><block_start>error_diag.set_transient_for(parent)<line_sep>error_diag.set_modal(<true>)<block_end>error_diag.connect("response" <lambda>widget response_id:widget.destroy())<line_sep>error_diag.show_all()<block_end><if_stmt>running_on_main_thread()<block_start>do_show()<block_end><else_stmt><block_start>call_in_main_thread(do_show)<block_end><block_end><def_stmt>show_warning_dialog msg:str explanation:str<block_start>""" Show a warning dialog, with just one OK button """<line_sep>show_error_dialog(msg explanation icon="dialog-warning")<block_end>############################################################################### # settings ############################################################################### <def_stmt>_link_gtk_entry_to_settings settings:ApplicationSettings entry:Gtk.Entry settings_id:str<block_start>""" Link a Gtk.Entry to a GSettings ID, so any change in one of them will be reflected in the other one. """<line_sep>name=entry.get_name()<line_sep>logging.debug(f"[LINK] settings::{settings_id} <-> entry {name} [str]")<line_sep>curr_value=settings.get_safe_string(settings_id)<if_stmt>curr_value<block_start>entry.set_text(curr_value)<block_end>settings.connect(f"changed::{settings_id}" <lambda>s k:entry.set_text(settings.get_safe_string(settings_id)))<line_sep>entry.connect("changed" <lambda>e:settings.set_string(settings_id str(entry.get_text())))<block_end><def_stmt>_link_gtk_switch_to_settings settings:ApplicationSettings switch:Gtk.Switch settings_id:str<block_start>""" Link a Gtk.Switch to a GSettings ID, so any change in one of them will be reflected in the other one. """<line_sep>name=switch.get_name()<line_sep>logging.debug(f"[LINK] settings::{settings_id} <-> switch {name} [bool]")<line_sep>curr_value=settings.get_boolean(settings_id)<if_stmt>curr_value<block_start>switch.set_state(curr_value)<block_end>settings.connect(f"changed::{settings_id}" <lambda>s k:switch.set_state(settings.get_boolean(settings_id)))<line_sep>switch.connect("state-set" <lambda>_sw _state:settings.set_boolean(settings_id _state))<block_end><def_stmt>_link_gtk_spinbutton_to_settings settings:ApplicationSettings spin:Gtk.SpinButton settings_id:str<block_start>""" Link a Gtk.SpinButton to a GSettings ID, so any change in one of them will be reflected in the other one.
"""<line_sep>name=spin.get_name()<line_sep>logging.debug(f"[LINK] settings::{settings_id} <-> spinbutton {name} [int]")<line_sep>curr_value=settings.get_int(settings_id)<if_stmt>curr_value<block_start>spin.set_value(settings.get_int(settings_id))<block_end>settings.connect(f"changed::{settings_id}" <lambda>s k:spin.set_value(settings.get_int(settings_id)))<line_sep>spin.connect("change-value" <lambda>e:settings.set_int(settings_id spin.get_value()))<block_end><def_stmt>_link_gtk_combobox_to_settings settings:ApplicationSettings combo:Gtk.ComboBox settings_id:str<block_start><def_stmt>combo_changed *args<block_start>tree_iter=combo.get_active_iter()<if_stmt>tree_iter<is><not><none><block_start>model=combo.get_model()<line_sep>text=model[tree_iter][0]<block_end><else_stmt><block_start>entry=combo.get_child()<line_sep>text=entry.get_text()<block_end>settings.set_string(settings_id text)<block_end><def_stmt>settings_changed *args<block_start>value=settings.get_safe_string(settings_id)<if_stmt>value<is><none><or>value<eq>""<block_start>combo.set_active(0)<block_end><else_stmt><block_start>model=combo.get_model()<for_stmt>i range(0 len(model))<block_start>text=model[i][0]<if_stmt>text<eq>value<block_start>combo.set_active(i)<line_sep><return><block_end><block_end>entry=combo.get_child()<if_stmt>hasattr(entry "set_text")<block_start>entry.set_text(value)<block_end><block_end><block_end>name=combo.get_name()<line_sep>logging.debug(f"[LINK] settings::{settings_id} <-> combo {name} [str]")<line_sep>settings_changed()<line_sep>settings.connect(f"changed::{settings_id}" <lambda>s k:settings_changed)<line_sep>combo.connect("changed" combo_changed)<block_end><def_stmt>link_widget_to_settings settings:ApplicationSettings widget:Gtk.Widget settings_id:str<block_start>""" Link a Gtk.SpinButton to a GSettings ID, so any change in one of them will be reflected in the other one. """<line_sep># note: take into account inheritance in these heuristics... <if_stmt>isinstance(widget Gtk.ComboBox)<block_start>_link_gtk_combobox_to_settings(settings widget settings_id)<block_end><elif_stmt>isinstance(widget Gtk.SpinButton)<block_start>_link_gtk_spinbutton_to_settings(settings widget settings_id)<block_end><elif_stmt>isinstance(widget Gtk.Switch)<block_start>_link_gtk_switch_to_settings(settings widget settings_id)<block_end><elif_stmt>isinstance(widget Gtk.Entry)<block_start>_link_gtk_entry_to_settings(settings widget settings_id)<block_end><else_stmt><block_start><raise>Exception("unsupported widget type to link")<block_end><block_end>############################################################################### # settings UI ############################################################################### <class_stmt>SettingsPage(Granite.SimpleSettingsPage)<block_start>""" A settings page, with some convenience functions. 
"""<line_sep># settings that will be reset when calling set_defaults() _managed_settings=[]<def_stmt>__init__ self settings:ApplicationSettings **kwargs<block_start>self._settings=settings<line_sep>super().__init__(**kwargs)<line_sep>self._entries=[]<line_sep>self._entries_area=self.get_content_area()<line_sep>self._entries_area.set_halign(Gtk.Align.FILL)<line_sep>self._entries_area.set_hexpand(<true>)<block_end><def_stmt>append_entry self label widget setting=<none># attach to the grid (see https://python-gtk-3-tutorial.readthedocs.io/en/latest/layout.html#grid) <block_start>count=len(self._entries)<line_sep>self._entries_area.attach(label 0 count 1 1)<line_sep>self._entries_area.attach(widget 1 count 1 1)<line_sep>self._entries.append(widget)<if_stmt>setting<block_start>link_widget_to_settings(self._settings widget setting)<block_end><block_end><def_stmt>append_labeled_entry self text widget setting=<none><block_start>label=Gtk.Label(text)<line_sep>label.props.hexpand=<false><line_sep>label.props.halign=Gtk.Align.END<line_sep>widget.props.halign=Gtk.Align.START<line_sep>self.append_entry(label widget setting=setting)<block_end><def_stmt>on_validate self<block_start>""" Validate all the settings, raising an exception if something is wrong """<line_sep><pass><block_end><def_stmt>on_apply self<block_start>""" Validate all the settings, raising an exception if something is wrong """<line_sep><pass><block_end><def_stmt>set_defaults self<block_start>""" Set all the settings to the default values. """<for_stmt>setting self._managed_settings<block_start>logging.debug(f"[UI] Resetting {setting} to default value")<line_sep>self._settings.reset(setting)<block_end><block_end><block_end>
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>os<import_stmt>re<import_stmt>shutil<import_stmt>string<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.keras layers<import_from_stmt>tensorflow.keras losses<import_from_stmt>tensorflow.keras preprocessing<import_from_stmt>tensorflow.keras.layers.experimental.preprocessing TextVectorization<line_sep>os.environ["CUDA_VISIBLE_DEVICES"]="-1"<line_sep>print(tf.__version__)<line_sep># https://www.tensorflow.org/tutorials/keras/text_classification url="https://ai.stanford.edu/~amaas/data/sentiment/aclImdb_v1.tar.gz"<line_sep>tf.debugging.set_log_device_placement(<true>)<line_sep>dataset=tf.keras.utils.get_file("aclImdb_v1.tar.gz" url untar=<true> cache_dir='C:/Users/haipi/AppData/Local/Temp/aclImdb_v1' cache_subdir='')<line_sep>dataset_dir=os.path.join(os.path.dirname(dataset) 'aclImdb')<line_sep>train_dir=os.path.join(dataset_dir 'train')<line_sep>sample_file=os.path.join(train_dir 'pos/0_9.txt')<with_stmt>open(sample_file)<as>f<block_start>print(f.read())<block_end># remove_dir = os.path.join(train_dir, 'unsup') # shutil.rmtree(remove_dir) batch_size=32<line_sep>seed=42<line_sep>raw_train_ds=tf.keras.preprocessing.text_dataset_from_directory(train_dir batch_size=batch_size validation_split=0.2 subset='training' seed=seed)<for_stmt>text_batch,label_batch raw_train_ds.take(1)<block_start><for_stmt>i range(3)<block_start>print("Review" text_batch.numpy()[i])<line_sep>print("Label" label_batch.numpy()[i])<block_end><block_end>print("Label 0 corresponds to" raw_train_ds.class_names[0])<line_sep>print("Label 1 corresponds to" raw_train_ds.class_names[1])<line_sep>raw_val_ds=tf.keras.preprocessing.text_dataset_from_directory(train_dir batch_size=batch_size validation_split=0.2 subset='validation' seed=seed)<line_sep>test_dir=os.path.join(dataset_dir 'test')<line_sep>raw_test_ds=tf.keras.preprocessing.text_dataset_from_directory(test_dir batch_size=batch_size)<def_stmt>custom_standardization input_data<block_start>lowercase=tf.strings.lower(input_data)<line_sep>stripped_html=tf.strings.regex_replace(lowercase '<br />' ' ')<line_sep><return>tf.strings.regex_replace(stripped_html '[%s]'%re.escape(string.punctuation) '')<block_end>max_features=10000<line_sep>sequence_length=250<line_sep>vectorize_layer=TextVectorization(standardize=custom_standardization max_tokens=max_features output_mode='int' output_sequence_length=sequence_length)<line_sep># Make a text-only dataset (without labels), then call adapt train_text=raw_train_ds.map(<lambda>x y:x)<line_sep>vectorize_layer.adapt(train_text)<def_stmt>vectorize_text text label<block_start>text=tf.expand_dims(text -1)<line_sep><return>vectorize_layer(text) label<block_end># retrieve a batch (of 32 reviews and labels) from the dataset text_batch,label_batch=next(iter(raw_train_ds))<line_sep>first_review,first_label=text_batch[0] label_batch[0]<line_sep>print("Review" first_review)<line_sep>print("Label" raw_train_ds.class_names[first_label])<line_sep>print("Vectorized review" vectorize_text(first_review first_label))<line_sep>print("1287 ---> " vectorize_layer.get_vocabulary()[1287])<line_sep>print(" 313 ---> " vectorize_layer.get_vocabulary()[313])<line_sep>print('Vocabulary size:
{}'.format(len(vectorize_layer.get_vocabulary())))<line_sep>train_ds=raw_train_ds.map(vectorize_text)<line_sep>val_ds=raw_val_ds.map(vectorize_text)<line_sep>test_ds=raw_test_ds.map(vectorize_text)<line_sep>AUTOTUNE=tf.data.AUTOTUNE<line_sep>train_ds=train_ds.cache().prefetch(buffer_size=AUTOTUNE)<line_sep>val_ds=val_ds.cache().prefetch(buffer_size=AUTOTUNE)<line_sep>test_ds=test_ds.cache().prefetch(buffer_size=AUTOTUNE)<line_sep># Create the model embedding_dim=16<line_sep>model=tf.keras.Sequential([layers.Embedding(max_features+1 embedding_dim) layers.Dropout(0.2) layers.GlobalAveragePooling1D() layers.Dropout(0.2) layers.Dense(1)])<line_sep>model.summary()<line_sep>model.compile(loss=losses.BinaryCrossentropy(from_logits=<true>) optimizer='adam' metrics=tf.metrics.BinaryAccuracy(threshold=0.0))<line_sep>epochs=30<line_sep>history=model.fit(train_ds validation_data=val_ds epochs=epochs)<line_sep>loss,accuracy=model.evaluate(test_ds)<line_sep>print("Loss: " loss)<line_sep>print("Accuracy: " accuracy)<line_sep>
# -*- coding: utf-8 -*- """TFRecords.ipynb Automatically generated by Colaboratory. Original file is located at https://colab.research.google.com/drive/1p-Nz6v3CyqKSc-QazX1FgvZkamt5T-uC """<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow keras<import_stmt>numpy<as>np<line_sep># Load MNIST data (x_train y_train),(x_test y_test)=keras.datasets.mnist.load_data()<line_sep># Preprocessing x_train=x_train/255.0<line_sep>x_test=x_test/255.0<line_sep># Track the data type dataType=x_train.dtype<line_sep>print(f"Data type: {dataType}")<line_sep>labelType=y_test.dtype<line_sep>print(f"Label type: {labelType}")<line_sep>im_list=[]<line_sep>n_samples_to_show=16<line_sep>c=0<for_stmt>i range(n_samples_to_show)<block_start>im_list.append(x_train[i])<block_end># Visualization <import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>mpl_toolkits.axes_grid1 ImageGrid<line_sep>fig=plt.figure(figsize=(4. 4.))<line_sep># Ref: https://matplotlib.org/3.1.1/gallery/axes_grid1/simple_axesgrid.html grid=ImageGrid(fig 111 # similar to subplot(111) nrows_ncols=(4 4) # creates a 4x4 grid of axes axes_pad=0.1 # pad between axes in inch. )<line_sep># Show image grid <for_stmt>ax,im zip(grid im_list)# Iterating over the grid returns the Axes. <block_start>ax.imshow(im 'gray')<block_end>plt.show()<line_sep># Convert values to compatible tf.Example types. <def_stmt>_bytes_feature value<block_start>"""Returns a bytes_list from a string / byte."""<if_stmt>isinstance(value type(tf.constant(0)))<block_start>value=value.numpy()# BytesList won't unpack a string from an EagerTensor. <block_end><return>tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))<block_end><def_stmt>_float_feature value<block_start>"""Returns a float_list from a float / double."""<line_sep><return>tf.train.Feature(float_list=tf.train.FloatList(value=[value]))<block_end><def_stmt>_int64_feature value<block_start>"""Returns an int64_list from a bool / enum / int / uint."""<line_sep><return>tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))<block_end># Create the features dictionary.
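# Each record packs the image side length, the label, and the raw pixel bytes into one tf.train.Example, which is what image_example below builds.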
<def_stmt>image_example image label dimension<block_start>feature={'dimension':_int64_feature(dimension) 'label':_int64_feature(label) 'image_raw':_bytes_feature(image.tobytes()) }<line_sep><return>tf.train.Example(features=tf.train.Features(feature=feature))<block_end>record_file='mnistTrain.tfrecords'<line_sep>n_samples=x_train.shape[0]<line_sep>dimension=x_train.shape[1]<with_stmt>tf.io.TFRecordWriter(record_file)<as>writer<block_start><for_stmt>i range(n_samples)<block_start>image=x_train[i]<line_sep>label=y_train[i]<line_sep>tf_example=image_example(image label dimension)<line_sep>writer.write(tf_example.SerializeToString())<block_end><block_end># Create the dataset object from tfrecord file(s) dataset=tf.data.TFRecordDataset(record_file)<line_sep># Decoding function <def_stmt>parse_record record<block_start>name_to_features={'dimension':tf.io.FixedLenFeature([] tf.int64) 'label':tf.io.FixedLenFeature([] tf.int64) 'image_raw':tf.io.FixedLenFeature([] tf.string) }<line_sep><return>tf.io.parse_single_example(record name_to_features)<block_end><def_stmt>decode_record record<block_start>image=tf.io.decode_raw(record['image_raw'] out_type=dataType little_endian=<true> fixed_length=<none> name=<none>)<line_sep>label=record['label']<line_sep>dimension=record['dimension']<line_sep>image=tf.reshape(image (dimension dimension))<line_sep><return>(image label)<block_end>im_list=[]<line_sep>n_samples_to_show=16<line_sep>c=0<for_stmt>record dataset<block_start>c<augadd>1<if_stmt>c<g>n_samples_to_show<block_start><break><block_end>parsed_record=parse_record(record)<line_sep>decoded_record=decode_record(parsed_record)<line_sep>image,label=decoded_record<line_sep>im_list.append(image)<block_end># Visualization <import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>mpl_toolkits.axes_grid1 ImageGrid<line_sep>fig=plt.figure(figsize=(4. 4.))<line_sep># Ref: https://matplotlib.org/3.1.1/gallery/axes_grid1/simple_axesgrid.html grid=ImageGrid(fig 111 # similar to subplot(111) nrows_ncols=(4 4) # creates a 4x4 grid of axes axes_pad=0.1 # pad between axes in inch. )<line_sep># Show image grid <for_stmt>ax,im zip(grid im_list)# Iterating over the grid returns the Axes. <block_start>ax.imshow(im 'gray')<block_end>plt.show()<line_sep>
<import_stmt>logging<import_stmt>os<import_stmt>sys<import_from_stmt>dino.config ConfigKeys<import_from_stmt>dino.environ env<line_sep>DEFAULT_DAYS=31<line_sep>logger=logging.getLogger('warm_up_cache.py')<try_stmt><block_start>days=env.config.get(ConfigKeys.WARMUP_DAYS domain=ConfigKeys.CACHE_SERVICE default=-1)<if_stmt>days<ne>-1<block_start><try_stmt><block_start>days=int(float(days))<block_end><except_stmt>Exception<as>e1<block_start>logger.error("could not parse configured days {}: {}".format(days str(e1)))<line_sep>days=-1<block_end><block_end><if_stmt>days<l>0<block_start>days=os.getenv('DINO_DAYS')<if_stmt>days<is><none><block_start><if_stmt>len(sys.argv)<g>1<block_start>days=sys.argv[1]<block_end><else_stmt><block_start>days=DEFAULT_DAYS<block_end><block_end><try_stmt><block_start>days=int(float(days))<block_end><except_stmt>ValueError<as>e<block_start>logger.error("invalid days: {}: {}, using default value of {}".format(days str(e) DEFAULT_DAYS))<line_sep>days=DEFAULT_DAYS<block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>logger.error("could not get days: {}".format(str(e)))<line_sep>days=DEFAULT_DAYS<block_end>logger.info('caching all user ids...')<line_sep># not needed for wio <if_stmt>'wio'<not><in>os.getenv('DINO_ENVIRONMENT' '')<block_start><try_stmt><block_start>all_users=env.db.get_all_user_ids()<line_sep>logger.info('caching all user roles ({})...'.format(len(all_users)))<line_sep>env.db.get_users_roles(all_users)<block_end><except_stmt>NotImplementedError<block_start><pass><block_end>logger.info('caching all rooms...')<try_stmt><block_start>channels=env.db.get_channels()<line_sep>logger.info('caching all rooms for channels ({})...'.format(len(channels)))<for_stmt>channel_id channels.keys()<block_start>env.db.rooms_for_channel(channel_id)<line_sep>env.db.get_acls_in_channel_for_action(channel_id 'list')<block_end><block_end><except_stmt>NotImplementedError<block_start><pass><block_end><block_end>logger.info('caching last {} days of online time...'.format(days))<try_stmt><block_start>last_online_times=env.db.get_last_online_since(days=days)<line_sep>logger.info('caching all last online time for {} users...'.format(len(last_online_times)))<line_sep>env.cache.set_last_online(last_online_times)<block_end><except_stmt>NotImplementedError<block_start><pass><block_end>logger.info('done! cache warmed up')<line_sep>
<import_from_stmt>scipy.sparse csr_matrix lil_matrix<line_sep>l=[[0 10 20] [30 0 0] [0 0 0]]<line_sep>csr=csr_matrix(l)<line_sep>print(csr)<line_sep># (0, 1) 10 # (0, 2) 20 # (1, 0) 30 print(type(csr))<line_sep># <class 'scipy.sparse.csr.csr_matrix'> lil=csr.tolil()<line_sep>print(lil)<line_sep># (0, 1) 10 # (0, 2) 20 # (1, 0) 30 print(type(lil))<line_sep># <class 'scipy.sparse.lil.lil_matrix'> lil=lil_matrix(csr)<line_sep>print(lil)<line_sep># (0, 1) 10 # (0, 2) 20 # (1, 0) 30 print(type(lil))<line_sep># <class 'scipy.sparse.lil.lil_matrix'> lil[0 0]=100<line_sep>print(lil.toarray())<line_sep># [[100 10 20] # [ 30 0 0] # [ 0 0 0]] print(csr.toarray())<line_sep># [[ 0 10 20] # [30 0 0] # [ 0 0 0]] lil2=lil_matrix(lil)<line_sep>print(lil2.toarray())<line_sep># [[100 10 20] # [ 30 0 0] # [ 0 0 0]] lil[0 0]=0<line_sep>print(lil2.toarray())<line_sep># [[ 0 10 20] # [30 0 0] # [ 0 0 0]] lil2_copy=lil_matrix(lil copy=<true>)<line_sep>print(lil2_copy.toarray())<line_sep># [[ 0 10 20] # [30 0 0] # [ 0 0 0]] lil[0 0]=100<line_sep>print(lil2_copy.toarray())<line_sep># [[ 0 10 20] # [30 0 0] # [ 0 0 0]]
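# Note: lil_matrix(csr) converts formats and therefore copies the data, while lil_matrix(lil) shares data with the source unless copy=True -- which is why lil2 reflected the in-place edit above but lil2_copy did not.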
<import_from_stmt>amodem calib<import_from_stmt>amodem common<import_from_stmt>amodem config<import_from_stmt>io BytesIO<import_stmt>numpy<as>np<import_stmt>random<import_stmt>pytest<import_stmt>mock<line_sep>config=config.fastest()<class_stmt>ProcessMock<block_start><def_stmt>__init__ self<block_start>self.buf=BytesIO()<line_sep>self.stdin=self<line_sep>self.stdout=self<line_sep>self.bytes_per_sample=2<block_end><def_stmt>write self data<block_start><assert_stmt>self.buf.tell()<l>10e6<line_sep>self.buf.write(data)<block_end><def_stmt>read self n<block_start><return>self.buf.read(n)<block_end><block_end><def_stmt>test_success <block_start>p=ProcessMock()<line_sep>calib.send(config p gain=0.5 limit=32)<line_sep>p.buf.seek(0)<line_sep>calib.recv(config p)<block_end><def_stmt>test_too_strong <block_start>p=ProcessMock()<line_sep>calib.send(config p gain=1.001 limit=32)<line_sep>p.buf.seek(0)<for_stmt>r calib.detector(config src=p)<block_start><assert_stmt><not>r['success']<assert_stmt>r['msg']<eq>'too strong signal'<block_end><block_end><def_stmt>test_too_weak <block_start>p=ProcessMock()<line_sep>calib.send(config p gain=0.01 limit=32)<line_sep>p.buf.seek(0)<for_stmt>r calib.detector(config src=p)<block_start><assert_stmt><not>r['success']<assert_stmt>r['msg']<eq>'too weak signal'<block_end><block_end><def_stmt>test_too_noisy <block_start>r=random.Random(0)# generate random binary signal signal=np.array([r.choice([-1 1])<for>i range(int(config.Fs))])<line_sep>src=BytesIO(common.dumps(signal<times>0.5))<for_stmt>r calib.detector(config src=src)<block_start><assert_stmt><not>r['success']<assert_stmt>r['msg']<eq>'too noisy signal'<block_end><block_end><def_stmt>test_errors <block_start><class_stmt>WriteError(ProcessMock)<block_start><def_stmt>write self data<block_start><raise>KeyboardInterrupt()<block_end><block_end>p=WriteError()<with_stmt>pytest.raises(KeyboardInterrupt)<block_start>calib.send(config p limit=32)<block_end><assert_stmt>p.buf.tell()<eq>0<class_stmt>ReadError(ProcessMock)<block_start><def_stmt>read self n<block_start><raise>KeyboardInterrupt()<block_end><block_end>p=ReadError()<with_stmt>pytest.raises(KeyboardInterrupt)<block_start>calib.recv(config p verbose=<true>)<block_end><assert_stmt>p.buf.tell()<eq>0<block_end>@pytest.fixture(params=[0]+[sign<times>mag<for>sign (+1 -1)<for>mag (0.1 1 10 100 1e3 2e3)])<def_stmt>freq_err request<block_start><return>request.param<times>1e-6<block_end><def_stmt>test_drift freq_err<block_start>freq=config.Fc<times>(1+freq_err/1e6)<line_sep>t=np.arange(int(1.0<times>config.Fs))<times>config.Ts<line_sep>frame_length=100<line_sep>rms=0.5<line_sep>signal=rms<times>np.cos(2<times>np.pi<times>freq<times>t)<line_sep>src=BytesIO(common.dumps(signal))<line_sep>iters=0<for_stmt>r calib.detector(config src frame_length=frame_length)<block_start><assert_stmt>r['success']<is><true><assert_stmt>abs(r['rms']-rms)<l>1e-3<assert_stmt>abs(r['total']-rms)<l>1e-3<line_sep>iters<augadd>1<block_end><assert_stmt>iters<g>0<assert_stmt>iters<eq>config.baud/frame_length<block_end><def_stmt>test_volume <block_start><with_stmt>mock.patch('subprocess.check_call')<as>check_call<block_start>ctl=calib.volume_controller('volume-control')<line_sep>ctl(0.01)<line_sep>ctl(0.421)<line_sep>ctl(0.369)<line_sep>ctl(1)<assert_stmt>check_call.mock_calls<eq>[mock.call(shell=<true> args='volume-control 1%') mock.call(shell=<true> args='volume-control 42%') mock.call(shell=<true> args='volume-control 37%') mock.call(shell=<true> args='volume-control 
100%')]<with_stmt>pytest.raises(AssertionError)<block_start>ctl(0)<block_end><with_stmt>pytest.raises(AssertionError)<block_start>ctl(-0.5)<block_end><with_stmt>pytest.raises(AssertionError)<block_start>ctl(12.3)<block_end><block_end><block_end><def_stmt>test_send_max_volume <block_start><with_stmt>mock.patch('subprocess.check_call')<as>check_call<block_start>calib.send(config dst=BytesIO() volume_cmd='ctl' limit=1)<block_end><assert_stmt>check_call.mock_calls<eq>[mock.call(shell=<true> args='ctl 100%')]<block_end><def_stmt>test_recv_binary_search <block_start>buf=BytesIO()<line_sep>gains=[0.5 0.25 0.38 0.44 0.41 0.39 0.40 0.40]<for_stmt>gain gains<block_start>calib.send(config buf gain=gain limit=2)<block_end>buf.seek(0)<line_sep>dump=BytesIO()<with_stmt>mock.patch('subprocess.check_call')<as>check_call<block_start>calib.recv(config src=buf volume_cmd='ctl' dump_audio=dump)<block_end><assert_stmt>dump.getvalue()<eq>buf.getvalue()<line_sep>gains.append(gains[-1])<line_sep>fmt='ctl {0:.0f}%'<line_sep>expected=[mock.call(shell=<true> args=fmt.format(100<times>g))<for>g gains]<assert_stmt>check_call.mock_calls<eq>expected<block_end><def_stmt>test_recv_freq_change <block_start>p=ProcessMock()<line_sep>calib.send(config p gain=0.5 limit=2)<line_sep>offset=p.buf.tell()<floordiv>16<line_sep>p.buf.seek(offset)<line_sep>messages=[state['msg']<for>state calib.recv_iter(config p)]<assert_stmt>messages<eq>['good signal' 'good signal' 'good signal' 'frequency change' 'good signal' 'good signal' 'good signal']<block_end>
<def_stmt>spam gen<block_start><yield><from>gen<block_end>
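# "yield from" delegates iteration (including send() and throw()) to the wrapped generator, so spam(gen) yields exactly the values of gen.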
# Copyright 2016 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_from_stmt>contrib.cluster_telemetry loading_base_ct<line_sep># pylint: disable=protected-access <class_stmt>LoadingClusterTelemetry(loading_base_ct._LoadingBaseClusterTelemetry)<block_start>@classmethod<def_stmt>Name cls<block_start><return>'loading.cluster_telemetry'<block_end><block_end>
### # transform original gt to location gt ### <import_stmt>os<def_stmt>rawGT_to_locGT in_path out_path<block_start><if_stmt><not>os.path.exists(out_path)<block_start>os.mkdir(out_path)<block_end>files_list=os.listdir(in_path)<for_stmt>name files_list<block_start>in_file=os.path.join(in_path name)<line_sep>out_file=os.path.join(out_path 'gt_'+name)<line_sep>f1=open(in_file 'r')<line_sep>#f1 = codecs.open(in_file, 'r', 'utf-8-sig') lines=f1.readlines()<line_sep>f1.close()<line_sep>f2=open(out_file 'w+')<line_sep>#print("img %s %s" % (in_file, lines)) <for_stmt>line lines<block_start>line=line.strip()<if_stmt>line.split(',')[-2]<eq>'Arabic'<block_start><continue><block_end>loc=line.split(',')[:8]<line_sep>str1=",".join(loc)<line_sep>str1=str1.strip()<line_sep>#print("img %s raw str is %s" % (in_file, line)) #print("img %s aft str is %s" % (in_file, str1)) f2.write(str1)<line_sep>f2.write('\n')<block_end>f2.close()<block_end><block_end>rawGT_to_locGT('/home/ljs/OCR_dataset/ali_ocr/train_1000/txt_1000' '/home/ljs/data_ready/ali_icpr/gt_1000')<line_sep>#rawGT_to_locGT('/home/ljs/OCR_dataset/MLT/val_gt', '/home/ljs/OCR_dataset/MLT/val_loc_gt')
<def_stmt>foo <block_start><global>bar<def_stmt>bar <block_start><pass><block_end><block_end>
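# The "global bar" declaration makes the inner def bind the module-level name "bar" when foo() is called; before that call, bar is undefined at module scope.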
# flake8: noqa # import pandas as pd # import pytest # # from catalyst.contrib.utils.pandas import ( # folds_to_list, # split_dataframe_on_stratified_folds, # ) # # # def test_folds_to_list(): # """@TODO: Docs. Contribution is welcome.""" # assert folds_to_list("1,2,1,3,4,2,4,6") == [1, 2, 3, 4, 6] # assert folds_to_list([1, 2, 3.0, 5, 2, 1]) == [1, 2, 3, 5] # assert folds_to_list([]) == [] # # with pytest.raises(ValueError): # folds_to_list([1, "True", 3.0, None, 2, 1]) # # # def _setup_data(num_rows=10): # df_data = [] # for i in range(num_rows): # if i < (num_rows / 2): # df_data.append(["ants", "%s.jpg" % i, 0]) # else: # df_data.append(["bees", "%s.jpg" % i, 1]) # return pd.DataFrame(df_data, columns=["tag", "filepath", "class"]) # # # def test_stratified_fold_split(): # """@TODO: Docs. Contribution is welcome.""" # df = _setup_data() # # splitted = split_dataframe_on_stratified_folds( # dataframe=df, class_column="class" # ) # # assert int == splitted["fold"].dtype # assert set(range(5)) == set(splitted["fold"].unique()) # ants_folds = set(splitted[splitted["tag"] == "ants"]["fold"]) # bees_folds = set(splitted[splitted["tag"] == "bees"]["fold"]) # assert ants_folds == bees_folds # # # def test_stratified_fold_split_num_folds(): # """@TODO: Docs. Contribution is welcome.""" # df = _setup_data() # # splitted = split_dataframe_on_stratified_folds(df, "class", n_folds=2) # # assert set(range(2)) == set(splitted["fold"].unique())
# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"). You # may not use this file except in compliance with the License. A copy of # the License is located at # # http://aws.amazon.com/apache2.0/ # # or in the "license" file accompanying this file. This file is # distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF # ANY KIND, either express or implied. See the License for the specific # language governing permissions and limitations under the License. INSTANCE_TAIL_LOGS_RESPONSE="""------------------------------------- /var/log/awslogs.log ------------------------------------- {'skipped_events_count': 0, 'first_event': {'timestamp': 1522962583519, 'start_position': 559799L, 'end_position': 560017L}, 'fallback_events_count': 0, 'last_event': {'timestamp': 1522962583519, 'start_position': 559799L, 'end_position': 560017L}, 'source_id': '77b026040b93055eb448bdc0b59e446f', 'num_of_events': 1, 'batch_size_in_bytes': 243} ------------------------------------- /var/log/httpd/error_log ------------------------------------- [Thu Apr 05 19:54:23.624780 2018] [mpm_prefork:warn] [pid 3470] AH00167: long lost child came home! (pid 3088) ------------------------------------- /var/log/httpd/access_log ------------------------------------- 172.31.69.153 (172.16.58.3) - - [05/Apr/2018:20:57:55 +0000] "HEAD /pma/ HTTP/1.1" 404 - "-" "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36" ------------------------------------- /var/log/eb-activity.log ------------------------------------- + chown -R webapp:webapp /var/app/ondeck [2018-04-05T19:54:21.630Z] INFO [3555] - [Application update app-180406_044630@3/AppDeployStage0/AppDeployPreHook/02_setup_envvars.sh] : Starting activity... ------------------------------------- /tmp/sample-app.log ------------------------------------- 2018-04-05 20:52:51 Received message: \\xe2\\x96\\x88\\xe2 ------------------------------------- /var/log/eb-commandprocessor.log ------------------------------------- [2018-04-05T19:45:05.526Z] INFO [2853] : Running 2 of 2 actions: AppDeployPostHook..."""<line_sep>REQUEST_ENVIRONMENT_INFO_RESPONSE={"EnvironmentInfo":[{"InfoType":"tail" "Ec2InstanceId":"i-024a31a441247971d" "SampleTimestamp":"2018-04-06T01:05:43.875Z" "Message":"https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com"} {"InfoType":"tail" "Ec2InstanceId":"i-0dce0f6c5e2d5fa48" "SampleTimestamp":"2018-04-06T01:05:43.993Z" "Message":"https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com"} {"InfoType":"tail" "Ec2InstanceId":"i-090689581e5afcfc6" "SampleTimestamp":"2018-04-06T01:05:43.721Z" "Message":"https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com"} {"InfoType":"tail" "Ec2InstanceId":"i-053efe7c102d0a540" "SampleTimestamp":"2018-04-06T01:05:43.900Z" "Message":"https://elasticbeanstalk-us-east-1-123123123123.s3.amazonaws.com"}]}<line_sep>
<import_from_stmt>rest_framework serializers<import_from_stmt>usaspending_api.common.serializers LimitableSerializer<import_from_stmt>usaspending_api.references.models Cfda ObjectClass RefProgramActivity SubtierAgency ToptierAgency<class_stmt>ToptierAgencySerializer(LimitableSerializer)<block_start><class_stmt>Meta<block_start>model=ToptierAgency<line_sep>fields="__all__"<line_sep>default_fields=["toptier_code" "name" "abbreviation"]<block_end><block_end><class_stmt>SubtierAgencySerializer(LimitableSerializer)<block_start><class_stmt>Meta<block_start>model=SubtierAgency<line_sep>fields="__all__"<line_sep>default_fields=["subtier_code" "name" "abbreviation"]<block_end><block_end><class_stmt>CfdaSerializer(LimitableSerializer)<block_start><class_stmt>Meta<block_start>model=Cfda<line_sep>fields="__all__"<line_sep>default_fields=["id" "program_number" "program_title" "popular_name" "website_address" "objectives"]<block_end><block_end><class_stmt>ProgramActivitySerializer(LimitableSerializer)<block_start><class_stmt>Meta<block_start>model=RefProgramActivity<line_sep>fields=("id" "program_activity_code" "program_activity_name")<block_end><block_end><class_stmt>ObjectClassSerializer(LimitableSerializer)<block_start><class_stmt>Meta<block_start>model=ObjectClass<line_sep>fields=("id" "major_object_class" "major_object_class_name" "object_class" "object_class_name")<block_end><block_end><class_stmt>FilterSerializer(serializers.Serializer)<block_start>hash=serializers.CharField()<block_end><class_stmt>HashSerializer(serializers.Serializer)<block_start>json_str=serializers.CharField()<block_end>
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # <def_stmt>f_gold s<block_start>n=len(s)<line_sep>sub_count=(n<times>(n+1))<floordiv>2<line_sep>arr=[0]<times>sub_count<line_sep>index=0<for_stmt>i range(n)<block_start><for_stmt>j range(1 n-i+1)<block_start>arr[index]=s[i:i+j]<line_sep>index<augadd>1<line_sep><block_end><block_end>arr.sort()<line_sep>res=""<for_stmt>i range(sub_count)<block_start>res<augadd>arr[i]<line_sep><block_end><return>res<line_sep><block_end>#TOFILL <if_stmt>__name__<eq>'__main__'<block_start>param=[('sqGOi' ) ('848580' ) ('01001110011001' ) ('ZhWXUKmeiI' ) ('0917296541285' ) ('01101001111100' ) ('tjP kR' ) ('999907' ) ('011100' ) ('qJPHNSJOUj' )]<line_sep>n_success=0<for_stmt>i,parameters_set enumerate(param)<block_start><if_stmt>f_filled(*parameters_set)<eq>f_gold(*parameters_set)<block_start>n_success<augadd>1<block_end><block_end>print("#Results: %i, %i"%(n_success len(param)))<block_end>
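# Added hedged check (not part of the original benchmark file, which deliberately leaves f_filled unimplemented at the #TOFILL marker): assumes f_gold from the snippet above is in scope. For "ab" the substrings are "a", "ab", "b"; sorted and concatenated by f_gold they give "aabb". <assert_stmt>f_gold('ab')<eq>'aabb'<line_sep>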
<import_stmt>os<line_sep># How much to delay scheduled tasks for testing purposes. # Note that on macOS Catalina, when using an unsigned Python version, taskgated # (com.apple.securityd) needs to approve launching the process. We therefore # need ample time here (> 0.3s) in order to prevent test failures. DELAY=0.4<line_sep># Redis database number which will be wiped and used for the tests TEST_DB=int(os.environ.get('REDIS_DB' 7))<line_sep># Redis hostname REDIS_HOST=os.environ.get('REDIS_HOST' 'localhost')<line_sep>
<import_from_stmt>pathlib Path<import_stmt>csv<import_stmt>json<import_stmt>jimi<class_stmt>_storageTrigger(jimi.trigger._trigger)<block_start>storage_id=str()<line_sep>file_type="csv"<def_stmt>doCheck self<block_start>self.result={"events":[] "var":{} "plugin":{}}<line_sep>storageFile=jimi.storage._storage().getAsClass(id=self.storage_id)<try_stmt><block_start>storageFile=storageFile[0]<with_stmt>open(Path(storageFile.getLocalFilePath()) 'r')<as>f<block_start><if_stmt>self.file_type<eq>"csv"<block_start>self.result["events"]=list(csv.DictReader(f))<block_end><elif_stmt>self.file_type<eq>"json"<block_start>self.result["events"]=json.load(f)<block_end><elif_stmt>self.file_type<eq>"txt"<block_start>self.result["events"]=f.readlines()<block_end><block_end><block_end><except_stmt>Exception<block_start># a missing or unreadable storage file simply yields no events <pass><block_end><return>self.result["events"]<block_end><block_end>
<import_from_stmt>..utils execute<def_stmt>mkdir folder_path recursive=<false><block_start>execute('mkdir '+('-p '<if>recursive<else>' ')+('"%s" '%folder_path))<block_end><def_stmt>rm item_path recursive=<false> force=<false> under=<false><block_start>execute('rm '+('-r '<if>recursive<else>' ')+('-f '<if>force<else>' ')+(('"%s" '%item_path)<if><not>under<else>('"%s"/* '%item_path)))<block_end><def_stmt>touch file_path<block_start>execute('touch "%s"'%file_path)<block_end><def_stmt>mv old_path new_path<block_start>execute('mv "%s" "%s"'%(old_path new_path))<block_end><def_stmt>truncate file_path size<block_start>execute('truncate -s "%s" "%s"'%(size file_path))<block_end><def_stmt>fallocate file_path size<block_start>execute('fallocate -l "%s" "%s"'%(size file_path))<block_end><def_stmt>head file_path size=<none> write_to=<none> append_to=<none><block_start><return>execute('head '+(('-c %s '%size)<if>size<else>' ')+('"%s" '%file_path)+(('> "%s" '%write_to)<if>write_to<else>' ')+(('>> "%s" '%append_to)<if>append_to<else>' '))<block_end><def_stmt>tail file_path size=<none> write_to=<none> append_to=<none><block_start><return>execute('tail '+(('-c %s '%size)<if>size<else>' ')+('"%s" '%file_path)+(('> "%s" '%write_to)<if>write_to<else>' ')+(('>> "%s" '%append_to)<if>append_to<else>' '))<block_end><def_stmt>cp source_file_path destination_file_path<block_start>execute('cp "%s" "%s"'%(source_file_path destination_file_path))<block_end>
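# Added hedged sketch (not part of the original module): build_mkdir is a hypothetical stand-in that reproduces the exact string-building pattern the wrappers above hand to execute, so the composed shell command can be inspected without shelling out. <def_stmt>build_mkdir folder_path recursive=<false><block_start><return>'mkdir '+('-p '<if>recursive<else>' ')+('"%s" '%folder_path)<block_end><assert_stmt>build_mkdir('/tmp/demo' recursive=<true>)<eq>'mkdir -p "/tmp/demo" '<line_sep><assert_stmt>build_mkdir('/tmp/demo')<eq>'mkdir  "/tmp/demo" '<line_sep>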
# coding:utf-8 # Purpose: act as the unified configuration loader, which controls updating and exporting the configuration file # Step 1: file check - first check whether a configuration file object exists; if not, download the configuration file and then update the file object # Step 2: time check - fetch the cache time from the requested file and compare it against the file's creation time; if the cache time is newer, trigger an update <import_stmt>datetime<import_stmt>pathlib<import_stmt>os<class_stmt>FileUpdate(object)<block_start><def_stmt>__init__ self<block_start>self.time=datetime.datetime.now()<block_end><def_stmt>__get__ self instance instance_type<block_start><pass><block_end><def_stmt>__set__ self instance value<block_start><pass><block_end><block_end><class_stmt>Config(object)<block_start><def_stmt>__init__ self<block_start>self.H5_configURL=''<line_sep>self.Config_manager={}<block_end><def_stmt>getConfig self name<block_start><return>self.Config_manager.get(name '')<block_end><block_end><class_stmt>TSDKError(Exception)<block_start><def_stmt>__init__ self errInfo<block_start>super().__init__(errInfo)<line_sep>self.errinfo=errInfo<block_end><def_stmt>__setError self<block_start><pass><block_end><def_stmt>__str__ self<block_start><return>self.errinfo<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><pass><block_end>
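# Added hedged sketch: minimal use of the Config and TSDKError classes defined above; the "app_id" key is hypothetical. cfg=Config()<line_sep>cfg.Config_manager['app_id']='12345'<line_sep><assert_stmt>cfg.getConfig('app_id')<eq>'12345'<line_sep><assert_stmt>cfg.getConfig('missing')<eq>''# getConfig falls back to an empty string <try_stmt><block_start><raise>TSDKError('config not loaded')<block_end><except_stmt>TSDKError<as>err<block_start><assert_stmt>str(err)<eq>'config not loaded'<block_end>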
<import_stmt>collections<import_from_stmt>supriya.enums CalculationRate<import_from_stmt>supriya.synthdefs MultiOutUGen<class_stmt>Pitch(MultiOutUGen)<block_start>""" :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch Pitch.ar() """<line_sep>### CLASS VARIABLES ### # a plain tuple: the property getters below look input names up with .index() _ordered_input_names=('source' 'init_frequency' 'min_frequency' 'max_frequency' 'exec_frequency' 'max_bins_per_octave' 'median' 'amp_threshold' 'peak_threshold' 'down_sample' 'clar' )<line_sep>_valid_calculation_rates=<none><line_sep>### INITIALIZER ### <def_stmt>__init__ self calculation_rate=<none> amp_threshold=0.01 clar=0 down_sample=1 exec_frequency=100 init_frequency=440 max_bins_per_octave=16 max_frequency=4000 median=1 min_frequency=60 peak_threshold=0.5 source=<none> <block_start>MultiOutUGen.__init__(self calculation_rate=calculation_rate amp_threshold=amp_threshold clar=clar down_sample=down_sample exec_frequency=exec_frequency init_frequency=init_frequency max_bins_per_octave=max_bins_per_octave max_frequency=max_frequency median=median min_frequency=min_frequency peak_threshold=peak_threshold source=source )<block_end>### PUBLIC METHODS ### @classmethod<def_stmt>kr cls amp_threshold=0.01 clar=0 down_sample=1 exec_frequency=100 init_frequency=440 max_bins_per_octave=16 max_frequency=4000 median=1 min_frequency=60 peak_threshold=0.5 source=<none> <block_start>""" Constructs a control-rate Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.kr( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch Pitch.kr() Returns ugen graph. """<import_stmt>supriya.synthdefs<line_sep>calculation_rate=supriya.CalculationRate.CONTROL<line_sep>ugen=cls._new_expanded(calculation_rate=calculation_rate amp_threshold=amp_threshold clar=clar down_sample=down_sample exec_frequency=exec_frequency init_frequency=init_frequency max_bins_per_octave=max_bins_per_octave max_frequency=max_frequency median=median min_frequency=min_frequency peak_threshold=peak_threshold source=source )<line_sep><return>ugen<block_end># def newFromDesc(): ... ### PUBLIC PROPERTIES ### @property<def_stmt>amp_threshold self<block_start>""" Gets `amp_threshold` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.amp_threshold 0.01 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('amp_threshold')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>clar self<block_start>""" Gets `clar` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... 
) >>> pitch.clar 0.0 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('clar')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>down_sample self<block_start>""" Gets `down_sample` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.down_sample 1.0 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('down_sample')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>exec_frequency self<block_start>""" Gets `exec_frequency` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.exec_frequency 100.0 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('exec_frequency')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>init_frequency self<block_start>""" Gets `init_frequency` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.init_frequency 440.0 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('init_frequency')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>max_bins_per_octave self<block_start>""" Gets `max_bins_per_octave` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.max_bins_per_octave 16.0 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('max_bins_per_octave')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>max_frequency self<block_start>""" Gets `max_frequency` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.max_frequency 4000.0 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('max_frequency')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>median self<block_start>""" Gets `median` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.median 1.0 Returns ugen input. 
"""<line_sep>index=self._ordered_input_names.index('median')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>min_frequency self<block_start>""" Gets `min_frequency` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.min_frequency 60.0 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('min_frequency')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>peak_threshold self<block_start>""" Gets `peak_threshold` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.peak_threshold 0.5 Returns ugen input. """<line_sep>index=self._ordered_input_names.index('peak_threshold')<line_sep><return>self._inputs[index]<block_end>@property<def_stmt>source self<block_start>""" Gets `source` input of Pitch. :: >>> source = supriya.ugens.In.ar(bus=0) >>> pitch = supriya.ugens.Pitch.ar( ... amp_threshold=0.01, ... clar=0, ... down_sample=1, ... exec_frequency=100, ... init_frequency=440, ... max_bins_per_octave=16, ... max_frequency=4000, ... median=1, ... min_frequency=60, ... peak_threshold=0.5, ... source=source, ... ) >>> pitch.source OutputProxy( source=In( bus=0.0, calculation_rate=CalculationRate.AUDIO, channel_count=1 ), output_index=0 ) Returns ugen input. """<line_sep>index=self._ordered_input_names.index('source')<line_sep><return>self._inputs[index]<block_end><block_end>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2018 THL A29 Limited, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<line_sep># noqa <import_stmt>json<import_from_stmt>esb.utils.base has_path_vars<import_from_stmt>common.errors error_codes<import_from_stmt>components.component BaseComponent SetupConfMixin<import_from_stmt>.toolkit configs<class_stmt>FtaComponent(BaseComponent SetupConfMixin)<block_start>sys_name=configs.SYSTEM_NAME<def_stmt>handle self# substitute any variable templates in the destination path <block_start>path=self.dest_path<if_stmt>has_path_vars(self.dest_path)<block_start>path_vars=self.request.path_vars<and>self.request.path_vars.val_dict<or>{}<try_stmt><block_start>path=self.dest_path.format(**path_vars)<block_end><except_stmt>KeyError<as>e<block_start><raise>error_codes.BUFFET_CANNOT_FORMAT_PATH.format_prompt('{%s}'%e.args[0])<block_end><block_end># build the request parameters params,data=<none> <none><if_stmt>self.dest_http_method<eq>'GET'<block_start>params=self.request.kwargs<line_sep>headers={'Content-Type':'application/x-www-form-urlencoded'}<block_end><elif_stmt>self.dest_http_method<eq>'POST'<block_start>data=json.dumps(self.request.kwargs)<line_sep>headers={'Content-Type':'application/json'}<block_end><if_stmt>'X-Secret'<in>self.request.headers<block_start>headers.update({'X-Secret':self.request.headers['X-Secret']})<block_end># call the remote API response=self.outgoing.http_client.request(self.dest_http_method configs.host path params=params data=data headers=headers timeout=60 )<line_sep>self.response.payload=response<block_end><block_end>
# -*- coding: utf-8 -*- <import_from_stmt>itertools chain<import_stmt>typing<import_from_stmt>six iterkeys<def_stmt>are_objects_equal obj1 obj2 attributes_to_ignore=<none># type: (typing.Any, typing.Any, typing.Optional[typing.Set[typing.Text]]) -> bool <block_start>""" Helper method that checks if two objects are the same. This is very generic and basically ensures that all the attributes of both objects are defined and are the same. NOTE: Sometimes some attribute do create recursive references to the same objects and/or some objects do not define equality checks (ie. missing __eq__ method). Those attributes might be ignored via attributes_to_ignore parameter """<if_stmt>id(obj1)<eq>id(obj2)<block_start><return><true><block_end><if_stmt><not>isinstance(obj2 obj1.__class__)<block_start><return><false><block_end><if_stmt>attributes_to_ignore<is><none><block_start>attributes_to_ignore=set()<block_end><for_stmt>attr_name set(chain(iterkeys(obj1.__dict__) iterkeys(obj2.__dict__) ) )<block_start><if_stmt>attr_name<in>attributes_to_ignore<block_start><continue><block_end><try_stmt><block_start><if_stmt><not>(getattr(obj1 attr_name)<eq>getattr(obj2 attr_name))<block_start><return><false><block_end><block_end><except_stmt>AttributeError<block_start><return><false><block_end><block_end><return><true><block_end>
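# Added hedged sketch: exercises are_objects_equal from the module above on two instances of a hypothetical Point class, ignoring a non-essential attribute. <class_stmt>Point(object)<block_start><def_stmt>__init__ self x y tag<block_start>self.x=x<line_sep>self.y=y<line_sep>self.tag=tag<block_end><block_end><assert_stmt>are_objects_equal(Point(1 2 'a') Point(1 2 'b') attributes_to_ignore={'tag'})<line_sep><assert_stmt><not>are_objects_equal(Point(1 2 'a') Point(1 3 'a'))<line_sep>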
# See LICENSE for licensing information. # # Copyright (c) 2016-2021 Regents of the University of California and The Board # of Regents for the Oklahoma Agricultural and Mechanical College # (acting for and on behalf of Oklahoma State University) # All rights reserved. # <import_stmt>debug<import_stmt>bitcell_base<import_from_stmt>tech cell_properties<as>props<import_from_stmt>tech parameter drc<import_stmt>logical_effort<class_stmt>replica_bitcell_1port(bitcell_base.bitcell_base)<block_start>""" A single bit cell (6T, 8T, etc.) This module implements the single memory cell used in the design. It is a hand-made cell, so the layout and netlist should be available in the technology library. """<def_stmt>__init__ self name<block_start>super().__init__(name prop=props.bitcell_1port)<line_sep>debug.info(2 "Create replica bitcell object")<block_end><def_stmt>get_stage_effort self load<block_start>parasitic_delay=1<line_sep>size=0.5# This accounts for the bitline being drained through the access TX and internal node cin=3# Assumes a minimum-size inverter is always used. Could be specified in the tech.py file. read_port_load=0.5# min size NMOS gate load <return>logical_effort.logical_effort('bitline' size cin load+read_port_load parasitic_delay <false>)<block_end><def_stmt>input_load self<block_start>"""Return the relative capacitance of the access transistor gates"""<line_sep># FIXME: This applies to bitline capacitances as well. access_tx_cin=parameter["6T_access_size"]/drc["minwidth_tx"]<line_sep><return>2<times>access_tx_cin<block_end><def_stmt>analytical_power self corner load<block_start>"""Bitcell power in nW. Only characterizes leakage."""<import_from_stmt>tech spice<line_sep>leakage=spice["bitcell_leakage"]<line_sep>dynamic=0# FIXME total_power=self.return_power(dynamic leakage)<line_sep><return>total_power<block_end><def_stmt>build_graph self graph inst_name port_nets<block_start>"""Adds edges based on inputs/outputs. Overrides base class function."""<line_sep>self.add_graph_edges(graph port_nets)<block_end><def_stmt>is_non_inverting self<block_start>"""Return input to output polarity for module"""<line_sep><return><false><block_end><block_end>
# Copyright 2009-2017 <NAME>. # This program is distributed under the MIT license. <import_stmt>numbers<import_from_stmt>python_toolbox.dict_tools remove_keys<def_stmt>test <block_start>'''Test the basic workings of `remove_keys`.'''<line_sep>origin_dict={1:2 3:4 5:6 7:8 9:10 11:12 13:14 15:16 }<line_sep>not_divide_by_three_dict=dict(origin_dict)<line_sep>remove_keys(not_divide_by_three_dict range(0 50 3))<assert_stmt>not_divide_by_three_dict<eq>{1:2 5:6 7:8 11:12 13:14}<line_sep>below_ten_dict=dict(origin_dict)<line_sep>remove_keys(below_ten_dict <lambda>value:value<ge>10)<assert_stmt>below_ten_dict<eq>{1:2 3:4 5:6 7:8 9:10}<class_stmt>HoledNumbersContainer<block_start>'''Contains only numbers that have a digit with a hole in it.'''<def_stmt>__contains__ self number<block_start><if_stmt><not>isinstance(number numbers.Integral)<block_start><return><false><block_end><return>bool(set(str(number)).intersection({'0' '4' '6' '8' '9'}))<block_end><block_end>non_holed_numbers_dict=dict(origin_dict)<line_sep>remove_keys(non_holed_numbers_dict HoledNumbersContainer())<assert_stmt>non_holed_numbers_dict<eq>{1:2 3:4 5:6 7:8 11:12 13:14 15:16 }<block_end>
<import_stmt>dash_bootstrap_components<as>dbc<import_from_stmt>dash html<line_sep>spinners=html.Div([dbc.Spinner(size="sm") html.Hr() dbc.Spinner(spinner_style={"width":"3rem" "height":"3rem"}) ])<line_sep>
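# Added hedged sketch: minimal Dash app wiring for the spinners layout above (assumes dash and dash-bootstrap-components are installed; the Bootstrap stylesheet is needed for the spinner styling, and run_server is the classic pre-Dash-2.16 entry point). <import_stmt>dash<line_sep>app=dash.Dash(__name__ external_stylesheets=[dbc.themes.BOOTSTRAP])<line_sep>app.layout=spinners<line_sep><if_stmt>__name__<eq>'__main__'<block_start>app.run_server(debug=<true>)<block_end>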
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. # SPDX-License-Identifier: Apache-2.0 """ Amazon lookout for Vision dataset code examples used in the service documentation: https://docs.aws.amazon.com/lookout-for-vision/latest/developer-guide/model-create-dataset.html Shows how to create and manage datasets. Also, how to create a manifest file and upload to an S3 bucket. """<import_stmt>logging<import_stmt>time<import_from_stmt>datetime datetime<import_stmt>os<import_stmt>json<import_stmt>boto3<import_from_stmt>botocore.exceptions ClientError<line_sep>logger=logging.getLogger(__name__)<class_stmt>Datasets<block_start>""" Provides example functions for creating, listing and deleting Amazon Lookout for Vision datasets. Also shows how to create a manifest file in an S3 bucket. """<line_sep>@staticmethod<def_stmt>create_dataset lookoutvision_client project_name manifest_file dataset_type<block_start>""" Creates a new Amazon Lookout for Vision dataset :param lookoutvision_client: The Amazon Lookout for Vision Boto3 client. :param project_name: The name of the project in which you want to create a dataset. :param bucket: The bucket that contains the manifest file. :param manifest_file: The path and name of the manifest file. :param dataset_type: The type of the dataset (train or test). """<try_stmt><block_start>bucket,key=manifest_file.replace("s3://" "").split("/" 1)<line_sep># Create a dataset logger.info("Creating %s dataset type..." dataset_type)<line_sep>dataset={"GroundTruthManifest":{"S3Object":{"Bucket":bucket "Key":key}}}<line_sep>response=lookoutvision_client.create_dataset(ProjectName=project_name DatasetType=dataset_type DatasetSource=dataset )<line_sep>logger.info("Dataset Status: %s" response["DatasetMetadata"]["Status"])<line_sep>logger.info("Dataset Status Message: %s" response["DatasetMetadata"]["StatusMessage"] )<line_sep>logger.info("Dataset Type: %s" response["DatasetMetadata"]["DatasetType"])<line_sep># Wait until either created or failed. finished=<false><line_sep>status=""<while_stmt>finished<is><false><block_start>dataset_description=lookoutvision_client.describe_dataset(ProjectName=project_name DatasetType=dataset_type)<line_sep>status=dataset_description["DatasetDescription"]["Status"]<if_stmt>status<eq>"CREATE_IN_PROGRESS"<block_start>logger.info("Dataset creation in progress...")<line_sep>time.sleep(2)<line_sep><continue><block_end><if_stmt>status<eq>"CREATE_COMPLETE"<block_start>logger.info("Dataset created.")<line_sep>finished=<true><line_sep><continue><block_end>logger.info("Dataset creation failed: %s" dataset_description["DatasetDescription"]["StatusMessage"] )<line_sep>finished=<true><block_end><if_stmt>status<ne>"CREATE_COMPLETE"<block_start>message=dataset_description["DatasetDescription"]["StatusMessage"]<line_sep>logger.exception("Couldn't create dataset: %s" message)<line_sep><raise>Exception(f"Couldn't create dataset: {message}")<block_end><block_end><except_stmt>ClientError<as>err<block_start>logger.exception("Service error: Couldn't create dataset: %s" err.response["Message"])<line_sep><raise><block_end><block_end>@staticmethod<def_stmt>create_manifest_file_s3 s3_resource image_s3_path manifest_s3_path<block_start>""" Creates a manifest file and uploads to S3. :param image_s3_path: The S3 path to the images referenced by the manifest file. The images must be in an S3 bucket with the following folder structure. s3://my-bucket/<train or test>/ normal/ anomaly/ Place normal images in the normal folder. 
Anomalous images in the anomaly folder. https://docs.aws.amazon.com/lookout-for-vision/latest/developer-guide/create-dataset-s3.html :param manifest_s3_path: The S3 location in which to store the created manifest file. """<try_stmt><block_start>output_manifest_file="temp.manifest"<line_sep># Current date and time in manifest file format dttm=datetime.now().strftime("%Y-%m-%dT%H:%M:%S.%f")<line_sep># get bucket and folder from image and manifest file paths bucket,prefix=image_s3_path.replace("s3://" "").split("/" 1)<line_sep>manifest_bucket,manifest_prefix=manifest_s3_path.replace("s3://" "").split("/" 1)<line_sep># create local temp manifest file <with_stmt>open(output_manifest_file "w")<as>mfile<block_start>logger.info("Creating manifest file")<line_sep># create JSON lines for anomalous images src_bucket=s3_resource.Bucket(bucket)<line_sep># create json lines for abnormal images. <for_stmt>obj src_bucket.objects.filter(Prefix=prefix+"anomaly/" Delimiter="/")<block_start>image_path=f"s3://{src_bucket.name}/{obj.key}"<line_sep>manifest=Datasets.create_json_line(image_path "anomaly" dttm)<line_sep>mfile.write(json.dumps(manifest)+"\n")<block_end># create json lines for normal images <for_stmt>obj src_bucket.objects.filter(Prefix=prefix+"normal/" Delimiter="/")<block_start>image_path=f"s3://{src_bucket.name}/{obj.key}"<line_sep>manifest=Datasets.create_json_line(image_path "normal" dttm)<line_sep>mfile.write(json.dumps(manifest)+"\n")<block_end><block_end># copy local manifest to target S3 location logger.info("Uploading manifest file to %s" manifest_s3_path)<line_sep>s3_resource.Bucket(manifest_bucket).upload_file(output_manifest_file manifest_prefix)<line_sep># delete local manifest file os.remove(output_manifest_file)<block_end><except_stmt>ClientError<as>err<block_start>logger.exception("S3 Service Error: %s" format(err))<line_sep><raise><block_end><except_stmt>Exception<as>err<block_start>logger.exception(format(err))<line_sep><raise><block_end><else_stmt><block_start>logger.info("Completed manifest file creation and upload.")<block_end><block_end>@staticmethod<def_stmt>create_json_line image class_name dttm<block_start>""" Creates a single JSON line for an image. :param image: The S3 location for the image. :param label: The label for the image (normal or anomaly) :param dttm: The date and time that the JSON is created. """<line_sep>label=0<if_stmt>class_name<eq>"normal"<block_start>label=0<block_end><elif_stmt>class_name<eq>"anomaly"<block_start>label=1<block_end><else_stmt><block_start>logger.exception("Unexpected label value: %s for %s" str(label) image)<line_sep><raise>Exception("Unexpected label value: {} for {}".format(str(label) image))<block_end>manifest={"source-ref":image "anomaly-label":label "anomaly-label-metadata":{"confidence":1 "job-name":"labeling-job/anomaly-label" "class-name":class_name "human-annotated":"yes" "creation-date":dttm "type":"groundtruth/image-classification" } }<line_sep><return>manifest<block_end>@staticmethod<def_stmt>delete_dataset lookoutvision_client project_name dataset_type<block_start>""" Deletes an Amazon Lookout for Vision dataset :param lookoutvision_client: The Amazon Lookout for Vision Boto3 client. :param project_name: The name of the project that contains the dataset that you want to delete. :param dataset_type: The type (train or test) of the dataset that you want to delete. """<try_stmt># Delete the dataset <block_start>logger.info("Deleting the %s dataset for project %s." 
dataset_type project_name)<line_sep>lookoutvision_client.delete_dataset(ProjectName=project_name DatasetType=dataset_type)<line_sep>logger.info("Dataset deleted.")<block_end><except_stmt>ClientError<as>err<block_start>logger.exception("Service error: Couldn't delete dataset: %s." err.response["Message"])<line_sep><raise><block_end><block_end>@staticmethod<def_stmt>describe_dataset lookoutvision_client project_name dataset_type<block_start>""" Gets information about an Amazon Lookout for Vision dataset. :param lookoutvision_client: The Amazon Lookout for Vision Boto3 client. :param project_name: The name of the project that contains the dataset that you want to describe. :param dataset_type: The type (train or test) of the dataset that you want to describe. """<try_stmt># Describe a dataset <block_start>response=lookoutvision_client.describe_dataset(ProjectName=project_name DatasetType=dataset_type)<line_sep>print(f"Name: {response['DatasetDescription']['ProjectName']}")<line_sep>print(f"Type: {response['DatasetDescription']['DatasetType']}")<line_sep>print(f"Status: {response['DatasetDescription']['Status']}")<line_sep>print(f"Message: {response['DatasetDescription']['StatusMessage']}")<line_sep>print(f"Images: {str(response['DatasetDescription']['ImageStats']['Total'])}")<line_sep>print(f"Labeled: {str(response['DatasetDescription']['ImageStats']['Labeled'])}")<line_sep>print(f"Normal: {str(response['DatasetDescription']['ImageStats']['Normal'])}")<line_sep>print(f"Anomaly: {str(response['DatasetDescription']['ImageStats']['Anomaly'])}")<line_sep>print("Done...")<block_end><except_stmt>ClientError<as>err<block_start>logger.exception("Service error: problem list datasets: %s" err.response["Message"])<block_end>print("Done")<block_end><block_end>
<import_stmt>time<import_from_stmt>ttldict TTLDict<def_stmt>test_ttldict <block_start>d=TTLDict(ttl=1)<line_sep>d['foo']='bar'<assert_stmt>'foo'<in>d<assert_stmt>d['foo']<eq>'bar'<line_sep>time.sleep(1)<assert_stmt>'foo'<not><in>d<block_end>
# Spectral Subtraction: Method used for noise reduction <import_stmt>os<import_stmt>scipy.io.wavfile<as>wav<import_stmt>numpy<as>np<import_stmt>matplotlib.pyplot<as>plt<line_sep>file=input("Enter the file path: ")<line_sep>sr,data=wav.read(file)<line_sep>fl=400#frame_length frames=[]#empty list <for_stmt>i range(0 int(len(data)/(int(fl/2))-1))<block_start>arr=data[int(i<times>int(fl/2)):int(i<times>int(fl/2)+fl)]<line_sep>frames.append(arr)#appending each array of data into the frames list <block_end>frames=np.array(frames)#converting the frames list into an array ham_window=np.hamming(fl)#using np.hamming windowed_frames=frames<times>ham_window#multiplying frames array with ham_window dft=[]#empty list containing fft of windowed_frames <for_stmt>i windowed_frames<block_start>dft.append(np.fft.fft(i))#now taking the fast Fourier transform of each window <block_end>dft=np.array(dft)#converting dft into array dft_mag_spec=np.abs(dft)#converting dft into absolute values dft_phase_spec=np.angle(dft)#finding dft angle noise_estimate=np.mean(dft_mag_spec axis=0)#mean noise_estimate_mag=np.abs(noise_estimate)#absolute value estimate_mag=(dft_mag_spec-2<times>noise_estimate_mag)#subtraction method estimate_mag[estimate_mag<l>0]=0<line_sep>estimate=estimate_mag<times>np.exp(1j<times>dft_phase_spec)#calculating the final estimate ift=[]#list containing the inverse Fourier transform of estimate <for_stmt>i estimate<block_start>ift.append(np.fft.ifft(i))#appending in ift list <block_end>clean_data=[]<line_sep>clean_data.extend(ift[0][:int(fl/2)])#extending clean_data containing ift list <for_stmt>i range(len(ift)-1)<block_start>clean_data.extend(ift[i][int(fl/2):]+ift[i+1][:int(fl/2)])<block_end>clean_data.extend(ift[-1][int(fl/2):])#extending clean_data containing ift list clean_data=np.real(np.array(clean_data))#keep the real part; the ifft output carries only negligible imaginary residue #finally plotting the graph showing the difference in the noise fig=plt.figure(figsize=(8 5))<line_sep>ax=plt.subplot(1 1 1)<line_sep>ax.plot(np.arange(len(data)) data label='Original' color="orange")<line_sep>ax.plot(np.arange(len(clean_data)) clean_data label='Filtered' color="purple")<line_sep>ax.legend(fontsize=12)<line_sep>ax.set_title('Spectral Subtraction Method' fontsize=15)<line_sep>filename=os.path.basename(file)<line_sep>cleaned_file="(Filtered_Audio)"+filename#final filtered audio wav.write(cleaned_file rate=sr data=clean_data.astype(np.int16))<line_sep>plt.savefig(filename+"(Spectral Subtraction graph).jpg")#saved file name as audio.wav(Spectral Subtraction graph).jpg
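# Added hedged check (separate from the script above): verifies the 50%-overlap framing arithmetic used there on a synthetic signal whose length, as the script assumes, is a multiple of the hop size fl/2. <import_stmt>numpy<as>np<line_sep>sig=np.arange(1600)<line_sep>fl=400<line_sep>hop=fl<floordiv>2<line_sep>n_frames=len(sig)<floordiv>hop-1#same frame count the script's range() produces frames=[sig[i<times>hop:i<times>hop+fl]<for>i range(n_frames)]<line_sep><assert_stmt>n_frames<eq>7<line_sep><assert_stmt>all(len(f)<eq>fl<for>f frames)<line_sep>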
""" salttiger上的免费国外编程电子书 """<import_stmt>os<import_from_stmt>pprint pprint<import_stmt>looter<as>lt<line_sep>domain='https://salttiger.com'<def_stmt>crawl url<block_start>tree=lt.fetch(url)<line_sep>items=tree.css('ul.car-monthlisting li')<line_sep>total=[]<for_stmt>item items<block_start>data={}<line_sep>data['name']=item.css('a::text').extract_first()<line_sep>data['url']=item.css('a::attr(href)').extract_first()<line_sep>data['comments']=int(item.css('span::text').re_first(r'(\d+)'))<line_sep>pprint(data)<line_sep>total.append(data)<block_end><return>total<block_end><if_stmt>__name__<eq>'__main__'<block_start>task=f'{domain}/archives/'<line_sep>result=crawl(task)<line_sep>lt.save(result name='salttiger.csv' sort_by='comments' order='desc')<block_end>
<import_from_stmt>PyQt5 QtCore QtGui QtWidgets QtSvg<class_stmt>PianoKey(QtWidgets.QGraphicsRectItem)<block_start><def_stmt>__init__ self black=<false> rect=QtCore.QRectF() parent=<none><block_start>super(PianoKey self).__init__(rect parent)<line_sep>self.m_pressed=<false><line_sep>self.m_selectedBrush=QtGui.QBrush()<line_sep>self.m_brush=QtGui.QBrush(QtCore.Qt.black)<if>black<else>QtGui.QBrush(QtCore.Qt.white)<line_sep>self.m_black=black<block_end><def_stmt>setPressedBrush self brush<block_start>self.m_selectedBrush=brush<block_end><def_stmt>paint self painter option widget<block_start>rendered=QtSvg.QSvgRenderer("key.svg")<line_sep>black_pen=QtGui.QPen(QtCore.Qt.black 1)<line_sep>gray_pen=QtGui.QPen(QtGui.QBrush(QtCore.Qt.gray) 1 QtCore.Qt.SolidLine QtCore.Qt.RoundCap QtCore.Qt.RoundJoin)<if_stmt>self.m_pressed<block_start><if_stmt>self.m_selectedBrush.style()<ne>QtCore.Qt.NoBrush<block_start>painter.setBrush(self.m_selectedBrush)<block_end><else_stmt><block_start>painter.setBrush(QtWidgets.QApplication.palette().highlight())<block_end><block_end><else_stmt><block_start>painter.setBrush(self.m_brush)<line_sep><block_end>painter.setPen(black_pen)<line_sep>painter.drawRoundedRect(self.rect() 15 15 QtCore.Qt.RelativeSize)<if_stmt>self.m_black<block_start>rendered.render(painter self.rect())<block_end><else_stmt><block_start>points=[QtCore.QPointF(self.rect().left()+1.5 self.rect().bottom()-1) QtCore.QPointF(self.rect().right()-1 self.rect().bottom()-1) QtCore.QPointF(self.rect().right()-1 self.rect().top()+1)]<line_sep>painter.setPen(gray_pen)<line_sep>painter.drawPolyline(QtGui.QPolygonF(points))<block_end><block_end><def_stmt>mousePressEvent self event<block_start>self.m_pressed=<true><line_sep>self.update()<line_sep>super(PianoKey self).mousePressEvent(event)<line_sep>event.accept()<block_end><def_stmt>mouseReleaseEvent self event<block_start>self.m_pressed=<false><line_sep>self.update()<line_sep>super(PianoKey self).mouseReleaseEvent(event)<block_end><block_end>KEYWIDTH,KEYHEIGHT=18 72<class_stmt>PianoKeyBoard(QtWidgets.QGraphicsView)<block_start><def_stmt>__init__ self num_octaves=2 parent=<none><block_start>super(PianoKeyBoard self).__init__(parent)<line_sep>self.initialize()<line_sep>self.m_numOctaves=num_octaves<line_sep>scene=QtWidgets.QGraphicsScene(QtCore.QRectF(0 0 KEYWIDTH<times>self.m_numOctaves<times>7 KEYHEIGHT) self)<line_sep>self.setScene(scene)<line_sep>numkeys=self.m_numOctaves<times>12<for_stmt>i range(numkeys)<block_start>octave=i<floordiv>12<times>7<line_sep>j=i%12<if_stmt>j<ge>5<block_start>j<augadd>1<block_end><if_stmt>j%2<eq>0<block_start>x=(octave+j/2)<times>KEYWIDTH<line_sep>key=PianoKey(rect=QtCore.QRectF(x 0 KEYWIDTH KEYHEIGHT) black=<false>)<block_end><else_stmt><block_start>x=(octave+j<floordiv>2)<times>KEYWIDTH+KEYWIDTH<times>6<floordiv>10+1<line_sep>key=PianoKey(rect=QtCore.QRectF(x 0 KEYWIDTH<times>8<floordiv>10-1 KEYHEIGHT<times>6<floordiv>10) black=<true>)<line_sep>key.setZValue(1)<block_end>key.setPressedBrush(QtWidgets.QApplication.palette().highlight())<line_sep>self.scene().addItem(key)<block_end><block_end><def_stmt>initialize self<block_start>self.setAttribute(QtCore.Qt.WA_InputMethodEnabled 
<false>)<line_sep>self.setHorizontalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)<line_sep>self.setVerticalScrollBarPolicy(QtCore.Qt.ScrollBarAlwaysOff)<line_sep>self.setCacheMode(QtWidgets.QGraphicsView.CacheBackground)<line_sep>self.setViewportUpdateMode(QtWidgets.QGraphicsView.MinimalViewportUpdate)<line_sep>self.setRenderHints(QtGui.QPainter.Antialiasing|QtGui.QPainter.TextAntialiasing|QtGui.QPainter.SmoothPixmapTransform)<line_sep>self.setOptimizationFlag(QtWidgets.QGraphicsView.DontClipPainter <true>)<line_sep>self.setOptimizationFlag(QtWidgets.QGraphicsView.DontSavePainterState <true>)<line_sep>self.setOptimizationFlag(QtWidgets.QGraphicsView.DontAdjustForAntialiasing <true>)<line_sep>self.setBackgroundBrush(QtWidgets.QApplication.palette().base())<block_end><def_stmt>resizeEvent self event<block_start>super(PianoKeyBoard self).resizeEvent(event)<line_sep>self.fitInView(self.scene().sceneRect() QtCore.Qt.KeepAspectRatio)<block_end><def_stmt>sizeHint self<block_start><return>self.mapFromScene(self.sceneRect()).boundingRect().size()<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start><import_stmt>sys<line_sep>app=QtWidgets.QApplication(sys.argv)<line_sep>app.setStyle('fusion')<line_sep>w=QtWidgets.QWidget()<line_sep>lay=QtWidgets.QVBoxLayout(w)<line_sep>lay.addWidget(QtWidgets.QLabel("Piano Keyboard" alignment=QtCore.Qt.AlignCenter))<line_sep>lay.addWidget(PianoKeyBoard())<line_sep>w.resize(640 480)<line_sep>w.show()<line_sep>sys.exit(app.exec_())<block_end>
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>functools partial<import_from_stmt>torchnet.transform compose<import_from_stmt>torchnet.dataset ListDataset TransformDataset<import_from_stmt>fewshots.data.base convert_dict CudaTransform EpisodicBatchSampler<import_from_stmt>fewshots.data.setup setup_images<import_from_stmt>fewshots.data.cache Cache<import_from_stmt>fewshots.utils filter_opt<import_from_stmt>fewshots.data.SetupEpisode SetupEpisode<line_sep>root_dir=''<def_stmt>extract_episode setup_episode augm_opt d# data: N x C x H x W <block_start>n_max_examples=d[0]['data'].size(0)<line_sep>n_way,n_shot,n_query=setup_episode.get_current_setup()<line_sep>example_inds=torch.randperm(n_max_examples)[:(n_shot+n_query)]<line_sep>support_inds=example_inds[:n_shot]<line_sep>query_inds=example_inds[n_shot:]<line_sep>xs_list=[d[i]['data'][support_inds]<for>i range(augm_opt['n_augment'])]<line_sep># concatenate as shots into xs xs=torch.cat(xs_list dim=0)<line_sep># extract queries from a single cache entry xq=d[np.random.randint(augm_opt['n_augment'])]['data'][query_inds]<line_sep>out_dict={'class':d[0]['class'] 'xs':xs 'xq':xq 'n_way':n_way 'n_shot':n_shot 'n_query':n_query}<line_sep><return>out_dict<block_end><def_stmt>load_data opt splits<block_start><global>root_dir<line_sep>root_dir=opt['data.root_dir']<line_sep>augm_opt=filter_opt(opt 'augm')<line_sep>dataset=opt['data.dataset']<line_sep>split_dir=os.path.join(opt['data.root_dir'] opt['data.dataset'] 'splits' opt['data.split'])<line_sep>ret={}<line_sep># cache = {} cache=Cache()<for_stmt>split splits<block_start><if_stmt>split<in>['val1' 'val5' 'test']<block_start>n_way=opt['data.test_way']<block_end><else_stmt><block_start>n_way=opt['data.way']<block_end><if_stmt>split<in>['train' 'trainval']# random shots <block_start>SE=SetupEpisode(batch_size=opt['data.batch_size'] shot_max=opt['data.shot_max'] fixed_shot=opt['data.shot'] way_min=opt['data.way_min'] fixed_way=n_way)<block_end><elif_stmt>split<eq>'val1'<block_start>SE=SetupEpisode(batch_size=opt['data.batch_size'] shot_max=opt['data.shot_max'] fixed_shot=1 way_min=opt['data.way_min'] fixed_way=n_way)<block_end><elif_stmt>split<eq>'val5'<block_start>SE=SetupEpisode(batch_size=opt['data.batch_size'] shot_max=opt['data.shot_max'] fixed_shot=5 way_min=opt['data.way_min'] fixed_way=n_way)<block_end><else_stmt><block_start>SE=SetupEpisode(batch_size=opt['data.batch_size'] shot_max=opt['data.shot_max'] fixed_shot=opt['data.test_shot'] way_min=opt['data.way_min'] fixed_way=n_way)<block_end><if_stmt>split<in>['val1' 'val5' 'test']<block_start>n_episodes=opt['data.test_episodes']<block_end><else_stmt><block_start>n_episodes=opt['data.train_episodes']<block_end>transforms=[partial(convert_dict 'class') partial(load_class_images split dataset cache augm_opt) partial(extract_episode SE augm_opt)]<if_stmt>opt['data.cuda']<block_start>transforms.append(CudaTransform())<block_end>transforms=compose(transforms)<line_sep>class_names=[]<line_sep>split_file='val.txt'<if>split<in>['val1' 'val5']<else>"{:s}.txt".format(split)<with_stmt>open(os.path.join(split_dir split_file) 'r')<as>f<block_start><for_stmt>class_name f.readlines()<block_start>class_names.append(class_name.rstrip('\n'))<block_end><block_end>ds=TransformDataset(ListDataset(class_names) transforms)<line_sep>sampler=EpisodicBatchSampler(SE len(ds) n_episodes)<line_sep># use num_workers=0, otherwise may receive duplicate episodes ret[split]=torch.utils.data.DataLoader(ds batch_sampler=sampler 
num_workers=0)<block_end><return>ret<block_end><def_stmt>load_class_images split dataset cache augm_opt d<block_start><if_stmt>d['class']<in>cache.data.keys()<block_start><if_stmt>len(cache.data[d['class']])<l>augm_opt['cache_size']<block_start>init_entry=<false><line_sep>setup_images(split d cache dataset init_entry root_dir augm_opt)<block_end><block_end><else_stmt><block_start>init_entry=<true><line_sep>setup_images(split d cache dataset init_entry root_dir augm_opt)<block_end>cache_len=len(cache.data[d['class']])<line_sep># if cache does not enough shots yet, repeat <if_stmt>cache_len<l>augm_opt['n_augment']<block_start>rand_ids=np.random.choice(cache_len size=augm_opt['n_augment'] replace=<true>)<block_end><else_stmt><block_start>rand_ids=np.random.choice(cache_len size=augm_opt['n_augment'] replace=<false>)<block_end>out_dicts=[{'class':d['class'] 'data':cache.data[d['class']][rand_ids[i]]}<for>i range(augm_opt['n_augment'])]<line_sep><return>out_dicts<block_end>
# Copyright (C) 2022 Mandiant, Inc. All Rights Reserved. <import_stmt>io<import_stmt>textwrap<import_stmt>collections<import_from_stmt>typing List Tuple Union<import_stmt>tabulate<import_stmt>floss.utils<as>util<import_stmt>floss.logging_<import_from_stmt>floss.render Verbosity<import_from_stmt>floss.results AddressType StackString TightString DecodedString ResultDocument StringEncoding<import_from_stmt>floss.render.sanitize sanitize<line_sep>MIN_WIDTH_LEFT_COL=22<line_sep>MIN_WIDTH_RIGHT_COL=82<line_sep>DISABLED="Disabled"<line_sep>tabulate.PRESERVE_WHITESPACE=<true><line_sep>logger=floss.logging_.getLogger(__name__)<class_stmt>StringIO(io.StringIO)<block_start><def_stmt>writeln self s<block_start>self.write(s)<line_sep>self.write("\n")<block_end><block_end><def_stmt>width s:str character_count:int<arrow>str<block_start>"""pad the given string to at least `character_count`"""<if_stmt>len(s)<l>character_count<block_start><return>s+" "<times>(character_count-len(s))<block_end><else_stmt><block_start><return>s<block_end><block_end><def_stmt>render_meta results:ResultDocument ostream verbose<block_start>rows:List[Tuple[str str]]=list()<if_stmt>verbose<eq>Verbosity.DEFAULT<block_start>rows.append((width("file path" MIN_WIDTH_LEFT_COL) width(results.metadata.file_path MIN_WIDTH_RIGHT_COL)))<block_end><else_stmt><block_start>rows.extend([(width("file path" MIN_WIDTH_LEFT_COL) width(results.metadata.file_path MIN_WIDTH_RIGHT_COL)) ("start date" results.metadata.runtime.start_date.strftime("%Y-%m-%d %H:%M:%S")) ("runtime" strtime(results.metadata.runtime.total)) ("version" results.metadata.version) ("imagebase" f"0x{results.metadata.imagebase:x}") ("min string length" f"{results.metadata.min_length}") ])<block_end>rows.append(("extracted strings" ""))<line_sep>rows.extend(render_string_type_rows(results))<if_stmt>verbose<g>Verbosity.DEFAULT<block_start>rows.extend(render_function_analysis_rows(results))<block_end>ostream.write(tabulate.tabulate(rows tablefmt="psql"))<line_sep>ostream.write("\n")<block_end><def_stmt>render_string_type_rows results:ResultDocument<arrow>List[Tuple[str str]]<block_start><return>[(" static strings" str(len(results.strings.static_strings))<if>results.analysis.enable_static_strings<else>DISABLED ) (" stack strings" str(len(results.strings.stack_strings))<if>results.analysis.enable_stack_strings<else>DISABLED ) (" tight strings" str(len(results.strings.tight_strings))<if>results.analysis.enable_tight_strings<else>DISABLED ) (" decoded strings" str(len(results.strings.decoded_strings))<if>results.analysis.enable_decoded_strings<else>DISABLED ) ]<block_end><def_stmt>render_function_analysis_rows results<arrow>List[Tuple[str str]]<block_start><if_stmt>results.metadata.runtime.vivisect<eq>0<block_start><return>[("analyzed functions" DISABLED)]<block_end>rows=[("analyzed functions" "") (" discovered" results.analysis.functions.discovered) (" library" results.analysis.functions.library) ]<if_stmt>results.analysis.enable_stack_strings<block_start>rows.append((" stack strings" str(results.analysis.functions.analyzed_stack_strings)))<block_end><if_stmt>results.analysis.enable_tight_strings<block_start>rows.append((" tight strings" str(results.analysis.functions.analyzed_tight_strings)))<block_end><if_stmt>results.analysis.enable_decoded_strings<block_start>rows.append((" decoded strings" str(results.analysis.functions.analyzed_decoded_strings)))<block_end><if_stmt>results.analysis.functions.decoding_function_scores<block_start>rows.append((" identified decoding functions\n (offset 
and score)" textwrap.fill(", ".join([f"0x{fva:x} ({d:.3f})"<for>fva,d results.analysis.functions.decoding_function_scores.items()]) max(len(results.metadata.file_path) MIN_WIDTH_RIGHT_COL) ) ))<block_end><return>rows<block_end><def_stmt>strtime seconds<block_start>m,s=divmod(seconds 60)<line_sep><return>f"{m:02.0f}:{s:02.0f}"<block_end><def_stmt>render_staticstrings strings ostream verbose disable_headers<block_start>render_heading("FLOSS STATIC STRINGS" len(strings) ostream disable_headers)<line_sep>ascii_strings=list(filter(<lambda>s:s.encoding<eq>StringEncoding.ASCII strings))<line_sep>unicode_strings=list(filter(<lambda>s:s.encoding<eq>StringEncoding.UTF16LE strings))<line_sep>ascii_offset_len=0<line_sep>unicode_offset_len=0<if_stmt>ascii_strings<block_start>ascii_offset_len=len(f"{ascii_strings[-1].offset}")<block_end><if_stmt>unicode_strings<block_start>unicode_offset_len=len(f"{unicode_strings[-1].offset}")<block_end>offset_len=max(ascii_offset_len unicode_offset_len)<line_sep>render_heading("FLOSS ASCII STRINGS" len(ascii_strings) ostream disable_headers)<for_stmt>s ascii_strings<block_start><if_stmt>verbose<eq>Verbosity.DEFAULT<block_start>ostream.writeln(s.string)<block_end><else_stmt><block_start>ostream.writeln(f"0x{s.offset:>0{offset_len}x} {s.string}")<block_end><block_end>ostream.writeln("")<line_sep>render_heading("FLOSS UTF-16LE STRINGS" len(unicode_strings) ostream disable_headers)<for_stmt>s unicode_strings<block_start><if_stmt>verbose<eq>Verbosity.DEFAULT<block_start>ostream.writeln(s.string)<block_end><else_stmt><block_start>ostream.writeln(f"0x{s.offset:>0{offset_len}x} {s.string}")<block_end><block_end><block_end><def_stmt>render_stackstrings strings:Union[List[StackString] List[TightString]] ostream verbose:bool disable_headers:bool<block_start><if_stmt>verbose<eq>Verbosity.DEFAULT<block_start><for_stmt>s strings<block_start>ostream.writeln(sanitize(s.string))<block_end><block_end><else_stmt><block_start><if_stmt>strings<block_start>ostream.write(tabulate.tabulate([(util.hex(s.function) util.hex(s.program_counter) util.hex(s.frame_offset) sanitize(s.string) )<for>s strings] headers=("Function" "Function Offset" "Frame Offset" "String")<if><not>disable_headers<else>() ))<line_sep>ostream.write("\n")<block_end><block_end><block_end><def_stmt>render_decoded_strings decoded_strings:List[DecodedString] ostream verbose disable_headers<block_start>""" Render results of string decoding phase. 
"""<if_stmt>verbose<eq>Verbosity.DEFAULT<block_start><for_stmt>ds decoded_strings<block_start>ostream.writeln(sanitize(ds.string))<block_end><block_end><else_stmt><block_start>strings_by_functions=collections.defaultdict(list)<for_stmt>ds decoded_strings<block_start>strings_by_functions[ds.decoding_routine].append(ds)<block_end><for_stmt>fva,data strings_by_functions.items()<block_start>render_heading(f" FUNCTION at 0x{fva:x}" len(data) ostream disable_headers)<line_sep>rows=[]<for_stmt>ds data<block_start><if_stmt>ds.address_type<in>(AddressType.HEAP AddressType.STACK)<block_start>offset_string=f"({ds.address_type})"<block_end><else_stmt><block_start>offset_string=hex(ds.address<or>0)<block_end>rows.append((offset_string hex(ds.decoded_at) sanitize(ds.string)))<block_end><if_stmt>rows<block_start>ostream.write(tabulate.tabulate(rows headers=("Offset" "Called At" "String")<if><not>disable_headers<else>()))<line_sep>ostream.writeln("\n")<block_end><block_end><block_end><block_end><def_stmt>render_heading heading n ostream disable_headers<block_start>""" example:: ------------------------------- | FLOSS STATIC STRINGS (1337) | ------------------------------- """<if_stmt>disable_headers<block_start><return><block_end>heading=f"| {heading} ({n}) |"<line_sep>ostream.write(tabulate.tabulate([[heading]]))<line_sep>ostream.write("\n")<block_end><def_stmt>render results verbose disable_headers<block_start>ostream=StringIO()<if_stmt><not>disable_headers<block_start>ostream.writeln("")<line_sep>ostream.write(f"FLARE FLOSS RESULTS (version {results.metadata.version})\n")<line_sep>render_meta(results ostream verbose)<line_sep>ostream.writeln("")<block_end><if_stmt>results.analysis.enable_static_strings<block_start>render_staticstrings(results.strings.static_strings ostream verbose disable_headers)<line_sep>ostream.writeln("")<block_end><if_stmt>results.analysis.enable_stack_strings<block_start>render_heading("FLOSS STACK STRINGS" len(results.strings.stack_strings) ostream disable_headers)<line_sep>render_stackstrings(results.strings.stack_strings ostream verbose disable_headers)<line_sep>ostream.writeln("")<block_end><if_stmt>results.analysis.enable_tight_strings<block_start>render_heading("FLOSS TIGHT STRINGS" len(results.strings.tight_strings) ostream disable_headers)<line_sep>render_stackstrings(results.strings.tight_strings ostream verbose disable_headers)<line_sep>ostream.writeln("")<block_end><if_stmt>results.analysis.enable_decoded_strings<block_start>render_heading("FLOSS DECODED STRINGS" len(results.strings.decoded_strings) ostream disable_headers)<line_sep>render_decoded_strings(results.strings.decoded_strings ostream verbose disable_headers)<block_end><return>ostream.getvalue()<block_end>
<import_from_stmt>django.http HttpResponse<def_stmt>empty_view request *args **kwargs<block_start><return>HttpResponse('')<block_end><def_stmt>kwargs_view request arg1=1 arg2=2<block_start><return>HttpResponse('')<block_end><def_stmt>absolute_kwargs_view request arg1=1 arg2=2<block_start><return>HttpResponse('')<block_end><class_stmt>ViewClass(object)<block_start><def_stmt>__call__ self request *args **kwargs<block_start><return>HttpResponse('')<block_end><block_end>view_class_instance=ViewClass()<def_stmt>bad_view request *args **kwargs<block_start><raise>ValueError("I don't think I'm getting good value for this view")<block_end>
<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>trainer *<import_from_stmt>trainer256 *<import_from_stmt>config get_config<import_from_stmt>utils prepare_dirs_and_logger save_config<import_stmt>pdb os<def_stmt>main config<block_start>prepare_dirs_and_logger(config)<if_stmt>config.gpu<g>-1<block_start>os.environ["CUDA_DEVICE_ORDER"]="PCI_BUS_ID"# see issue #152 os.environ["CUDA_VISIBLE_DEVICES"]=str(config.gpu)<block_end>config.data_format='NHWC'<if_stmt>1<eq>config.model<block_start>trainer=PG2(config)<line_sep>trainer.init_net()<block_end><elif_stmt>11<eq>config.model<block_start>trainer=PG2_256(config)<line_sep>trainer.init_net()<block_end><if_stmt>config.is_train<block_start>save_config(config)<line_sep>trainer.train()<block_end><else_stmt># if not config.load_path: # raise Exception("[!] You should specify `load_path` to load a pretrained model") <block_start>trainer.test()<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>config,unparsed=get_config()<line_sep>main(config)<block_end>
<import_stmt>proto<import_from_stmt>proto proto<import_from_stmt>unittest TestCase<class_stmt>MySender(proto.Sender)<block_start><def_stmt>on_send self buffer offset size# Send nothing... <block_start><return>0<block_end><block_end><class_stmt>MyReceiver(proto.Receiver)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self._order=<false><line_sep>self._balance=<false><line_sep>self._account=<false><block_end><def_stmt>check self<block_start><return>self._order<and>self._balance<and>self._account<block_end><def_stmt>on_receive_ordermessage self value<block_start>self._order=<true><block_end><def_stmt>on_receive_balancemessage self value<block_start>self._balance=<true><block_end><def_stmt>on_receive_accountmessage self value<block_start>self._account=<true><block_end><block_end><class_stmt>TestSendReceive(TestCase)<block_start>@staticmethod<def_stmt>send_and_receive index1 index2<block_start>sender=MySender()<line_sep># Create and send a new order order=proto.Order(1 "EURUSD" proto.OrderSide.buy proto.OrderType.market 1.23456 1000.0)<line_sep>sender.send(proto.OrderMessage(order))<line_sep># Create and send a new balance wallet balance=proto.Balance("USD" 1000.0)<line_sep>sender.send(proto.BalanceMessage(balance))<line_sep># Create and send a new account with some orders account=proto.Account(1 "Test" proto.State.good proto.Balance("USD" 1000.0) proto.Balance("EUR" 100.0))<line_sep>account.orders.append(proto.Order(1 "EURUSD" proto.OrderSide.buy proto.OrderType.market 1.23456 1000.0))<line_sep>account.orders.append(proto.Order(2 "EURUSD" proto.OrderSide.sell proto.OrderType.limit 1.0 100.0))<line_sep>account.orders.append(proto.Order(3 "EURUSD" proto.OrderSide.buy proto.OrderType.stop 1.5 10.0))<line_sep>sender.send(proto.AccountMessage(account))<line_sep>receiver=MyReceiver()<line_sep># Receive data from the sender index1<augmod>sender.buffer.size<line_sep>index2<augmod>sender.buffer.size<line_sep>index2=max(index1 index2)<line_sep>receiver.receive(sender.buffer 0 index1)<line_sep>receiver.receive(sender.buffer index1 index2-index1)<line_sep>receiver.receive(sender.buffer index2 sender.buffer.size-index2)<line_sep><return>receiver.check()<block_end><def_stmt>test_send_and_receive self<block_start><for_stmt>i range(100)<block_start><for_stmt>j range(100)<block_start>self.assertTrue(self.send_and_receive(i j))<block_end><block_end><block_end><block_end>
# Copy a directory recursively WITHOUT the race condition in cp -r. # This also ignores symbolic links. <import_stmt>errno<import_stmt>os<import_stmt>shutil<import_stmt>sys<if_stmt>sys.version_info.major<ge>3<block_start>file_exists_error=FileExistsError<def_stmt>is_file_exists_error e<block_start><return><true><block_end><block_end><else_stmt><block_start>file_exists_error=OSError<def_stmt>is_file_exists_error e<block_start><return>e.errno<eq>errno.EEXIST<block_end><block_end><def_stmt>copy_recursive src destdir<block_start>dest=os.path.join(destdir os.path.basename(src))<if_stmt>os.path.isdir(src)<block_start><try_stmt><block_start>os.mkdir(dest)<block_end><except_stmt>file_exists_error<as>e<block_start><if_stmt><not>is_file_exists_error(e)<block_start><raise><block_end><block_end><for_stmt>filename os.listdir(src)<block_start>path=os.path.join(src filename)<line_sep>copy_recursive(path dest)<block_end><block_end><elif_stmt>os.path.islink(src)<block_start><pass><block_end><elif_stmt>os.path.isfile(src)<block_start>shutil.copy(src dest)<block_end><else_stmt><block_start><raise>Exception('unknown file type for: '+src)<block_end><block_end><if_stmt>len(sys.argv)<l>3<block_start>print('Usage: copy_recursive.py FILE DIRECTORY')<line_sep>sys.exit(1)<block_end>copy_recursive(sys.argv[1] sys.argv[2])<line_sep>
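# Why the try/except above matters: a hedged sketch contrasting the racy # check-then-create idiom with two race-free alternatives; all paths here are # illustrative and the sketch runs in a scratch directory. <import_stmt>os<import_stmt>tempfile<line_sep>dest=os.path.join(tempfile.mkdtemp() 'out')<line_sep># racy: another process may create dest between the check and the mkdir <if_stmt><not>os.path.exists(dest)<block_start>os.mkdir(dest)<block_end># race-free, as in copy_recursive above: attempt, then tolerate the collision <try_stmt><block_start>os.mkdir(dest)<block_end><except_stmt>FileExistsError<block_start><pass><block_end># on Python 3.2+ a single call expresses the same intent os.makedirs(dest exist_ok=<true>)<line_sep>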
# # Copyright (c) 2013-2018 Quarkslab. # This file is part of IRMA project. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License in the top-level directory # of this distribution and at: # # http://www.apache.org/licenses/LICENSE-2.0 # # No part of the project, including this file, may be copied, # modified, propagated, or distributed except according to the # terms contained in the LICENSE file. <import_stmt>logging<import_stmt>re<import_stmt>os<import_from_stmt>pathlib Path<import_from_stmt>modules.antivirus.base AntivirusUnix<line_sep>log=logging.getLogger(__name__)<class_stmt>Sophos(AntivirusUnix)<block_start>name="Sophos Anti-Virus (Linux)"<line_sep># ================================== # Constructor and destructor stuff # ================================== <def_stmt>__init__ self *args **kwargs# call super class constructor <block_start>super().__init__(*args **kwargs)<line_sep># scan tool variables self.scan_args=("-archive" # scan inside archives "-cab" # scan microsoft cab file "-loopback" # scan loopback-type file "-tnef" # scan tnef file "-mime" # scan file encoded with mime format "-oe" # scan microsoft outlook "-pua" # scan potentially unwanted applications "-ss" # only print errors or found viruses "-nc" # do not ask remove confirmation when infected "-nb" # no bell sound )<line_sep># NOTE: on windows, 0 can be returned even if the file is infected self._scan_retcodes[self.ScanResult.INFECTED]=<lambda>x:x<in>[0 1 2 3]<line_sep>self.scan_patterns=[re.compile(">>> Virus '(?P<name>.+)' found in file (?P<file>.+)" re.IGNORECASE) ]<block_end># ========================================== # Antivirus methods (need to be overridden) # ========================================== <def_stmt>get_version self<block_start>"""return the version of the antivirus"""<line_sep><return>self._run_and_parse('--version' regexp=r'(?P<version>\d+(\.\d+)+)' group='version')<block_end><def_stmt>get_database self<block_start>"""return list of files in the database"""<line_sep># NOTE: we can use clamconf to get database location, but it is not # always installed by default.
Instead, hardcode some common paths and # locate files using predefined patterns search_paths=[Path('/opt/sophos-av/lib/sav') # default location in debian ]<line_sep>database_patterns=['*.dat' 'vdl??.vdb' 'sus??.vdb' '*.ide' ]<line_sep><return>self.locate(database_patterns search_paths syspath=<false>)<block_end><def_stmt>get_scan_path self<block_start>"""return the full path of the scan tool"""<line_sep><return>self.locate_one("savscan" paths=[Path("/opt/sophos-av/bin")])<block_end><def_stmt>scan self paths# quirk to force lang in linux <block_start>os.environ['LANG']="C"<line_sep><return>super().scan(paths)<block_end><def_stmt>get_virus_database_version self<block_start>"""Return the Virus Database version"""<line_sep>retcode,stdout,_=self.run_cmd(self.scan_path '-v')<if_stmt>retcode<block_start><raise>RuntimeError("Bad return code while getting database version")<block_end>matches=re.search('Virus data version *: *(?P<version>.*)' stdout re.IGNORECASE)<if_stmt><not>matches<block_start><raise>RuntimeError("Cannot read database version in stdout")<block_end>version=matches.group('version').strip()<line_sep>matches=re.search('Released *: *'<concat>r'(?P<date>\d\d \w+ \d\d\d\d)' stdout re.IGNORECASE)<if_stmt><not>matches<block_start><return>version<block_end>date=matches.group('date').strip()<line_sep><return>version+' ('+date+')'<block_end><block_end>
<import_from_stmt>abc abstractmethod ABC<import_from_stmt>pathlib Path<import_from_stmt>..anki.adapters.anki_deck AnkiDeck<class_stmt>DeckExporter(ABC)<block_start>@abstractmethod<def_stmt>export_to_directory self deck:AnkiDeck output_dir:Path copy_media=<true><arrow>Path<block_start><pass><block_end><block_end>
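# A hedged sketch of a concrete exporter: the abstract class above only fixes # the interface. FakeDeck and the JSON layout are illustrative stand-ins, # duck-typed so the sketch runs standalone; a real exporter would subclass # DeckExporter and serialize the actual AnkiDeck. <import_stmt>tempfile<import_from_stmt>pathlib Path<class_stmt>FakeDeck<block_start>"""Minimal stand-in for AnkiDeck; assumes only a name attribute."""<def_stmt>__init__ self name<block_start>self.name=name<block_end><block_end><class_stmt>JsonDeckExporter<block_start><def_stmt>export_to_directory self deck output_dir:Path copy_media=<true><arrow>Path<block_start>output_dir.mkdir(parents=<true> exist_ok=<true>)<line_sep>target=output_dir/(deck.name+'.json')<line_sep>target.write_text('{}')# a real exporter would serialize notes and media here <return>target<block_end><block_end>out=JsonDeckExporter().export_to_directory(FakeDeck('sample') Path(tempfile.mkdtemp()))<line_sep><assert_stmt>out.name<eq>'sample.json'<line_sep>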
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT! """Client and server classes corresponding to protobuf-defined services."""<import_stmt>grpc<import_from_stmt>hfc.protos.gossip message_pb2<as>hfc_dot_protos_dot_gossip_dot_message__pb2<class_stmt>GossipStub(object)<block_start>"""Gossip """<def_stmt>__init__ self channel<block_start>"""Constructor. Args: channel: A grpc.Channel. """<line_sep>self.GossipStream=channel.stream_stream('/gossip.Gossip/GossipStream' request_serializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.SerializeToString response_deserializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.FromString )<line_sep>self.Ping=channel.unary_unary('/gossip.Gossip/Ping' request_serializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.SerializeToString response_deserializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.FromString )<block_end><block_end><class_stmt>GossipServicer(object)<block_start>"""Gossip """<def_stmt>GossipStream self request_iterator context<block_start>"""GossipStream is the gRPC stream used for sending and receiving messages """<line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><def_stmt>Ping self request context<block_start>"""Ping is used to probe a remote peer's aliveness """<line_sep>context.set_code(grpc.StatusCode.UNIMPLEMENTED)<line_sep>context.set_details('Method not implemented!')<line_sep><raise>NotImplementedError('Method not implemented!')<block_end><block_end><def_stmt>add_GossipServicer_to_server servicer server<block_start>rpc_method_handlers={'GossipStream':grpc.stream_stream_rpc_method_handler(servicer.GossipStream request_deserializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.FromString response_serializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.SerializeToString ) 'Ping':grpc.unary_unary_rpc_method_handler(servicer.Ping request_deserializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.FromString response_serializer=hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.SerializeToString ) }<line_sep>generic_handler=grpc.method_handlers_generic_handler('gossip.Gossip' rpc_method_handlers)<line_sep>server.add_generic_rpc_handlers((generic_handler ))<block_end># This class is part of an EXPERIMENTAL API. <class_stmt>Gossip(object)<block_start>"""Gossip """<line_sep>@staticmethod<def_stmt>GossipStream request_iterator target options=() channel_credentials=<none> call_credentials=<none> insecure=<false> compression=<none> wait_for_ready=<none> timeout=<none> metadata=<none><block_start><return>grpc.experimental.stream_stream(request_iterator target '/gossip.Gossip/GossipStream' hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.SerializeToString hfc_dot_protos_dot_gossip_dot_message__pb2.Envelope.FromString options channel_credentials insecure call_credentials compression wait_for_ready timeout metadata)<block_end>@staticmethod<def_stmt>Ping request target options=() channel_credentials=<none> call_credentials=<none> insecure=<false> compression=<none> wait_for_ready=<none> timeout=<none> metadata=<none><block_start><return>grpc.experimental.unary_unary(request target '/gossip.Gossip/Ping' hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.SerializeToString hfc_dot_protos_dot_gossip_dot_message__pb2.Empty.FromString options channel_credentials insecure call_credentials compression wait_for_ready timeout metadata)<block_end><block_end>
<import_from_stmt>node_launcher.constants IS_LINUX IS_MACOS IS_WINDOWS OPERATING_SYSTEM TARGET_BITCOIN_RELEASE <import_from_stmt>node_launcher.node_set.lib.software Software<class_stmt>BitcoindSoftware(Software)<block_start><def_stmt>__init__ self<block_start>super().__init__(software_name='bitcoind' release_version=TARGET_BITCOIN_RELEASE)<line_sep>self.release_version=TARGET_BITCOIN_RELEASE.replace('v' '')<if_stmt>IS_WINDOWS<block_start>os_name='win64'<block_end><elif_stmt>IS_MACOS<block_start>os_name='osx64'<block_end><elif_stmt>IS_LINUX<block_start>os_name='x86_64-linux-gnu'<block_end><else_stmt><block_start><raise>Exception(f'{OPERATING_SYSTEM} is not supported')<block_end>self.download_name=f'bitcoin-{self.release_version}-{os_name}'<line_sep>self.download_url=f'https://bitcoincore.org'<concat>f'/bin'<concat>f'/bitcoin-core-{self.release_version}'<concat>f'/{self.download_destination_file_name}'<block_end>@property<def_stmt>daemon self<block_start><return>self.bitcoind<block_end>@property<def_stmt>cli self<block_start><return>self.bitcoin_cli<block_end>@property<def_stmt>bitcoin_qt self<arrow>str<block_start><return>self.executable_path('bitcoin-qt')<block_end>@property<def_stmt>bitcoin_cli self<arrow>str<block_start><return>self.executable_path('bitcoin-cli')<block_end>@property<def_stmt>bitcoind self<arrow>str<block_start><return>self.executable_path('bitcoind')<block_end>@property<def_stmt>uncompressed_directory_name self<arrow>str<block_start><if_stmt>IS_LINUX<block_start>name='-'.join(self.download_name.split('-')[0:2])<block_end><else_stmt><block_start>name='-'.join(self.download_name.split('-')[:-1])<if_stmt>name.count('.')<eq>3<block_start>name='.'.join(name.split('.')[:-1])<block_end><block_end><return>name<block_end><block_end>
# -*- coding: UTF-8 -*- # # Copyright (C) 2020, <NAME> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <https://www.gnu.org/licenses/>. # pylint: disable=missing-docstring """Functions to interact with TMDb API."""<import_from_stmt>. api_utils<import_stmt>xbmc<try_stmt><block_start><import_from_stmt>typing Optional Text Dict List Any# pylint: disable=unused-import InfoType=Dict[Text Any]# pylint: disable=invalid-name <block_end><except_stmt>ImportError<block_start><pass><block_end>HEADERS=(('User-Agent' 'Kodi Movie scraper by Team Kodi') ('Accept' 'application/json') )<line_sep>api_utils.set_headers(dict(HEADERS))<line_sep>TMDB_PARAMS={'api_key':'<KEY>'}<line_sep>BASE_URL='https://api.themoviedb.org/3/{}'<line_sep>SEARCH_URL=BASE_URL.format('search/movie')<line_sep>FIND_URL=BASE_URL.format('find/{}')<line_sep>MOVIE_URL=BASE_URL.format('movie/{}')<line_sep>COLLECTION_URL=BASE_URL.format('collection/{}')<line_sep>CONFIG_URL=BASE_URL.format('configuration')<def_stmt>search_movie query year=<none> language=<none># type: (Text) -> List[InfoType] <block_start>""" Search for a movie :param query: movie title to search :param year: the year to search (optional) :param language: the language filter for TMDb (optional) :return: a list with found movies """<line_sep>xbmc.log('using title of %s to find movie'%query xbmc.LOGDEBUG)<line_sep>theurl=SEARCH_URL<line_sep>params=_set_params(<none> language)<line_sep>params['query']=query<if_stmt>year<is><not><none><block_start>params['year']=str(year)<block_end><return>api_utils.load_info(theurl params=params)<block_end><def_stmt>find_movie_by_external_id external_id language=<none># type: (Text) -> List[InfoType] <block_start>""" Find movie based on external ID :param external_id: external ID :param language: the language filter for TMDb (optional) :return: the movie or error """<line_sep>xbmc.log('using external id of %s to find movie'%external_id xbmc.LOGDEBUG)<line_sep>theurl=FIND_URL.format(external_id)<line_sep>params=_set_params(<none> language)<line_sep>params['external_source']='imdb_id'<line_sep><return>api_utils.load_info(theurl params=params)<block_end><def_stmt>get_movie mid language=<none> append_to_response=<none># type: (Text) -> List[InfoType] <block_start>""" Get movie details :param mid: TMDb movie ID :param language: the language filter for TMDb (optional) :param append_to_response: the additional data to get from TMDb (optional) :return: the movie or error """<line_sep>xbmc.log('using movie id of %s to get movie details'%mid xbmc.LOGDEBUG)<line_sep>theurl=MOVIE_URL.format(mid)<line_sep><return>api_utils.load_info(theurl params=_set_params(append_to_response language))<block_end><def_stmt>get_collection collection_id language=<none> append_to_response=<none># type: (Text) -> List[InfoType] <block_start>""" Get movie collection information :param collection_id: TMDb collection ID :param language: the language filter for TMDb (optional) :param append_to_response: the additional data to get from TMDb (optional)
:return: the collection or error """<line_sep>xbmc.log('using collection id of %s to get collection details'%collection_id xbmc.LOGDEBUG)<line_sep>theurl=COLLECTION_URL.format(collection_id)<line_sep><return>api_utils.load_info(theurl params=_set_params(append_to_response language))<block_end><def_stmt>get_configuration # type: (Text) -> List[InfoType] <block_start>""" Get configuration information :return: configuration details or error """<line_sep>xbmc.log('getting configuration details' xbmc.LOGDEBUG)<line_sep><return>api_utils.load_info(CONFIG_URL params=TMDB_PARAMS.copy())<block_end><def_stmt>_set_params append_to_response language<block_start>params=TMDB_PARAMS.copy()<line_sep>img_lang='en,null'<if_stmt>language<is><not><none><block_start>params['language']=language<line_sep>img_lang='%s,en,null'%language[0:2]<block_end><if_stmt>append_to_response<is><not><none><block_start>params['append_to_response']=append_to_response<if_stmt>'images'<in>append_to_response<block_start>params['include_image_language']=img_lang<block_end><block_end><return>params<block_end>
# This sample tests the case where a lambda's type is determined using # bidirectional type inference and one or more of the parameters # corresponds to a generic type. <import_from_stmt>typing Callable TypeVar Generic Any<line_sep>T=TypeVar("T")<line_sep>MsgT=TypeVar("MsgT" bound="Msg[Any]")<class_stmt>Msg(Generic[T])<block_start>body:T<block_end><class_stmt>Request<block_start><ellipsis><block_end><def_stmt>check func:"Callable[[MsgT, int], object]"<arrow>MsgT<block_start><ellipsis><block_end>notification:Msg[Request]=check(<lambda>msg foo:(msg.body foo))<line_sep>
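# A hedged companion to the sample above: the simplest form of the same # bidirectional inference, where the declared Callable type gives the lambda # parameter its type without any annotation on the lambda itself. <import_from_stmt>typing Callable<line_sep>double:Callable[[int] int]=<lambda>n:n<times>2<line_sep><assert_stmt>double(21)<eq>42<line_sep>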
<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>qtrader.agents.base Agent<import_from_stmt>qtrader.utils.numpy softmax<class_stmt>PersistenceAgent(Agent)<block_start>"""Model-based **persistence** agent, acting based on last observation (i.e returns at t-1), using softmax function."""<line_sep>_id='persistence'<def_stmt>__init__ self<block_start><pass><block_end><def_stmt>act self observation<block_start>_returns=observation['returns']<if_stmt>_returns.isnull().any()# random sample <block_start>_values=pd.Series(np.random.uniform(0 1 len(_returns)) index=_returns.index name=_returns.name)<block_end><else_stmt># one step look back <block_start>_values=_returns<block_end><return>softmax(_values)<block_end><block_end>
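# A hedged sketch of the softmax weighting PersistenceAgent relies on, using # only numpy and pandas so it runs without qtrader; this softmax is a local # stand-in for qtrader.utils.numpy.softmax and the tickers are illustrative. <import_stmt>numpy<as>np<import_stmt>pandas<as>pd<def_stmt>softmax values<block_start>exp=np.exp(values-np.max(values))<line_sep><return>exp/exp.sum()<block_end>returns=pd.Series([0.02 -0.01 0.03] index=['AAPL' 'MSFT' 'GOOG'])<line_sep>weights=softmax(returns)<line_sep># assets with higher last-period returns get higher weights; weights sum to one <assert_stmt>np.isclose(weights.sum() 1.0)<line_sep>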
<import_stmt>os<import_stmt>anyio<import_stmt>logging<import_from_stmt>anyio run<as>_anyio_run<line_sep>log=logging.getLogger(__name__)<def_stmt>_new_run func *args backend=<none> backend_options=<none><block_start><if_stmt>backend<is><none><block_start>backend=os.getenv("PURERPC_BACKEND" "asyncio")<block_end>log.info("Selected {} backend".format(backend))<if_stmt>backend<eq>"uvloop"<block_start><import_stmt>uvloop<line_sep>uvloop.install()<line_sep>backend="asyncio"<block_end><return>_anyio_run(func *args backend=backend backend_options=backend_options)<block_end><def_stmt>apply_monkeypatch <block_start>"""Apply AnyIO monkeypatches (should merge upstream)"""<line_sep>anyio.run=_new_run<block_end>
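# A hedged usage sketch for the patch above: once applied, anyio.run honours # the PURERPC_BACKEND variable; 'asyncio' keeps the sketch runnable without # extra installs, and the import path is assumed from purerpc's layout. <import_stmt>os<import_stmt>anyio<import_from_stmt>purerpc.anyio_monkeypatch apply_monkeypatch<line_sep>apply_monkeypatch()<line_sep>os.environ['PURERPC_BACKEND']='asyncio'<line_sep># 'uvloop' here would call uvloop.install() and still run on the asyncio backend anyio.run(anyio.sleep 0)<line_sep>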
<import_from_stmt>clusto.drivers.base Location<class_stmt>BasicZone(Location)<block_start>""" Basic zone driver. """<line_sep>_clusto_type="zone"<line_sep>_driver_name="basiczone"<block_end>
# -*- coding: utf-8 -*- """ Examples on how to use GPBoost for the Grabit model of Sigrist and Hirnschall (2019) @author: <NAME> """<import_stmt>sklearn.datasets<as>datasets<import_stmt>numpy<as>np<import_stmt>gpboost<as>gpb<line_sep>""" Example 1 """<line_sep># simulate data np.random.seed(1)<line_sep>n=10000<line_sep>X,lp=datasets.make_friedman3(n_samples=n)<line_sep>X_test,lp_test=datasets.make_friedman3(n_samples=n)<line_sep>lp=lp<times>5+0.2<line_sep>lp_test=lp_test<times>5+0.2<line_sep>y=np.random.normal(loc=lp scale=1)<line_sep>y_test=np.random.normal(loc=lp_test scale=1)<line_sep># apply censoring yu=8<line_sep>yl=5<line_sep>y[y<ge>yu]=yu<line_sep>y[y<le>yl]=yl<line_sep># censoring fractions print(np.sum(y<eq>yu)/n)<line_sep>print(np.sum(y<eq>yl)/n)<line_sep># train model and make predictions params={'objective':'tobit' 'verbose':0 'yl':yl 'yu':yu}<line_sep>dtrain=gpb.Dataset(X y)<line_sep>bst=gpb.train(params=params train_set=dtrain num_boost_round=100)<line_sep>y_pred=bst.predict(X_test)<line_sep># mean square error (approx. 1.1 for n=10'000) print("Test error of Grabit: "+str(((y_pred-y_test)<power>2).mean()))<line_sep># compare to standard least squares gradient boosting (approx. 1.8 for n=10'000) params={'objective':'regression_l2' 'verbose':0}<line_sep>bst=gpb.train(params=params train_set=dtrain num_boost_round=100)<line_sep>y_pred_ls=bst.predict(X_test)<line_sep>print("Test error of standard least squares gradient boosting: "+str(((y_pred_ls-y_test)<power>2).mean()))<line_sep># measure time <import_stmt>time<line_sep>params={'objective':'tobit' 'verbose':0 'yl':yl 'yu':yu}<line_sep>dtrain=gpb.Dataset(X y)<line_sep>start=time.time()<line_sep>bst=gpb.train(params=params train_set=dtrain num_boost_round=100)<line_sep>end=time.time()<line_sep>print(end-start)<line_sep># approx. 
0.1 sec for n=10'000 on a standard laptop """ Example 2: 2-d non-linear function """<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>mpl_toolkits.mplot3d Axes3D<def_stmt>nonlin_fct x1 x2<block_start>r=x1<power>2+x2<power>2<line_sep>r=np.pi<times>2<times>1<times>(r<power>0.75)<line_sep>f=2<times>np.cos(r)<line_sep><return>(f)<block_end><def_stmt>plot_2d_fct x1 x2 y title="2d function" elev=45 azim=120 zlim=<none> filename=<none><block_start>fig=plt.figure(figsize=(8 7))<line_sep>ax=Axes3D(fig)<if_stmt>zlim<is><not><none><block_start>ax.set_zlim3d(zlim)<line_sep>surf=ax.plot_surface(x1 x2 y rstride=1 cstride=1 cmap=plt.cm.BuPu edgecolor='k' vmax=zlim[1])<block_end><else_stmt><block_start>surf=ax.plot_surface(x1 x2 y rstride=1 cstride=1 cmap=plt.cm.BuPu edgecolor='k')<block_end>ax.set_xlabel("X1")<line_sep>ax.set_ylabel("X2")<line_sep>ax.set_zlabel('')<line_sep># set a pretty initial view angle ax.view_init(elev=elev azim=azim)<line_sep>plt.colorbar(surf)<line_sep>plt.suptitle(title)<line_sep>plt.subplots_adjust(top=0.9)<if_stmt>filename<is><none><block_start>plt.show()<block_end><else_stmt><block_start>plt.savefig(filename dpi=200)<block_end><block_end>## True function nx=100<line_sep>x=np.arange(-1+1/nx 1 2/nx)<line_sep>x1,x2=np.meshgrid(x x)<line_sep>yt=nonlin_fct(x1 x2)<line_sep>zlim=(-1.75 1.75)<line_sep>plot_2d_fct(x1 x2 yt title="True F" zlim=zlim)<line_sep># simulate data n=10000<line_sep>np.random.seed(10)<line_sep>X=np.random.rand(n 2)<line_sep>X=(X-0.5)<times>2<line_sep>y=nonlin_fct(X[: 0] X[: 1])+np.random.normal(scale=1 size=n)<line_sep># apply censoring yc=y.copy()<line_sep>yl=np.percentile(y q=33.33)<line_sep>yu=np.percentile(y q=66.66)<line_sep>yc[y<ge>yu]=yu<line_sep>yc[y<le>yl]=yl<line_sep># train Grabit model and make predictions params={'objective':'tobit' 'verbose':0 'yl':yl 'yu':yu 'sigma':1. 'learning_rate':0.1 'max_depth':3}<line_sep>dtrain=gpb.Dataset(X yc)<line_sep>bst=gpb.train(params=params train_set=dtrain num_boost_round=100)<line_sep>X_pred=np.transpose(np.array([x1.flatten() x2.flatten()]))<line_sep>y_pred=bst.predict(X_pred)<line_sep>plot_2d_fct(x1 x2 y_pred.reshape((100 -1)) title="Grabit" zlim=zlim)<line_sep># compare to standard least squares gradient boosting params={'objective':'regression_l2' 'verbose':0 'yl':yl 'yu':yu 'sigma':1. 'learning_rate':0.1 'max_depth':3}<line_sep>dtrain=gpb.Dataset(X yc)<line_sep>bst=gpb.train(params=params train_set=dtrain num_boost_round=100)<line_sep>X_pred=np.transpose(np.array([x1.flatten() x2.flatten()]))<line_sep>y_pred=bst.predict(X_pred)<line_sep>plot_2d_fct(x1 x2 y_pred.reshape((100 -1)) title="L2 Boosting" zlim=zlim)<line_sep>
<import_from_stmt>unittest TestCase<import_from_stmt>ToLowerCase ToLowerCase<class_stmt>ToLowerCaseTest(TestCase)<block_start><def_stmt>test_toLowerCase self<block_start>tlc=ToLowerCase()<line_sep>self.assertEqual(tlc.toLowerCase("Hello") "hello")<line_sep>self.assertEqual(tlc.toLowerCase("here") "here")<line_sep>self.assertEqual(tlc.toLowerCase("LOVELY") "lovely")<block_end><block_end>
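# The class under test is not shown in this document; a hedged reference # implementation that satisfies the assertions above (LeetCode 709 style, # shifting A-Z by 32, the ASCII distance to a-z, without using str.lower). <class_stmt>ToLowerCase<block_start><def_stmt>toLowerCase self s<block_start>out=[]<for_stmt>c s<block_start><if_stmt>'A'<le>c<le>'Z'<block_start>c=chr(ord(c)+32)<block_end>out.append(c)<block_end><return>''.join(out)<block_end><block_end><assert_stmt>ToLowerCase().toLowerCase('LOVELY')<eq>'lovely'<line_sep>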
<import_stmt>json<import_stmt>glob<import_stmt>os<line_sep>DATA_PATH='../fefeats/bsz16/epoch0'<line_sep>epoch=list(range(0 13))<line_sep>splits=['train' 'test' 'valid']<line_sep>MAX_WAVS_SPK={'train':100 'test':10 'valid':10}<line_sep>spk2count={}<line_sep>cfg={}<line_sep>spk2split={}<line_sep>spk2idx={}<line_sep>dataset=glob.glob('{}/all/*.npy'.format(DATA_PATH))<for_stmt>filename dataset<block_start>fname=os.path.basename(filename)<line_sep>bname=os.path.splitext(fname)[0]<line_sep>spk_id=bname.split('_')[0]<if_stmt>spk_id<not><in>spk2count<block_start>spk2count[spk_id]={'train':0 'test':0 'valid':0}<line_sep>spk2split[spk_id]=0<line_sep>spk2idx[spk_id]=len(spk2idx)<block_end>curr_split=spk2split[spk_id]<line_sep>curr_samples=spk2count[spk_id][splits[curr_split]]<if_stmt>curr_samples<ge>MAX_WAVS_SPK[splits[curr_split]]<block_start><if_stmt>curr_split<ge>len(splits)-1<block_start><continue><block_end>spk2split[spk_id]<augadd>1<block_end><else_stmt><block_start><if_stmt>splits[curr_split]<not><in>cfg<block_start>cfg[splits[curr_split]]={'wav_files':[] 'spk_ids':[]}<block_end>cfg[splits[curr_split]]['wav_files'].append(fname)<line_sep>cfg[splits[curr_split]]['spk_ids'].append(spk_id)<line_sep>spk2count[spk_id][splits[curr_split]]<augadd>1<block_end><block_end>cfg['spk2idx']=spk2idx<with_stmt>open('bsz16_fefeats_data.cfg' 'w')<as>cfg_f<block_start>cfg_f.write(json.dumps(cfg indent=2))<block_end>
# macro("example", lambda ctx: fetch(ctx, "site.title"))
<import_from_stmt>rdkit Chem<import_from_stmt>rdkit.Chem AllChem<import_stmt>pymatgen<as>mg<import_from_stmt>glob glob<line_sep>data={}<line_sep>data["HAHCOI"]="s1c2ccc3scc4ccc(c1)c2c34"<line_sep>data["JAPWIH"]="s1ccc2cc3sc4cc5ccsc5cc4c3cc12"<line_sep>data["WEXBOS"]="s1c(c2ccccc2)c(c2ccccc2)c2c1c(c(s2)c1ccccc1)c1ccccc1"<line_sep>data["LAGNAL"]="s1c(/C=N/[C@H](C)c2ccc(F)cc2)ccc1/C=N/[C@H](C)c1ccc(F)cc1"<line_sep>data["YICMOP"]="s1cccc1c1c(F)c(OC)c(c2sccc2)c(F)c1OC"<line_sep>data["MERQIM"]="s1c2c(c3c1SCCC3)cc1sc3SCCCc3c1c2"<line_sep>data["LUFHAW"]="CC1=CC2=C(S1)C3=CC4=C(C=C3C=C2)C5=C(C=C4)C=C(S5)C"<line_sep>#smi, cif = "s1c2ccc3scc4ccc(c1)c2c34", "HAHCOI.cif" #smi, cif = "s1ccc2cc3sc4cc5ccsc5cc4c3cc12", "JAPWIH.cif" #smi, cif = "s1c(c2ccccc2)c(c2ccccc2)c2c1c(c(s2)c1ccccc1)c1ccccc1", "WEXBOS.cif" #smi, cif = "s1c(/C=N/[C@H](C)c2ccc(F)cc2)ccc1/C=N/[C@H](C)c1ccc(F)cc1","LAGNAL.cif" #smi, cif = "s1cccc1c1c(F)c(OC)c(c2sccc2)c(F)c1OC", "YICMOP.cif" #smi, cif = "s1c2c(c3c1SCCC3)cc1sc3SCCCc3c1c2", "MERQIM.cif" #smi, cif = "CC1=CC2=C(S1)C3=CC4=C(C=C3C=C2)C5=C(C=C4)C=C(S5)C", "LUFHAW.cif" <for_stmt>file glob("*.cif")<block_start>name=file[:-4]<if_stmt>name<in>data.keys()<block_start>smi=data[name]<line_sep>m=Chem.MolFromSmiles(smi)<line_sep>m2=Chem.AddHs(m)<line_sep>AllChem.EmbedMolecule(m2)<line_sep>cids=AllChem.EmbedMultipleConfs(m2 numConfs=1)<line_sep>xyz=Chem.rdmolfiles.MolToXYZBlock(m2 0)<line_sep>mol=mg.Molecule.from_str(xyz fmt="xyz")<line_sep>mol.to(filename=name+".xyz")<block_end><block_end>
# Copyright 2009-2017 <NAME>. # This program is distributed under the MIT license. '''Defines various tools for manipulating windows.'''<import_stmt>wx<import_from_stmt>python_toolbox.freezing Freezer<class_stmt>WindowFreezer(Freezer)<block_start>'''Context manager for freezing the window while the suite executes.'''<def_stmt>__init__ self window<block_start>Freezer.__init__(self)<assert_stmt>isinstance(window wx.Window)<line_sep>self.window=window<block_end><def_stmt>freeze_handler self<block_start>self.window.Freeze()<block_end><def_stmt>thaw_handler self<block_start>self.window.Thaw()<block_end><block_end><class_stmt>FlagRaiser# todo: rename? <block_start>'''When called, raises a flag of a window and then calls some function.'''<def_stmt>__init__ self window attribute_name=<none> function=<none> delay=<none><block_start>''' Construct the flag raiser. `window` is the window we're acting on. `attribute_name` is the name of the flag that we set to True. `function` is the function we call after we set the flag. Default for `function` is `window.Refresh`. If we get a `delay` argument, then we don't call the function immediately, but wait for `delay` time, specified as seconds, then call it. If this flag raiser will be called again while the timer's on, it will not cause another function calling. '''<assert_stmt>isinstance(window wx.Window)<line_sep>self.window=window<line_sep>'''The window that the flag raiser is acting on.'''<line_sep>self.attribute_name=attribute_name<line_sep>'''The name of the flag that this flag raiser raises.'''<line_sep>self.function=function<or>window.Refresh<line_sep>'''The function that this flag raiser calls after raising the flag.'''<line_sep>self.delay=delay<line_sep>'''The delay, in seconds, that we wait before calling the function.'''<if_stmt>delay<is><not><none><block_start>self._delay_in_ms=delay<times>1000<line_sep>'''The delay in milliseconds.'''<line_sep># a one-shot wx.Timer owned by the window drives the delayed call self.timer=wx.Timer(self.window)<line_sep>'''The timer we use to call the function.'''<line_sep>self.window.Bind(wx.EVT_TIMER self._on_timer self.timer)<block_end><block_end><def_stmt>__call__ self<block_start>'''Raise the flag and call the function. (With delay if we set one.)'''<if_stmt>self.attribute_name<block_start>setattr(self.window self.attribute_name <true>)<block_end><if_stmt>self.delay<is><none><block_start>self.function()<block_end><else_stmt># self.delay is a positive number <block_start><if_stmt><not>self.timer.IsRunning()<block_start>self.timer.Start(self._delay_in_ms oneShot=<true>)<block_end><block_end><block_end><def_stmt>_on_timer self event<block_start><if_stmt>getattr(self.window self.attribute_name)<is><true><block_start>self.function()<block_end><block_end><block_end>
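# A hedged wiring sketch for FlagRaiser above, assumed to be in scope; the # frame class and flag name are illustrative and wxPython must be installed. # The immediate (no-delay) mode is shown: one call sets the flag and refreshes. <import_stmt>wx<class_stmt>Canvas(wx.Frame)<block_start><def_stmt>__init__ self<block_start>wx.Frame.__init__(self <none>)<line_sep>self.needs_recalculation=<false><line_sep># one callable both raises the flag and triggers the default Refresh self.flag_raiser=FlagRaiser(self 'needs_recalculation')<block_end><block_end>app=wx.App()<line_sep>canvas=Canvas()<line_sep>canvas.flag_raiser()<line_sep><assert_stmt>canvas.needs_recalculation<line_sep>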
<import_from_stmt>typing Dict List Optional Tuple<import_stmt>os<import_stmt>json<line_sep>header="| examples | master (KB) | pull request (KB) | diff (KB) | diff (%) |"<line_sep>sep="| --- | --- | --- | --- | --- |"<def_stmt>format_size size:Optional[int]<arrow>str<block_start><if_stmt>size<is><none><block_start><return>"N/A"<block_end><if_stmt>size<eq>0<block_start><return>"0"<block_end><return>f"{size/1024:.3f}"<block_end><def_stmt>format_diff_size master_size:Optional[int] pr_size:Optional[int]<arrow>Tuple[str str bool]<block_start><if_stmt>master_size<is><none><or>pr_size<is><none><block_start><return>("N/A" "N/A" <false>)<block_end>diff=pr_size-master_size<if_stmt>diff<eq>0<block_start><return>("0" "0.000%" <false>)<block_end>diff_percent=diff/master_size<line_sep><return>(f"{diff/1024:+.3f}" f"{diff_percent:+.3%}" abs(diff_percent)<ge>0.01)<block_end><def_stmt>main <arrow><none><block_start><with_stmt>open("size-cmp-info/.SIZE_CMP_INFO")<as>f<block_start>content=json.loads(f.read())<block_end>joined_sizes=content["sizes"]<line_sep>issue_number=content["issue_number"]<line_sep>lines:List[str]=[]<line_sep>significant_lines:List[str]=[]<line_sep>lines.append("### Size Comparison")<line_sep>lines.append("")<line_sep>lines.append("<details>")<line_sep>lines.append("")<line_sep>lines.append(header)<line_sep>lines.append(sep)<for_stmt>(i sizes) joined_sizes<block_start>(master_size pr_size)=sizes<line_sep>master_size_str=format_size(master_size)<line_sep>pr_size_str=format_size(pr_size)<line_sep>(diff_str diff_percent diff_significant)=format_diff_size(master_size pr_size)<line_sep>line_str=(f"| {i} | {master_size_str} | {pr_size_str} | "<concat>f"{diff_str} | {diff_percent} |")<line_sep>lines.append(line_str)<if_stmt>diff_significant<block_start>significant_lines.append(line_str)<block_end><block_end>lines.append("")<line_sep>lines.append("</details>")<line_sep>lines.append("")<if_stmt>significant_lines<block_start><if_stmt>len(significant_lines)<eq>1<block_start>lines.append("⚠️ The following example has changed its size significantly:")<block_end><else_stmt><block_start>lines.append("⚠️ The following examples have changed their size significantly:")<block_end>lines.append("")<line_sep>lines.append(header)<line_sep>lines.append(sep)<line_sep>lines.extend(significant_lines)<block_end><else_stmt><block_start>lines.append("✅ None of the examples has changed their size significantly.")<block_end>output="\n".join(lines)<with_stmt>open(os.environ["GITHUB_ENV"] "a+")<as>f<block_start>f.write(f"YEW_EXAMPLE_SIZES={json.dumps(output)}\n")<line_sep>f.write(f"PR_NUMBER={issue_number}\n")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_stmt>torch<import_from_stmt>transformers AutoTokenizer AutoModel <class_stmt>AttentionGetter<block_start>''' Wrapper Class to store model object. '''<def_stmt>__init__ self model_name:str<block_start>''' Each model has an associated tokenizer object. Load both. '''<line_sep>super().__init__()<line_sep>self.device=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu")<line_sep>self.model=AutoModel.from_pretrained(model_name output_attentions=<true>).to(self.device)<line_sep>self.tokenizer=AutoTokenizer.from_pretrained(model_name)<block_end><def_stmt>_grab_attn self context<block_start>''' function to get the attention for a model. First runs a forward pass and then extracts and formats attn. '''<line_sep>output=self.model(context)<line_sep># Grab the attention from the output # Format as Layer x Head x From x To attn=torch.cat([l<for>l output[-1]] dim=0)<line_sep>format_attn=[[[[str(round(att<times>100))<for>att head]<for>head layer]<for>layer tok]<for>tok attn.cpu().tolist()]<line_sep><return>format_attn<block_end><def_stmt>gpt_analyze_text self text:str<block_start>""" Works for GPT-2 Style Models """<line_sep># Run tokenizer toked=self.tokenizer.encode(text)<line_sep># GPT-2 generates text after a |<endoftext>| token. Add this: start_token=torch.full((1 1) self.tokenizer.bos_token_id device=self.device dtype=torch.long )<line_sep># Concatenate the text and start token context=torch.tensor(toked device=self.device dtype=torch.long).unsqueeze(0)<line_sep>context=torch.cat([start_token context] dim=1)<line_sep># Extract attention attn=self._grab_attn(context)<line_sep># Build payload <return>{"tokens":self.tokenizer.convert_ids_to_tokens(context[0]) "attention":attn }<block_end><def_stmt>bert_analyze_text self text:str<block_start>""" Works for BERT Style models """<line_sep># Tokenize toked=self.tokenizer.encode(text)<line_sep># Build Tensor context=torch.tensor(toked).unsqueeze(0).long()<line_sep># Extract Attention attn=self._grab_attn(context)<line_sep># Build Payload <return>{"tokens":self.tokenizer.convert_ids_to_tokens(toked) "attention":attn }<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>model=AttentionGetter("gpt2")<line_sep>payload=model.gpt_analyze_text("This is a test.")<line_sep>print(payload)<line_sep>model=AttentionGetter("distilbert-base-uncased")<line_sep>payload=model.bert_analyze_text("This is a test.")<line_sep>print(payload)<line_sep>print("checking successful!")<block_end>
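# A hedged, download-free sketch of what _grab_attn above does with the # attention tuple: HF models return one (batch, heads, from, to) tensor per # layer, and torch.cat over dim 0 stacks them because batch size is 1 here. <import_stmt>torch<line_sep>layers=6<line_sep>heads=12<line_sep>seq=5<line_sep>fake_output_attentions=tuple(torch.rand(1 heads seq seq)<for>_ range(layers))<line_sep>attn=torch.cat([l<for>l fake_output_attentions] dim=0)<line_sep><assert_stmt>attn.shape<eq>(layers heads seq seq)<line_sep>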
<import_from_stmt>timemachines.skaters.simple.thinking thinking_slow_and_slow<import_from_stmt>timemachines.skatertools.evaluation.evaluators hospital_mean_square_error_with_sporadic_fit<if_stmt>__name__<eq>'__main__'<block_start>print(hospital_mean_square_error_with_sporadic_fit(f=thinking_slow_and_slow n=120 fit_frequency=10))<block_end>
# -*- coding: utf-8 -*- # Form implementation generated from reading ui file 'mainVendas.ui', # licensing of 'mainVendas.ui' applies. # # Created: Mon Mar 18 10:46:46 2019 # by: PyQt5-uic running on PyQt5 5.12.1 # # WARNING! All changes made in this file will be lost! <import_from_stmt>PyQt5 QtCore QtGui QtWidgets<class_stmt>Ui_ct_MainVendas(object)<block_start><def_stmt>setMainVendas self ct_MainVendas<block_start>ct_MainVendas.setObjectName("ct_MainVendas")<line_sep>ct_MainVendas.resize(1000 600)<line_sep>ct_MainVendas.setStyleSheet("border:none")<line_sep>self.frameMainVendas=QtWidgets.QFrame(ct_MainVendas)<line_sep>self.frameMainVendas.setGeometry(QtCore.QRect(0 0 1000 600))<line_sep>self.frameMainVendas.setObjectName("frameMainVendas")<line_sep>self.fr_TopoMenuVendas=QtWidgets.QFrame(self.frameMainVendas)<line_sep>self.fr_TopoMenuVendas.setGeometry(QtCore.QRect(0 60 1000 40))<line_sep>self.fr_TopoMenuVendas.setStyleSheet("background:#E1DFE0;\n"<concat>"border: none;")<line_sep>self.fr_TopoMenuVendas.setObjectName("fr_TopoMenuVendas")<line_sep>self.bt_BuscaVendas=QtWidgets.QPushButton(self.fr_TopoMenuVendas)<line_sep>self.bt_BuscaVendas.setGeometry(QtCore.QRect(820 5 30 30))<line_sep>font=QtGui.QFont()<line_sep>font.setFamily("Arial")<line_sep>self.bt_BuscaVendas.setFont(font)<line_sep>self.bt_BuscaVendas.setCursor(QtCore.Qt.PointingHandCursor)<line_sep>self.bt_BuscaVendas.setFocusPolicy(QtCore.Qt.NoFocus)<line_sep>self.bt_BuscaVendas.setContextMenuPolicy(QtCore.Qt.NoContextMenu)<line_sep>self.bt_BuscaVendas.setStyleSheet("")<line_sep>self.bt_BuscaVendas.setText("")<line_sep>self.bt_BuscaVendas.setObjectName("bt_BuscaVendas")<line_sep>self.bt_AddNovoVenda=QtWidgets.QPushButton(self.fr_TopoMenuVendas)<line_sep>self.bt_AddNovoVenda.setGeometry(QtCore.QRect(900 0 100 40))<line_sep>self.bt_AddNovoVenda.setCursor(QtCore.Qt.PointingHandCursor)<line_sep>self.bt_AddNovoVenda.setFocusPolicy(QtCore.Qt.NoFocus)<line_sep>self.bt_AddNovoVenda.setContextMenuPolicy(QtCore.Qt.ActionsContextMenu)<line_sep>self.bt_AddNovoVenda.setStyleSheet("QPushButton {\n"<concat>"background-color: #7AB32E;\n"<concat>" }\n"<concat>"QPushButton:hover{\n"<concat>"background-color: #40a286\n"<concat>"}")<line_sep>self.bt_AddNovoVenda.setText("")<line_sep>self.bt_AddNovoVenda.setObjectName("bt_AddNovoVenda")<line_sep>self.tx_BuscaVendas=QtWidgets.QLineEdit(self.fr_TopoMenuVendas)<line_sep>self.tx_BuscaVendas.setGeometry(QtCore.QRect(0 5 300 30))<line_sep>font=QtGui.QFont()<line_sep>font.setFamily("Arial")<line_sep>self.tx_BuscaVendas.setFont(font)<line_sep>self.tx_BuscaVendas.setFocusPolicy(QtCore.Qt.ClickFocus)<line_sep>self.tx_BuscaVendas.setStyleSheet("QLineEdit {\n"<concat>"color: #000\n"<concat>"}\n"<concat>"")<line_sep>self.tx_BuscaVendas.setObjectName("tx_BuscaVendas")<line_sep>self.bt_PrintRelatVendas=QtWidgets.QPushButton(self.fr_TopoMenuVendas)<line_sep>self.bt_PrintRelatVendas.setGeometry(QtCore.QRect(860 5 30 30))<line_sep>font=QtGui.QFont()<line_sep>font.setFamily("Arial")<line_sep>self.bt_PrintRelatVendas.setFont(font)<line_sep>self.bt_PrintRelatVendas.setCursor(QtCore.Qt.PointingHandCursor)<line_sep>self.bt_PrintRelatVendas.setFocusPolicy(QtCore.Qt.NoFocus)<line_sep>self.bt_PrintRelatVendas.setContextMenuPolicy(QtCore.Qt.NoContextMenu)<line_sep>self.bt_PrintRelatVendas.setText("")<line_sep>self.bt_PrintRelatVendas.setObjectName("bt_PrintRelatVendas")<line_sep>self.dt_InicioVenda=QtWidgets.QDateEdit(self.fr_TopoMenuVendas)<line_sep>self.dt_InicioVenda.setGeometry(QtCore.QRect(310 16 140 
20))<line_sep>self.dt_InicioVenda.setStyleSheet("QDateEdit {\n"<concat>"background: #E1DFE0;\n"<concat>"border: none;\n"<concat>"font-family: \"Arial\";\n"<concat>"font-size: 20px;\n"<concat>"font-weight: bold;\n"<concat>"color: rgb(80,79,79)\n"<concat>"}\n"<concat>" QDateEdit::drop-down {\n"<concat>" subcontrol-origin: padding;\n"<concat>" subcontrol-position: top right;\n"<concat>" width: 25px;\n"<concat>" border-left-width: 1px;\n"<concat>" border-left-color: darkgray;\n"<concat>" border-left-style: solid; /* just a single line */\n"<concat>" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"<concat>" border-bottom-right-radius: 3px;\n"<concat>" }\n"<concat>"QDateEdit::down-arrow {\n"<concat>" image: url(:Images/Images/down.png);\n"<concat>" }")<line_sep>self.dt_InicioVenda.setCalendarPopup(<true>)<line_sep>self.dt_InicioVenda.setObjectName("dt_InicioVenda")<line_sep>self.lb_FormVenda_21=QtWidgets.QLabel(self.fr_TopoMenuVendas)<line_sep>self.lb_FormVenda_21.setGeometry(QtCore.QRect(310 2 120 16))<line_sep>self.lb_FormVenda_21.setStyleSheet("QLabel{\n"<concat>"font-size: 12px;\n"<concat>"font-family: \"Arial Unicode MS\";\n"<concat>"\n"<concat>"color:#1E87F0;\n"<concat>"border: none;\n"<concat>"}")<line_sep>self.lb_FormVenda_21.setObjectName("lb_FormVenda_21")<line_sep>self.lb_FormVenda_22=QtWidgets.QLabel(self.fr_TopoMenuVendas)<line_sep>self.lb_FormVenda_22.setGeometry(QtCore.QRect(460 2 120 16))<line_sep>self.lb_FormVenda_22.setStyleSheet("QLabel{\n"<concat>"font-size: 12px;\n"<concat>"font-family: \"Arial Unicode MS\";\n"<concat>"\n"<concat>"color:#1E87F0;\n"<concat>"border: none;\n"<concat>"}")<line_sep>self.lb_FormVenda_22.setObjectName("lb_FormVenda_22")<line_sep>self.dt_FimVenda=QtWidgets.QDateEdit(self.fr_TopoMenuVendas)<line_sep>self.dt_FimVenda.setGeometry(QtCore.QRect(460 16 140 20))<line_sep>self.dt_FimVenda.setStyleSheet("QDateEdit {\n"<concat>"background: #E1DFE0;\n"<concat>"border: none;\n"<concat>"font-family: \"Arial\";\n"<concat>"font-size: 20px;\n"<concat>"font-weight: bold;\n"<concat>"color: rgb(80,79,79)\n"<concat>"}\n"<concat>" QDateEdit::drop-down {\n"<concat>" subcontrol-origin: padding;\n"<concat>" subcontrol-position: top right;\n"<concat>" width: 25px;\n"<concat>" border-left-width: 1px;\n"<concat>" border-left-color: darkgray;\n"<concat>" border-left-style: solid; /* just a single line */\n"<concat>" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"<concat>" border-bottom-right-radius: 3px;\n"<concat>" }\n"<concat>"QDateEdit::down-arrow {\n"<concat>" image: url(:Images/Images/down.png);\n"<concat>" }")<line_sep>self.dt_FimVenda.setCalendarPopup(<true>)<line_sep>self.dt_FimVenda.setObjectName("dt_FimVenda")<line_sep>self.lb_FormVenda_29=QtWidgets.QLabel(self.fr_TopoMenuVendas)<line_sep>self.lb_FormVenda_29.setGeometry(QtCore.QRect(610 2 95 16))<line_sep>self.lb_FormVenda_29.setStyleSheet("QLabel{\n"<concat>"font-size: 12px;\n"<concat>"font-family: \"Arial Unicode MS\";\n"<concat>"\n"<concat>"color:#1E87F0;\n"<concat>"border: none;\n"<concat>"}")<line_sep>self.lb_FormVenda_29.setObjectName("lb_FormVenda_29")<line_sep>self.cb_pagamento=QtWidgets.QComboBox(self.fr_TopoMenuVendas)<line_sep>self.cb_pagamento.setGeometry(QtCore.QRect(610 16 95 20))<line_sep>self.cb_pagamento.setStyleSheet("QComboBox{\n"<concat>"background: #E1DFE0;\n"<concat>"border: none;\n"<concat>"font-family: \"Arial\";\n"<concat>"font-size: 11px;\n"<concat>"font-weight: bold;\n"<concat>"color: rgb(80,79,79)\n"<concat>"}\n"<concat>" QComboBox::drop-down 
{\n"<concat>" subcontrol-origin: padding;\n"<concat>" subcontrol-position: top right;\n"<concat>" width: 18px;\n"<concat>" border-left-width: 1px;\n"<concat>" border-left-color: darkgray;\n"<concat>" border-left-style: solid; /* just a single line */\n"<concat>" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"<concat>" border-bottom-right-radius: 3px;\n"<concat>" }\n"<concat>"QComboBox::down-arrow {\n"<concat>" image: url(:Images/Images/down.png);\n"<concat>" }\n"<concat>"")<line_sep>self.cb_pagamento.setObjectName("cb_pagamento")<line_sep>self.lb_FormVenda_30=QtWidgets.QLabel(self.fr_TopoMenuVendas)<line_sep>self.lb_FormVenda_30.setGeometry(QtCore.QRect(715 0 95 16))<line_sep>self.lb_FormVenda_30.setStyleSheet("QLabel{\n"<concat>"font-size: 12px;\n"<concat>"font-family: \"Arial Unicode MS\";\n"<concat>"\n"<concat>"color:#1E87F0;\n"<concat>"border: none;\n"<concat>"}")<line_sep>self.lb_FormVenda_30.setObjectName("lb_FormVenda_30")<line_sep>self.cb_entrega=QtWidgets.QComboBox(self.fr_TopoMenuVendas)<line_sep>self.cb_entrega.setGeometry(QtCore.QRect(715 14 95 20))<line_sep>self.cb_entrega.setStyleSheet("QComboBox{\n"<concat>"background: #E1DFE0;\n"<concat>"border: none;\n"<concat>"font-family: \"Arial\";\n"<concat>"font-size: 11px;\n"<concat>"font-weight: bold;\n"<concat>"color: rgb(80,79,79)\n"<concat>"}\n"<concat>" QComboBox::drop-down {\n"<concat>" subcontrol-origin: padding;\n"<concat>" subcontrol-position: top right;\n"<concat>" width: 18px;\n"<concat>" border-left-width: 1px;\n"<concat>" border-left-color: darkgray;\n"<concat>" border-left-style: solid; /* just a single line */\n"<concat>" border-top-right-radius: 3px; /* same radius as the QComboBox */\n"<concat>" border-bottom-right-radius: 3px;\n"<concat>" }\n"<concat>"QComboBox::down-arrow {\n"<concat>" image: url(:Images/Images/down.png);\n"<concat>" }\n"<concat>"")<line_sep>self.cb_entrega.setObjectName("cb_entrega")<line_sep>self.ct_containerVendas=QtWidgets.QFrame(self.frameMainVendas)<line_sep>self.ct_containerVendas.setGeometry(QtCore.QRect(0 100 1000 500))<line_sep>self.ct_containerVendas.setStyleSheet("border: none")<line_sep>self.ct_containerVendas.setObjectName("ct_containerVendas")<line_sep>self.tb_Vendas=QtWidgets.QTableWidget(self.ct_containerVendas)<line_sep>self.tb_Vendas.setGeometry(QtCore.QRect(0 0 1000 500))<line_sep>self.tb_Vendas.setProperty("cursor" QtCore.Qt.PointingHandCursor)<line_sep>self.tb_Vendas.setFocusPolicy(QtCore.Qt.WheelFocus)<line_sep>self.tb_Vendas.setStyleSheet("QTableView{\n"<concat>"color: #797979;\n"<concat>"font-weight: bold;\n"<concat>"font-size: 13px;\n"<concat>"background: #FFF;\n"<concat>"padding: 0 0 0 5px;\n"<concat>"}\n"<concat>"QHeaderView:section{\n"<concat>"background: #FFF;\n"<concat>"padding: 5px 0 ;\n"<concat>"font-size: 13px;\n"<concat>"font-family: \"Arial\";\n"<concat>"font-weight: bold;\n"<concat>"color: #797979;\n"<concat>"border: none;\n"<concat>"border-bottom: 2px solid #CCC;\n"<concat>"}\n"<concat>"QTableView::item {\n"<concat>"border-bottom: 2px solid #CCC;\n"<concat>"padding: 2px;\n"<concat>"}\n"<concat>"\n"<concat>"")<line_sep>self.tb_Vendas.setFrameShape(QtWidgets.QFrame.NoFrame)<line_sep>self.tb_Vendas.setFrameShadow(QtWidgets.QFrame.Plain)<line_sep>self.tb_Vendas.setAutoScrollMargin(20)<line_sep>self.tb_Vendas.setEditTriggers(QtWidgets.QAbstractItemView.NoEditTriggers)<line_sep>self.tb_Vendas.setTabKeyNavigation(<false>)<line_sep>self.tb_Vendas.setProperty("showDropIndicator" 
<false>)<line_sep>self.tb_Vendas.setDragDropOverwriteMode(<false>)<line_sep>self.tb_Vendas.setSelectionMode(QtWidgets.QAbstractItemView.NoSelection)<line_sep>self.tb_Vendas.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)<line_sep>self.tb_Vendas.setTextElideMode(QtCore.Qt.ElideMiddle)<line_sep>self.tb_Vendas.setShowGrid(<false>)<line_sep>self.tb_Vendas.setCornerButtonEnabled(<false>)<line_sep>self.tb_Vendas.setRowCount(0)<line_sep>self.tb_Vendas.setObjectName("tb_Vendas")<line_sep>self.tb_Vendas.setColumnCount(7)<line_sep>self.tb_Vendas.setRowCount(0)<line_sep>item=QtWidgets.QTableWidgetItem()<line_sep>self.tb_Vendas.setHorizontalHeaderItem(0 item)<line_sep>item=QtWidgets.QTableWidgetItem()<line_sep>self.tb_Vendas.setHorizontalHeaderItem(1 item)<line_sep>item=QtWidgets.QTableWidgetItem()<line_sep>self.tb_Vendas.setHorizontalHeaderItem(2 item)<line_sep>item=QtWidgets.QTableWidgetItem()<line_sep>self.tb_Vendas.setHorizontalHeaderItem(3 item)<line_sep>item=QtWidgets.QTableWidgetItem()<line_sep>self.tb_Vendas.setHorizontalHeaderItem(4 item)<line_sep>item=QtWidgets.QTableWidgetItem()<line_sep>self.tb_Vendas.setHorizontalHeaderItem(5 item)<line_sep>item=QtWidgets.QTableWidgetItem()<line_sep>self.tb_Vendas.setHorizontalHeaderItem(6 item)<line_sep>self.tb_Vendas.horizontalHeader().setDefaultSectionSize(120)<line_sep>self.tb_Vendas.horizontalHeader().setStretchLastSection(<true>)<line_sep>self.tb_Vendas.verticalHeader().setVisible(<false>)<line_sep>self.tb_Vendas.verticalHeader().setCascadingSectionResizes(<true>)<line_sep>self.tb_Vendas.verticalHeader().setDefaultSectionSize(50)<line_sep>self.fr_TituloVendas=QtWidgets.QFrame(self.frameMainVendas)<line_sep>self.fr_TituloVendas.setGeometry(QtCore.QRect(0 0 1000 60))<line_sep>self.fr_TituloVendas.setStyleSheet("border: none")<line_sep>self.fr_TituloVendas.setObjectName("fr_TituloVendas")<line_sep>self.lb_tituloVendas=QtWidgets.QLabel(self.fr_TituloVendas)<line_sep>self.lb_tituloVendas.setGeometry(QtCore.QRect(10 15 200 30))<line_sep>font=QtGui.QFont()<line_sep>font.setFamily("DejaVu Sans")<line_sep>font.setPointSize(18)<line_sep>font.setWeight(75)<line_sep>font.setBold(<true>)<line_sep>self.lb_tituloVendas.setFont(font)<line_sep>self.lb_tituloVendas.setStyleSheet("color: #FFF")<line_sep>self.lb_tituloVendas.setObjectName("lb_tituloVendas")<line_sep>self.tradMainVendas(ct_MainVendas)<line_sep>QtCore.QMetaObject.connectSlotsByName(ct_MainVendas)<block_end><def_stmt>tradMainVendas self ct_MainVendas<block_start>ct_MainVendas.setWindowTitle(QtWidgets.QApplication.translate("ct_MainVendas" "Frame" <none> -1))<line_sep>self.bt_BuscaVendas.setToolTip(QtWidgets.QApplication.translate("ct_MainVendas" "BUSCAR" <none> -1))<line_sep>self.tx_BuscaVendas.setPlaceholderText(QtWidgets.QApplication.translate("ct_MainVendas" "PROCURAR POR..." 
<none> -1))<line_sep>self.bt_PrintRelatVendas.setToolTip(QtWidgets.QApplication.translate("ct_MainVendas" "IMPRIMIR" <none> -1))<line_sep>self.dt_InicioVenda.setDisplayFormat(QtWidgets.QApplication.translate("ct_MainVendas" "dd/MM/yyyy" <none> -1))<line_sep>self.lb_FormVenda_21.setText(QtWidgets.QApplication.translate("ct_MainVendas" "DATA ÍNICIO" <none> -1))<line_sep>self.lb_FormVenda_22.setText(QtWidgets.QApplication.translate("ct_MainVendas" "DATA FIM" <none> -1))<line_sep>self.dt_FimVenda.setDisplayFormat(QtWidgets.QApplication.translate("ct_MainVendas" "dd/MM/yyyy" <none> -1))<line_sep>self.lb_FormVenda_29.setText(QtWidgets.QApplication.translate("ct_MainVendas" "PAGAMENTO" <none> -1))<line_sep>self.lb_FormVenda_30.setText(QtWidgets.QApplication.translate("ct_MainVendas" "ENTREGA" <none> -1))<line_sep>self.tb_Vendas.horizontalHeaderItem(0).setText(QtWidgets.QApplication.translate("ct_MainVendas" "ID" <none> -1))<line_sep>self.tb_Vendas.horizontalHeaderItem(2).setText(QtWidgets.QApplication.translate("ct_MainVendas" "CLIENTE" <none> -1))<line_sep>self.tb_Vendas.horizontalHeaderItem(3).setText(QtWidgets.QApplication.translate("ct_MainVendas" "EMISSÂO" <none> -1))<line_sep>self.tb_Vendas.horizontalHeaderItem(4).setText(QtWidgets.QApplication.translate("ct_MainVendas" "ENTREGA" <none> -1))<line_sep>self.tb_Vendas.horizontalHeaderItem(5).setText(QtWidgets.QApplication.translate("ct_MainVendas" "VALOR" <none> -1))<line_sep>self.tb_Vendas.horizontalHeaderItem(6).setText(QtWidgets.QApplication.translate("ct_MainVendas" "EDITAR" <none> -1))<line_sep>self.lb_tituloVendas.setText(QtWidgets.QApplication.translate("ct_MainVendas" "VENDAS" <none> -1))<block_end><block_end>
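# A hedged sketch of the standard wiring for a generated Ui class like the one # above: instantiate it, hand it the container widget, then show the widget. # Assumes PyQt5 is installed and that Ui_ct_MainVendas is importable/in scope. <import_stmt>sys<import_from_stmt>PyQt5 QtWidgets<class_stmt>VendasFrame(QtWidgets.QFrame)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.ui=Ui_ct_MainVendas()<line_sep># the generated setup method builds every child widget onto this frame self.ui.setMainVendas(self)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>app=QtWidgets.QApplication(sys.argv)<line_sep>frame=VendasFrame()<line_sep>frame.show()<line_sep>sys.exit(app.exec_())<block_end>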
<import_stmt>faker<import_from_stmt>test.test_project TestProject<import_from_stmt>test.test_scaffold TestScaffold<import_from_stmt>test.test_command_line_interface TestCommandLineInterface<import_from_stmt>test.test_environment_fetcher TestEnvironmentFetcher<import_from_stmt>test.test_config_listing TestConfigListing<import_from_stmt>test.job_submission *<line_sep>
"""Unit tests for multi-tower model."""<import_from_future_stmt> absolute_import division print_function unicode_literals <import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>resnet.configs get_config<import_from_stmt>resnet.configs test_configs<import_from_stmt>resnet.models get_model get_multi_gpu_model<import_from_stmt>resnet.models.multi_pass_optimizer MultiPassOptimizer<import_from_stmt>resnet.utils logger<import_from_stmt>resnet.utils.test_utils check_two_dict<line_sep>log=logger.get()<class_stmt>MultiPassOptimizerTests(tf.test.TestCase)<block_start><def_stmt>test_basic self<block_start>"""Tests multi pass optimizer basic behaviour."""<for_stmt>aggregate_method ["cumsum" "storage"]<block_start><with_stmt>tf.Graph().as_default() tf.Session()<as>sess log.verbose_level(2)<block_start>opt=tf.train.GradientDescentOptimizer(0.1)<line_sep>mp_opt=MultiPassOptimizer(opt 2 aggregate_method=aggregate_method)<line_sep>a=tf.get_variable("a" shape=[10 12] initializer=tf.constant_initializer(0.0))<line_sep>b=tf.get_variable("b" shape=[11 13] initializer=tf.constant_initializer(0.0))<line_sep>da1=tf.ones([10 12])<times>0.4<line_sep>da2=tf.ones([10 12])<times>0.6<line_sep>db1=tf.ones([11 13])<times>0.8<line_sep>db2=tf.ones([11 13])<times>1.0<line_sep>gv1=[(da1 a) (db1 b)]<line_sep>gv2=[(da2 a) (db2 b)]<line_sep>op1=mp_opt.apply_gradients(gv1)<line_sep>op2=mp_opt.apply_gradients(gv2)<line_sep>sess.run(tf.global_variables_initializer())<line_sep>sess.run([op1])<line_sep>sess.run([op2])<line_sep>a,b=sess.run([a b])<line_sep># Final value equals -learning_rate * average_gradients. np.testing.assert_allclose(a -np.ones([10 12])<times>0.05)<line_sep>np.testing.assert_allclose(b -np.ones([11 13])<times>0.09)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
# Copyright (c) Facebook, Inc. and its affiliates. # # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. <import_from_stmt>.extracted_features_dataset ExtractedFeaturesDataset<import_from_stmt>.random_input_dataset RandomInputDataset<line_sep>__all__=["ExtractedFeaturesDataset" "RandomInputDataset" ]<line_sep>
""" Test Boolean Function arrays """<import_from_stmt>nose.tools assert_raises<import_from_stmt>pyeda.boolalg.bdd bddvar BinaryDecisionDiagram<import_from_stmt>pyeda.boolalg.bfarray exprzeros exprvars fcat farray uint2exprs int2exprs <import_from_stmt>pyeda.boolalg.expr exprvar Expression<line_sep>X=exprvars('x' 4)<line_sep>Y=exprvars('y' 4)<line_sep>a,b,c,d,w,x,y,z=map(exprvar 'abcdwxyz')<def_stmt>test_fcat # expected Function or farray <block_start>assert_raises(TypeError fcat X Y 0)<assert_stmt>str(fcat(X[0] X[2:] Y[3] Y[:-2]))<eq>"farray([x[0], x[2], x[3], y[3], y[0], y[1]])"<block_end><def_stmt>test_farray # expected shape volume to match items <block_start>assert_raises(ValueError farray [X[0] X[1]] shape=((0 42) ))<line_sep># could not determine ftype parameter assert_raises(ValueError farray [])<line_sep># expected ftype to be a type assert_raises(TypeError farray [X[0] X[1]] ftype=42)<line_sep># expected ftype to match items assert_raises(ValueError farray [X[0] X[1]] ftype=BinaryDecisionDiagram)<line_sep># expected ftype to be a property subclass of Function assert_raises(TypeError farray [] ftype=int)<line_sep># expected a sequence of Function assert_raises(TypeError farray 42)<line_sep>assert_raises(TypeError farray [1 2 3 4])<line_sep># expected uniform dimensions assert_raises(ValueError farray [[a b] [w x y z] 42])<line_sep>assert_raises(ValueError farray [[a b] [w x y z]])<line_sep># expected uniform types assert_raises(ValueError farray [[a b] [c bddvar('d')]])<line_sep>assert_raises(ValueError farray [[a b] [bddvar('c') bddvar('d')]])<line_sep># _check_shape errors assert_raises(ValueError farray [a b c d] shape=((-1 3) ))<line_sep>assert_raises(ValueError farray [a b c d] shape=((3 -1) ))<line_sep>assert_raises(ValueError farray [a b c d] shape=((5 1) ))<line_sep>assert_raises(TypeError farray [a b c d] shape=(('foo' 'bar') ))<line_sep>assert_raises(TypeError farray [a b c d] shape=42)<line_sep>temp=farray([[a b] [c d]])<assert_stmt>str(temp)<eq>"""\ farray([[a, b], [c, d]])\ """<line_sep># __str__ Z=exprvars('z' 2 2 2)<assert_stmt>str(Z)<eq>"""\ farray([[[z[0,0,0], z[0,0,1]], [z[0,1,0], z[0,1,1]]], [[z[1,0,0], z[1,0,1]], [z[1,1,0], z[1,1,1]]]])\ """<assert_stmt>str(farray([] ftype=Expression))<eq>"farray([])"<line_sep># __getitem__ # expected <= M slice dimensions, got N assert_raises(ValueError X.__getitem__ (2 2))<line_sep>sel=exprvars('s' 2)<assert_stmt>X[sel].equivalent(~sel[0]&~sel[1]&X[0]|sel[0]&~sel[1]&X[1]|~sel[0]&sel[1]&X[2]|sel[0]&sel[1]&X[3])<assert_stmt>X[:2][sel[0]].equivalent(~sel[0]&X[0]|sel[0]&X[1])<line_sep># expected clog2(N) bits assert_raises(ValueError X.__getitem__ sel[0])<line_sep># slice step not supported assert_raises(ValueError X.__getitem__ slice(<none> <none> 2))<line_sep># type error assert_raises(TypeError X.__getitem__ 'foo')<line_sep># norm_index <assert_stmt>X[-1]<is>X[3]<line_sep>assert_raises(IndexError X.__getitem__ 42)<line_sep># norm_indices <assert_stmt>X[-3:-1]._items<eq>[X[-3] X[-2]]<assert_stmt><not>X[-8:-10]._items<assert_stmt><not>X[-10:-8]._items<assert_stmt><not>X[8:10]._items<assert_stmt><not>X[10:8]._items<assert_stmt><not>X[3:1]._items<line_sep># __setitem__ Z=exprzeros(4 4)<line_sep>Z[0 0]=X[0]<assert_stmt>Z._items[0]<is>X[0]<line_sep># expected item to be a Function assert_raises(TypeError Z.__setitem__ (0 0) 42)<line_sep>Z[0 :]=X[:4]<assert_stmt>Z._items[0:4]<eq>[X[0] X[1] X[2] X[3]]<line_sep># expected item to be an farray assert_raises(TypeError Z.__setitem__ (0 slice(<none> <none> <none>)) 42)<line_sep># expected 
item.size = ... assert_raises(ValueError Z.__setitem__ <ellipsis> X[:2])<line_sep># slice step not supported assert_raises(ValueError X.__setitem__ slice(<none> <none> 2) 42)<line_sep># type error assert_raises(TypeError X.__setitem__ 'foo' 42)<line_sep># __add__ <assert_stmt>(0+X)._items[0].is_zero()<assert_stmt>(X+0)._items[4].is_zero()<assert_stmt>(Y[0]+X)._items[0]<is>Y[0]<assert_stmt>(X+Y[0])._items[4]<is>Y[0]<assert_stmt>(X[:2]+Y[2:])._items<eq>[X[0] X[1] Y[2] Y[3]]<line_sep># expected Function or farray assert_raises(TypeError X.__add__ 42)<line_sep>assert_raises(TypeError X.__radd__ 42)<line_sep>A=exprvars('a' 2 5 6)<line_sep>B=exprvars('b' 2 5 6)<line_sep>C=exprvars('c' (1 3) 5 6)<line_sep># regular MDA will retain shape <assert_stmt>(A+B).shape<eq>((0 4) (0 5) (0 6))<line_sep># irregular MDA will not <assert_stmt>(A+C).shape<eq>((0 4<times>5<times>6) )<line_sep># regular MDA will retain shape <assert_stmt>(A<times>2).shape<eq>((0 4) (0 5) (0 6))<line_sep># irregular MDA will not <assert_stmt>(C<times>2).shape<eq>((0 4<times>5<times>6) )<line_sep># __mul__ # expected multiplier to be an int assert_raises(TypeError X.__mul__ 'foo')<line_sep># expected multiplier to be non-negative assert_raises(ValueError X.__mul__ -2)<assert_stmt>(X[:2]<times>2)._items<eq>[X[0] X[1] X[0] X[1]]<assert_stmt>(2<times>X[:2])._items<eq>[X[0] X[1] X[0] X[1]]<line_sep># offsets Z=exprzeros((1 5) (17 21))<assert_stmt>Z.offsets<eq>(1 17)<line_sep># reshape <assert_stmt>Z.reshape(4 4).shape<eq>((0 4) (0 4))<line_sep># expected shape with equal volume assert_raises(ValueError Z.reshape 42 42)<line_sep># restrict <assert_stmt>str(X.vrestrict({X:'0101'}))<eq>"farray([0, 1, 0, 1])"<line_sep># compose <assert_stmt>X.compose({X[0]:Y[0]})._items[0]<eq>Y[0]<line_sep># to_uint / to_int <assert_stmt>uint2exprs(42).to_uint()<eq>42<assert_stmt>uint2exprs(42 8).to_uint()<eq>42<line_sep># expected all functions to be a constant (0 or 1) form assert_raises(ValueError X.to_uint)<line_sep># expected num >= 0 assert_raises(ValueError uint2exprs -1)<line_sep># overflow assert_raises(ValueError uint2exprs 42 2)<line_sep>assert_raises(ValueError int2exprs 42 2)<assert_stmt>int2exprs(-42).to_int()<eq>-42<assert_stmt>int2exprs(-42 8).to_int()<eq>-42<assert_stmt>int2exprs(42).to_int()<eq>42<assert_stmt>int2exprs(42 8).to_int()<eq>42<line_sep># zext, sext <assert_stmt>X.zext(1)[4].is_zero()<assert_stmt>X.sext(1)[4]<is>X[3]<line_sep># __invert__, __or__, __and__, __xor__ <assert_stmt>str(~X)<eq>"farray([~x[0], ~x[1], ~x[2], ~x[3]])"<assert_stmt>str(X|Y)<eq>"farray([Or(x[0], y[0]), Or(x[1], y[1]), Or(x[2], y[2]), Or(x[3], y[3])])"<assert_stmt>str(X&Y)<eq>"farray([And(x[0], y[0]), And(x[1], y[1]), And(x[2], y[2]), And(x[3], y[3])])"<assert_stmt>str(X^Y)<eq>"farray([Xor(x[0], y[0]), Xor(x[1], y[1]), Xor(x[2], y[2]), Xor(x[3], y[3])])"<line_sep># _op_shape # expected farray input assert_raises(TypeError X.__or__ 42)<line_sep>Z=exprvars('z' 2 2)<assert_stmt>str(X|Z)<eq>"farray([Or(x[0], z[0,0]), Or(x[1], z[0,1]), Or(x[2], z[1,0]), Or(x[3], z[1,1])])"<line_sep>Z=exprvars('z' 2 3)<line_sep># expected operand sizes to match assert_raises(ValueError X.__or__ Z)<line_sep># lsh, rsh <assert_stmt>str(X.lsh(0))<eq>"(farray([x[0], x[1], x[2], x[3]]), farray([]))"<assert_stmt>str(X<lshift>0)<eq>"farray([x[0], x[1], x[2], x[3]])"<assert_stmt>str(X.lsh(2))<eq>"(farray([0, 0, x[0], x[1]]), farray([x[2], x[3]]))"<assert_stmt>str(X<lshift>2)<eq>"farray([0, 0, x[0], x[1]])"<assert_stmt>str(X<lshift>(2 Y[:2]))<eq>"farray([y[0], y[1], x[0], 
x[1]])"<assert_stmt>str(X.rsh(0))<eq>"(farray([x[0], x[1], x[2], x[3]]), farray([]))"<assert_stmt>str(X<rshift>0)<eq>"farray([x[0], x[1], x[2], x[3]])"<assert_stmt>str(X.rsh(2))<eq>"(farray([x[2], x[3], 0, 0]), farray([x[0], x[1]]))"<assert_stmt>str(X<rshift>2)<eq>"farray([x[2], x[3], 0, 0])"<assert_stmt>str(X<rshift>(2 Y[:2]))<eq>"farray([x[2], x[3], y[0], y[1]])"<line_sep>assert_raises(TypeError X.__lshift__ 'foo')<line_sep>assert_raises(ValueError X.__lshift__ -1)<line_sep>assert_raises(ValueError X.__lshift__ (2 Y))<line_sep>assert_raises(TypeError X.__rshift__ 'foo')<line_sep>assert_raises(ValueError X.__rshift__ -1)<line_sep>assert_raises(ValueError X.__rshift__ (2 Y))<line_sep># arsh <assert_stmt>str(X.arsh(0))<eq>"(farray([x[0], x[1], x[2], x[3]]), farray([]))"<assert_stmt>str(X.arsh(2))<eq>"(farray([x[2], x[3], x[3], x[3]]), farray([x[0], x[1]]))"<line_sep>assert_raises(ValueError X.arsh -1)<line_sep># unary ops <assert_stmt>X.uor().equivalent(X[0]|X[1]|X[2]|X[3])<assert_stmt>X.unor().equivalent(~(X[0]|X[1]|X[2]|X[3]))<assert_stmt>X.uand().equivalent(X[0]&X[1]&X[2]&X[3])<assert_stmt>X.unand().equivalent(~(X[0]&X[1]&X[2]&X[3]))<assert_stmt>X.uxor().equivalent(X[0]^X[1]^X[2]^X[3])<assert_stmt>X.uxnor().equivalent(~(X[0]^X[1]^X[2]^X[3]))<line_sep># decode <assert_stmt>str(farray([] ftype=Expression).decode())<eq>"farray([1])"<line_sep>parts=X[:2].decode()<assert_stmt>parts[0].equivalent(~X[0]&~X[1])<assert_stmt>parts[1].equivalent(X[0]&~X[1])<assert_stmt>parts[2].equivalent(~X[0]&X[1])<assert_stmt>parts[3].equivalent(X[0]&X[1])<block_end><def_stmt>test_dims2shape <block_start>assert_raises(ValueError exprzeros)<line_sep>assert_raises(ValueError exprzeros -1)<line_sep>assert_raises(ValueError exprzeros (-1 0))<line_sep>assert_raises(ValueError exprzeros (0 -1))<line_sep>assert_raises(ValueError exprzeros (1 0))<line_sep>assert_raises(TypeError exprzeros 'foo')<block_end>
# -*- coding: utf-8 -*-
<import_from_stmt>os path<import_from_stmt>wsgiref.util FileWrapper<import_from_stmt>django.http Http404<import_from_stmt>django.http HttpResponse<import_from_stmt>django.http HttpResponseRedirect<import_from_stmt>koalixcrm.crm.exceptions *<import_from_stmt>koalixcrm.djangoUserExtension.exceptions *<import_from_stmt>django.utils.translation ugettext<as>_<def_stmt>export_pdf calling_model_admin request whereToCreateFrom whatToCreate redirectTo<block_start>"""This method exports PDFs provided by different Models in the accounting application

    Args:
        calling_model_admin (ModelAdmin): The calling ModelAdmin must be provided for error message response.
        request: The request User is required to get the Calling User TemplateSets
                 and to know where to save the error message
        whereToCreateFrom (Model): The model from which a PDF should be exported
        whatToCreate (str): The document type that has to be created
        redirectTo (str): String that describes to where the method should redirect in case of an error

    Returns:
        HttpResponse with a PDF when successful
        HttpResponseRedirect when not successful

    Raises:
        Http404 exception if anything goes wrong"""<try_stmt><block_start>pdf=whereToCreateFrom.createPDF(request.user whatToCreate)<line_sep>response=HttpResponse(FileWrapper(open(pdf 'rb')) content_type='application/pdf')<line_sep>response['Content-Length']=path.getsize(pdf)<block_end><except_stmt>(TemplateSetMissing UserExtensionMissing)<as>e<block_start><if_stmt>isinstance(e UserExtensionMissing)<block_start>response=HttpResponseRedirect(redirectTo)<line_sep>calling_model_admin.message_user(request _("User Extension Missing"))<block_end><elif_stmt>isinstance(e TemplateSetMissing)<block_start>response=HttpResponseRedirect(redirectTo)<line_sep>calling_model_admin.message_user(request _("Templateset Missing"))<block_end><else_stmt><block_start><raise>Http404<block_end><block_end><return>response<block_end><def_stmt>export_xml callingModelAdmin request whereToCreateFrom whatToCreate redirectTo<block_start>"""This method exports XMLs provided by different Models in the accounting application

    Args:
        callingModelAdmin (ModelAdmin): The calling ModelAdmin must be provided for error message response.
        request: The request User is required to get the Calling User TemplateSets
                 and to know where to save the error message
        whereToCreateFrom (Model): The model from which an XML should be exported
        whatToCreate (str): The objects that have to be serialized
        redirectTo (str): String that describes to where the method should redirect in case of an error

    Returns:
        HttpResponse with an XML when successful
        HttpResponseRedirect when not successful

    Raises:
        Http404 exception if anything goes wrong"""<try_stmt><block_start>xml=whereToCreateFrom.createXML(request.user whatToCreate)<line_sep>response=HttpResponse(FileWrapper(open(xml 'rb')) content_type='application/xml')<line_sep>response['Content-Length']=path.getsize(xml)<block_end><except_stmt>(TemplateSetMissing UserExtensionMissing)<as>e<block_start><if_stmt>isinstance(e UserExtensionMissing)<block_start>response=HttpResponseRedirect(redirectTo)<line_sep>callingModelAdmin.message_user(request _("User Extension Missing"))<block_end><elif_stmt>isinstance(e TemplateSetMissing)<block_start>response=HttpResponseRedirect(redirectTo)<line_sep>callingModelAdmin.message_user(request _("Templateset Missing"))<block_end><else_stmt><block_start><raise>Http404<block_end><block_end><return>response<block_end>
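# --- Hedged usage sketch (not part of the original module): one way a ModelAdmin
# action might call export_pdf. The "invoice" document type, the redirect URL and
# the action itself are assumptions for illustration only. ---
<def_stmt>create_pdf_invoice model_admin request queryset<block_start>"""Admin action sketch: export the first selected object as an 'invoice' PDF."""<line_sep><return>export_pdf(model_admin request queryset.first() "invoice" "/admin/crm/invoice/")<block_end>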
<import_stmt>pook<import_stmt>urllib3<line_sep># Mock HTTP traffic only in the given context <with_stmt>pook.use()<block_start>(pook.get('httpbin.org/chunky').reply(200).body(['returned' 'as' 'chunks'] chunked=<true>))<line_sep># Intercept request http=urllib3.PoolManager()<line_sep>r=http.request('GET' 'httpbin.org/chunky')<line_sep>print('Chunks:' list(r.read_chunked()))<block_end>
<import_from_stmt>pathlib Path<import_stmt>logging<import_stmt>argparse<import_stmt>json<import_stmt>papermill<as>pm<def_stmt>run_all_notebooks args<block_start>"""Run all notebooks in the example directory."""<for_stmt>notebook Path(__file__).parent.parent.glob("examples/*.ipynb")<block_start>notebook_path=str(notebook.resolve())<if_stmt>len(args.notebook_name)<g>0<block_start><if_stmt><not>any([x<in>notebook_path<for>x args.notebook_name])<block_start>logging.info(f"Skipping: {notebook_path}")<line_sep><continue><block_end><block_end>nb=pm.execute_notebook(notebook_path notebook_path request_save_on_cell_execute=<true> kernel_name="python3" )<try_stmt><block_start>nb["metadata"]["kernelspec"]["display_name"]="Python 3"<line_sep>nb["metadata"]["kernelspec"]["name"]="python3"<block_end><except_stmt>KeyError<block_start><pass><block_end><with_stmt>open(notebook "w")<as>fp<block_start>json.dump(nb fp)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>logging.basicConfig(level=logging.INFO)<line_sep>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--notebook-name" action="append" default=[])# default=[] keeps len() safe when the flag is never passed
<line_sep>args,_=parser.parse_known_args()<line_sep>run_all_notebooks(args)<block_end>
<import_stmt>timeit<import_stmt>pandas<as>pd<import_from_stmt>sklearn.linear_model base<import_from_stmt>sklearn.linear_model LinearRegression<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>line_profiler LineProfiler<import_stmt>numpy<as>np<import_from_stmt>utility ols_lstsq ols_sklearn<line_sep># We learn that
#https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/linear_model/base.py#L438
# LinearRegression.fit is expensive because
# of calls to check_X_y, _preprocess_data and linalg.lstsq
# https://github.com/scikit-learn/scikit-learn/blob/1495f6924/sklearn/linear_model/base.py#L101
# _preprocess_data
# has 3 expensive lines - check_array, np.asarray, np.average
#https://github.com/scikit-learn/scikit-learn/blob/1495f69242646d239d89a5713982946b8ffcf9d9/sklearn/utils/validation.py#L600
# check_X_y
# checks for array for certain characteristics and lengths
#
df=pd.read_pickle('generated_ols_data.pickle')<line_sep>print(f"Loaded {df.shape} rows")<line_sep>est=LinearRegression()<line_sep>row=df.iloc[0]<line_sep>X=np.arange(row.shape[0]).reshape(-1 1).astype(np.float_)<line_sep>y=row.values# target vector, needed by the check_X_y profiling run below
<line_sep>lp=LineProfiler(est.fit)<line_sep>print("Run on a single row")<line_sep>lp.run("est.fit(X, row.values)")<line_sep>lp.print_stats()<line_sep>print("Run on 5000 rows")<line_sep>lp.run("df[:5000].apply(ols_sklearn, axis=1)")<line_sep>lp.print_stats()<line_sep>lp=LineProfiler(base._preprocess_data)<line_sep>lp.run("base._preprocess_data(X, row, fit_intercept=True)")<line_sep>lp.print_stats()<line_sep>lp=LineProfiler(base.check_X_y)<line_sep>lp.run("base.check_X_y(X, y, accept_sparse=['csr', 'csc', 'coo'], y_numeric=True, multi_output=True)")<line_sep>lp.print_stats()<line_sep>#%lprun -f est_diagnosis.fit est_diagnosis.fit(np.arange(rowx.shape[0]).reshape(-1, 1), rowx.values)
#lp.run("est_diagnosis.fit(np.arange(rowx.shape[0]).reshape(-1, 1).astype(np.float_), y.values)")
#lp.run("base._preprocess_data(np.arange(rowx.shape[0]).reshape(-1, 1).astype(np.float_), rowx, fit_intercept=True)")
<import_from_stmt>pyBN.inference.map_exact *<import_from_stmt>pyBN.inference.marginal_approx *<import_from_stmt>pyBN.inference.marginal_exact *<line_sep>
<import_stmt>demistomock<as>demisto<import_from_stmt>Cymulate cymulate_test fetch_incidents cymulate_get_incident_info Client CymulateModuleTypeEnum<line_sep>BASE_URL='https://api.cymulate.com/v1/'<line_sep>MOCK_TEST={"success":<true> "data":["Phishing Awareness" "Web Application Firewall" "Lateral Movement" "Data Exfiltration" "Immediate Threats Intelligence" "Email Gateway" "Endpoint Security" "Web Gateway" "Full Kill-Chain APT"]}<line_sep>FETCH_INCIDENTS_TEST={"success":<true> "data":[{"Id":"5dbeaf53a910862fa859491e" "Name":" Ursnif infection with Dridex and Powershell Empire" "Timestamp":"03/11/2019 05:43:31" "InProgress":<false>} {"Id":"5dbea88c357ca849ac41bb2e" "Name":"Pcap and malware for an ISC diary (Emotet + Trickbot)" "Timestamp":"03/11/2019 05:14:36" "InProgress":<false>} {"Id":"5d528f78705e364e9055033c" "Name":"BlackSquid Drops XMRig Miner" "Timestamp":"13/08/2019 06:22:48" "InProgress":<false>} {"Id":"5d25dc5d86d73c22203d919f" "Name":"dll2" "Timestamp":"10/07/2019 08:38:53" "InProgress":<false>} {"Id":"5cc7109ca842693cc0f15588" "Name":"hot files test 8" "Timestamp":"29/04/2019 10:56:28" "InProgress":<false>} {"Id":"5c8e6cbf3dd9fe08186d7b64" "Name":"Hancitor malspam infections from 2018-08-13 and 2018-08-14" "Timestamp":"17/03/2019 11:50:23" "InProgress":<false>}]}<line_sep>CYMULATE_GET_INCIDENT_INFO_TEST={"success":<true> "data":[{"Module":"Immediate Threats Intelligence" "Penetration_Vector":"-" "Attack_Payload":"2019-07-08-Ursnif-binary-retrieved-by-Word-macro_"<concat>"2b999360-a3f9-11e9-980e-633d1efd31f3.exe" "Name":" Ursnif infection with Dridex and Powershell Empire" "Timestamp":"03/11/2019 05:45:47" "Sha1":"ff57bfaed6db3379bbf69a19404a6e21668a7a52" "Sha256":"0894e82d9397d909099c98fe186354591ae86a73230700f462b72ae36c700ddf" "Md5":"ef99338df4078fab6e9a8cf6797a1d14" "Status":"Penetrated" "Attack_Vector":"Endpoint Security" "Attack_Type":"Antivirus" "Mitigation":"N/A" "Description":"N/A" "ID":"c1d33138a2101724889862152444ec7e" "Related_URLS":"N/A" "Related_Email_Addresses":"N/A"}]}<line_sep>TECHNICAL_INCIDENTS_IDS=['5dbeaf53a910862fa859491e' '5dbea88c357ca849ac41bb2e' '5d528f78705e364e9055033c' '5d25dc5d86d73c22203d919f' '5cc7109ca842693cc0f15588' '5c8e6cbf3dd9fe08186d7b64']<line_sep>MOCK_TIMESTAMP="2020-12-02T16%3A32%3A37"<line_sep>ATTACK_ID="5dbeaf53a910862fa859491e"<def_stmt>local_get_last_run <block_start><return>{}<block_end><def_stmt>test_test_client requests_mock<block_start>requests_mock.get(BASE_URL+'user/modules' json=MOCK_TEST)<line_sep>client=Client(base_url=BASE_URL headers={"x-token":'<PASSWORD>'} verify=<false>)<line_sep>cymulate_test(client=client is_fetch=<false>)<block_end><def_stmt>test_fetch_incidents mocker requests_mock<block_start>requests_mock.get(BASE_URL+'immediate-threats/ids?from={}'.format(MOCK_TIMESTAMP) json=FETCH_INCIDENTS_TEST)<for_stmt>incident_id TECHNICAL_INCIDENTS_IDS<block_start>requests_mock.get(BASE_URL+'immediate-threats/attack/technical/'+incident_id json=CYMULATE_GET_INCIDENT_INFO_TEST)<block_end>mocker.patch.object(demisto 'params' return_value={'fetch_time':MOCK_TIMESTAMP})<line_sep>mocker.patch.object(demisto 'getLastRun' side_effect=local_get_last_run)<line_sep>client=Client(base_url=BASE_URL headers={"x-token":'<PASSWORD>'} verify=<false>)<line_sep>next_run,incidents,remain_incidents=fetch_incidents(client=client module_type=CymulateModuleTypeEnum.IMMEDIATE_THREATS last_run={'last_fetch':'2020-12-02T16:32:37'} first_fetch_time={} only_penatrated=<false> limit=20 
integration_context=<none>)<assert_stmt>len(incidents)<eq>6<block_end><def_stmt>test_cymulate_get_incident_info mocker requests_mock<block_start>mocker.patch.object(demisto 'args' return_value={"module_type":CymulateModuleTypeEnum.IMMEDIATE_THREATS.name "attack_id":ATTACK_ID})<line_sep>requests_mock.get(BASE_URL+'immediate-threats/attack/technical/'+ATTACK_ID json=CYMULATE_GET_INCIDENT_INFO_TEST)<line_sep>client=Client(base_url=BASE_URL headers={"x-token":'<PASSWORD>'} verify=<false>)<line_sep># Get incident's parent id attack_id=demisto.args().get('attack_id')<line_sep>technical_info=cymulate_get_incident_info(client=client attack_id=attack_id)<assert_stmt>(technical_info[0]['ID']<eq>CYMULATE_GET_INCIDENT_INFO_TEST['data'][0]['ID'])<block_end>
<import_stmt>torch<class_stmt>Pool(torch.nn.Module)<block_start>"""A pool layer with mean/max/sum/last options."""<def_stmt>__init__ self op_type pool_dim keepdim=<true><block_start>super().__init__()<line_sep>self.op_type=op_type<line_sep>self.pool_dim=pool_dim<line_sep>self.keepdim=keepdim<assert_stmt>self.op_type<in>["last" "mean" "max" "sum"] "Pool() operation should be mean, max, sum or last."<if_stmt>self.op_type<eq>'last'<block_start>self.__pool_fn=<lambda>x:x.select(self.pool_dim -1).unsqueeze(0)<block_end><else_stmt><block_start><if_stmt>self.op_type<eq>'max'<block_start>self.__pool_fn=<lambda>x:torch.max(x dim=self.pool_dim keepdim=self.keepdim)[0]<block_end><elif_stmt>self.op_type<eq>'mean'<block_start>self.__pool_fn=<lambda>x:torch.mean(x dim=self.pool_dim keepdim=self.keepdim)<block_end><elif_stmt>self.op_type<eq>'sum'<block_start>self.__pool_fn=<lambda>x:torch.sum(x dim=self.pool_dim keepdim=self.keepdim)<block_end><block_end><block_end><def_stmt>forward self x<block_start><return>self.__pool_fn(x)<block_end><def_stmt>__repr__ self<block_start><return>"Pool(op_type={}, pool_dim={}, keepdim={})".format(self.op_type self.pool_dim self.keepdim)<block_end><block_end>
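# --- Hedged usage sketch (not part of the original module): pooling a batch of
# sequence embeddings over the time dimension; the shapes are illustrative. ---
<if_stmt>__name__<eq>"__main__"<block_start>embeddings=torch.randn(8 20 64)# (batch, seq_len, hidden)
<line_sep>mean_pool=Pool("mean" pool_dim=1)<line_sep>print(mean_pool(embeddings).shape)# torch.Size([8, 1, 64]) because keepdim=True
<line_sep>last_pool=Pool("last" pool_dim=1)<line_sep>print(last_pool(embeddings).shape)# torch.Size([1, 8, 64]) -- select() drops dim 1, then unsqueeze(0) adds a leading dim
<block_end>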
'''Autogenerated by xml_generate script, do not edit!'''<import_from_stmt>OpenGL platform<as>_p arrays<line_sep># Code generation uses this <import_from_stmt>OpenGL.raw.GL _types<as>_cs<line_sep># End users want this... <import_from_stmt>OpenGL.raw.GL._types *<import_from_stmt>OpenGL.raw.GL _errors<import_from_stmt>OpenGL.constant Constant<as>_C<import_stmt>ctypes<line_sep>_EXTENSION_NAME='GL_APPLE_vertex_array_range'<def_stmt>_f function<block_start><return>_p.createFunction(function _p.PLATFORM.GL 'GL_APPLE_vertex_array_range' error_checker=_errors._error_checker)<block_end>GL_STORAGE_CACHED_APPLE=_C('GL_STORAGE_CACHED_APPLE' 0x85BE)<line_sep>GL_STORAGE_CLIENT_APPLE=_C('GL_STORAGE_CLIENT_APPLE' 0x85B4)<line_sep>GL_STORAGE_SHARED_APPLE=_C('GL_STORAGE_SHARED_APPLE' 0x85BF)<line_sep>GL_VERTEX_ARRAY_RANGE_APPLE=_C('GL_VERTEX_ARRAY_RANGE_APPLE' 0x851D)<line_sep>GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE=_C('GL_VERTEX_ARRAY_RANGE_LENGTH_APPLE' 0x851E)<line_sep>GL_VERTEX_ARRAY_RANGE_POINTER_APPLE=_C('GL_VERTEX_ARRAY_RANGE_POINTER_APPLE' 0x8521)<line_sep>GL_VERTEX_ARRAY_STORAGE_HINT_APPLE=_C('GL_VERTEX_ARRAY_STORAGE_HINT_APPLE' 0x851F)<line_sep>@_f@_p.types(<none> _cs.GLsizei ctypes.c_void_p)<def_stmt>glFlushVertexArrayRangeAPPLE length pointer<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLenum _cs.GLint)<def_stmt>glVertexArrayParameteriAPPLE pname param<block_start><pass><block_end>@_f@_p.types(<none> _cs.GLsizei ctypes.c_void_p)<def_stmt>glVertexArrayRangeAPPLE length pointer<block_start><pass><block_end>
"""Add 'ports' column to certificate_associations table Revision ID: 4fe230f7a26e Revises: <KEY> Create Date: 2021-05-07 10:57:16.964743 """<line_sep># revision identifiers, used by Alembic. revision='4fe230f7a26e'<line_sep>down_revision='<KEY>'<import_stmt>sqlalchemy<as>sa<import_from_stmt>alembic op<import_from_stmt>sqlalchemy.dialects postgresql<def_stmt>upgrade # Add the "ports" column <block_start>op.add_column('certificate_associations' sa.Column('ports' postgresql.ARRAY(sa.Integer()) nullable=<true>))<line_sep># Make the existing foreign key columns non-nullable op.alter_column('certificate_associations' 'domain_id' existing_type=sa.INTEGER() nullable=<false>)<line_sep>op.alter_column('certificate_associations' 'certificate_id' existing_type=sa.INTEGER() nullable=<false>)<block_end><def_stmt>downgrade # Make the existing foreign key columns nullable <block_start>op.alter_column('certificate_associations' 'certificate_id' existing_type=sa.INTEGER() nullable=<true>)<line_sep>op.alter_column('certificate_associations' 'domain_id' existing_type=sa.INTEGER() nullable=<true>)<line_sep># Drop the "ports" column op.drop_column('certificate_associations' 'ports')<block_end>
""" Implementation of the original DQN Nature paper: https://storage.googleapis.com/deepmind-media/dqn/DQNNaturePaper.pdf Some of the complexity is captured via wrappers but the main components such as the DQN model itself, the training loop, the memory-efficient replay buffer are implemented from scratch. Some modifications: * Using Adam instead of RMSProp """<import_stmt>os<import_stmt>argparse<import_stmt>time<import_stmt>copy<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torch nn<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>torch.optim Adam<import_from_stmt>torch.utils.tensorboard SummaryWriter<import_stmt>utils.utils<as>utils<import_from_stmt>utils.replay_buffer ReplayBuffer<import_from_stmt>utils.constants *<import_from_stmt>models.definitions.DQN DQN<class_stmt>ActorLearner<block_start><def_stmt>__init__ self config env replay_buffer dqn target_dqn last_frame<block_start>self.start_time=time.time()<line_sep>self.config=config<line_sep>self.env=env<line_sep>self.last_frame=last_frame# always keeps the latest frame from the environment self.replay_buffer=replay_buffer<line_sep># DQN Models self.dqn=dqn<line_sep>self.target_dqn=target_dqn<line_sep># Logging/debugging-related self.debug=config['debug']<line_sep>self.log_freq=config['log_freq']<line_sep>self.episode_log_freq=config['episode_log_freq']<line_sep>self.grads_log_freq=config['grads_log_freq']<line_sep>self.checkpoint_freq=config['checkpoint_freq']<line_sep>self.tensorboard_writer=SummaryWriter()<line_sep>self.huber_loss=[]<line_sep>self.best_episode_reward=-np.inf<line_sep>self.best_dqn_model=<none># keeps a deep copy of the best DQN model so far (best = highest episode reward) # MSE/L2 between [-1,1] and L1 otherwise (as stated in the Nature paper) aka "Huber loss" self.loss=nn.SmoothL1Loss()<line_sep>self.optimizer=Adam(self.dqn.parameters() lr=config['learning_rate'])<line_sep>self.grad_clip_value=config['grad_clipping_value']<line_sep>self.acting_learning_step_ratio=config['acting_learning_step_ratio']<line_sep>self.num_warmup_steps=config['num_warmup_steps']<line_sep>self.batch_size=config['batch_size']<line_sep>self.gamma=config['gamma']# discount factor self.learner_cnt=0<line_sep>self.target_dqn_update_interval=config['target_dqn_update_interval']<line_sep># should perform a hard or a soft update of target DQN weights self.tau=config['tau']<block_end><def_stmt>collect_experience self# We're collecting more experience than we're doing weight updates (4x in the Nature paper) <block_start><for_stmt>_ range(self.acting_learning_step_ratio)<block_start>last_index=self.replay_buffer.store_frame(self.last_frame)<line_sep>state=self.replay_buffer.fetch_last_state()# state = 4 preprocessed last frames for Atari action=self.sample_action(state)<line_sep>new_frame,reward,done_flag,_=self.env.step(action)<line_sep>self.replay_buffer.store_action_reward_done(last_index action reward done_flag)<if_stmt>done_flag<block_start>new_frame=self.env.reset()<line_sep>self.maybe_log_episode()<block_end>self.last_frame=new_frame<if_stmt>self.debug<block_start>self.visualize_state(state)<line_sep>self.env.render()<block_end>self.maybe_log()<block_end><block_end><def_stmt>sample_action self state<block_start><if_stmt>self.env.get_total_steps()<l>self.num_warmup_steps<block_start>action=self.env.action_space.sample()# initial warm up period - no learning, acting randomly 
<block_end><else_stmt><block_start><with_stmt>torch.no_grad()<block_start>action=self.dqn.epsilon_greedy(state)<block_end><block_end><return>action<block_end><def_stmt>get_number_of_env_steps self<block_start><return>self.env.get_total_steps()<block_end><def_stmt>learn_from_experience self<block_start>current_states,actions,rewards,next_states,done_flags=self.replay_buffer.fetch_random_states(self.batch_size)<line_sep># Better than detaching: in addition to target dqn not being a part of the computational graph it also # saves time/memory because we're not storing activations during forward propagation needed for the backprop <with_stmt>torch.no_grad()# shape = (B, NA) -> (B, 1), where NA - number of actions # [0] because max returns (values, indices) tuples <block_start>next_state_max_q_values=self.target_dqn(next_states).max(dim=1 keepdim=<true>)[0]<line_sep># shape = (B, 1), TD targets. We need (1 - done) because when we're in a terminal state the next # state Q value should be 0 and we only use the reward information target_q_values=rewards+(1-done_flags)<times>self.gamma<times>next_state_max_q_values<block_end># shape = (B, 1), pick those Q values that correspond to the actions we made in those states current_state_q_values=self.dqn(current_states).gather(dim=1 index=actions)<line_sep>loss=self.loss(target_q_values current_state_q_values)<line_sep>self.huber_loss.append(loss.item())<line_sep>self.optimizer.zero_grad()<line_sep>loss.backward()# compute the gradients <if_stmt>self.grad_clip_value<is><not><none># potentially clip gradients for stability reasons <block_start>nn.utils.clip_grad_norm_(self.dqn.parameters() self.grad_clip_value)<block_end>self.optimizer.step()# update step self.learner_cnt<augadd>1<line_sep># Periodically update the target DQN weights (coupled to the number of DQN weight updates and not # env steps) <if_stmt>self.learner_cnt%self.target_dqn_update_interval<eq>0<block_start><if_stmt>self.tau<eq>1.<block_start>print('Update target DQN (hard update)')<line_sep>self.target_dqn.load_state_dict(self.dqn.state_dict())<block_end><else_stmt># soft update, the 2 branches can be merged together, leaving it like this for now <block_start><raise>Exception(f'Soft update is not yet implemented (hard update was used in the original paper)')<block_end><block_end><block_end>@staticmethod<def_stmt>visualize_state state<block_start>state=state[0].to('cpu').numpy()# (1/B, C, H, W) -> (C, H, W) stacked_frames=np.hstack([np.repeat((img<times>255).astype(np.uint8)[: : np.newaxis] 3 axis=2)<for>img state])# (C, H, W) -> (H, C*W, 3) plt.imshow(stacked_frames)<line_sep>plt.show()<block_end><def_stmt>maybe_log_episode self<block_start>rewards=self.env.get_episode_rewards()# we can do this thanks to the Monitor wrapper episode_lengths=self.env.get_episode_lengths()<line_sep>num_episodes=len(rewards)<if_stmt>self.episode_log_freq<is><not><none><and>num_episodes%self.episode_log_freq<eq>0<block_start>self.tensorboard_writer.add_scalar('Rewards per episode' rewards[-1] num_episodes)<line_sep>self.tensorboard_writer.add_scalar('Steps per episode' episode_lengths[-1] num_episodes)<block_end><if_stmt>rewards[-1]<g>self.best_episode_reward<block_start>self.best_episode_reward=rewards[-1]<line_sep>self.config['best_episode_reward']=self.best_episode_reward# metadata self.best_dqn_model=copy.deepcopy(self.dqn)<block_end><block_end># keep track of the model that gave the best reward <def_stmt>maybe_log 
self<block_start>num_steps=self.env.get_total_steps()<if_stmt>self.log_freq<is><not><none><and>num_steps<g>0<and>num_steps%self.log_freq<eq>0<block_start>self.tensorboard_writer.add_scalar('Epsilon' self.dqn.epsilon_value() num_steps)<if_stmt>len(self.huber_loss)<g>0<block_start>self.tensorboard_writer.add_scalar('Huber loss' np.mean(self.huber_loss) num_steps)<block_end>self.tensorboard_writer.add_scalar('FPS' num_steps/(time.time()-self.start_time) num_steps)<line_sep>self.huber_loss=[]<block_end># clear the loss values and start recollecting them again # Periodically save DQN models <if_stmt>self.checkpoint_freq<is><not><none><and>num_steps<g>0<and>num_steps%self.checkpoint_freq<eq>0<block_start>ckpt_model_name=f'dqn_{self.config["env_id"]}_ckpt_steps_{num_steps}.pth'<line_sep>torch.save(utils.get_training_state(self.config self.dqn) os.path.join(CHECKPOINTS_PATH ckpt_model_name))<block_end># Log the gradients <if_stmt>self.grads_log_freq<is><not><none><and>self.learner_cnt<g>0<and>self.learner_cnt%self.grads_log_freq<eq>0<block_start>total_grad_l2_norm=0<for_stmt>cnt,(name weight_or_bias_parameters) enumerate(self.dqn.named_parameters())<block_start>grad_l2_norm=weight_or_bias_parameters.grad.data.norm(p=2).item()<line_sep>self.tensorboard_writer.add_scalar(f'grad_norms/{name}' grad_l2_norm self.learner_cnt)<line_sep>total_grad_l2_norm<augadd>grad_l2_norm<power>2<block_end># As if we concatenated all of the params into a single vector and took L2 total_grad_l2_norm=total_grad_l2_norm<power>(1/2)<line_sep>self.tensorboard_writer.add_scalar(f'grad_norms/total' total_grad_l2_norm self.learner_cnt)<block_end><block_end><def_stmt>log_to_console self# keep it minimal for now, I mostly use tensorboard - feel free to expand functionality <block_start>print(f'Number of env steps = {self.get_number_of_env_steps()}')<block_end><block_end><def_stmt>train_dqn config<block_start>env=utils.get_env_wrapper(config['env_id'])<line_sep>replay_buffer=ReplayBuffer(config['replay_buffer_size'] crash_if_no_mem=config['dont_crash_if_no_mem'])<line_sep>utils.set_random_seeds(env config['seed'])<line_sep>linear_schedule=utils.LinearSchedule(config['epsilon_start_value'] config['epsilon_end_value'] config['epsilon_duration'])<line_sep>device=torch.device("cuda"<if>torch.cuda.is_available()<else>"cpu")<line_sep>dqn=DQN(env number_of_actions=env.action_space.n epsilon_schedule=linear_schedule).to(device)<line_sep>target_dqn=DQN(env number_of_actions=env.action_space.n).to(device)<line_sep># Don't get confused by the actor-learner terminology, DQN is not an actor-critic method, but conceptually # we can split the learning process into collecting experience/acting in the env and learning from that experience actor_learner=ActorLearner(config env replay_buffer dqn target_dqn env.reset())<while_stmt>actor_learner.get_number_of_env_steps()<l>config['num_of_training_steps']<block_start>num_env_steps=actor_learner.get_number_of_env_steps()<if_stmt>config['console_log_freq']<is><not><none><and>num_env_steps%config['console_log_freq']<eq>0<block_start>actor_learner.log_to_console()<block_end>actor_learner.collect_experience()<if_stmt>num_env_steps<g>config['num_warmup_steps']<block_start>actor_learner.learn_from_experience()<block_end><block_end>torch.save(# save the best DQN model overall (gave the highest reward in an episode) utils.get_training_state(config actor_learner.best_dqn_model) os.path.join(BINARIES_PATH utils.get_available_binary_name(config['env_id'])))<block_end><def_stmt>get_training_args 
<block_start>parser=argparse.ArgumentParser()<line_sep># Training related parser.add_argument("--seed" type=int help="Very important for reproducibility - set the random seed" default=23)<line_sep>parser.add_argument("--env_id" type=str help="Atari game id" default='BreakoutNoFrameskip-v4')<line_sep>parser.add_argument("--num_of_training_steps" type=int help="Number of training env steps" default=50000000)<line_sep>parser.add_argument("--acting_learning_step_ratio" type=int help="Number of experience collection steps for every learning step" default=4)<line_sep>parser.add_argument("--learning_rate" type=float default=1e-4)<line_sep>parser.add_argument("--grad_clipping_value" type=float default=5)# 5 is fairly arbitrarily chosen parser.add_argument("--replay_buffer_size" type=int help="Number of frames to store in buffer" default=1000000)<line_sep>parser.add_argument("--dont_crash_if_no_mem" action='store_false' help="Optimization - crash if not enough RAM before the training even starts (default=True)")<line_sep>parser.add_argument("--num_warmup_steps" type=int help="Number of steps before learning starts" default=50000)<line_sep>parser.add_argument("--target_dqn_update_interval" type=int help="Target DQN update freq per learning update" default=10000)<line_sep>parser.add_argument("--batch_size" type=int help="Number of states in a batch (from replay buffer)" default=32)<line_sep>parser.add_argument("--gamma" type=float help="Discount factor" default=0.99)<line_sep>parser.add_argument("--tau" type=float help='Set to 1 for a hard target DQN update, < 1 for a soft one' default=1.)<line_sep># epsilon-greedy annealing params parser.add_argument("--epsilon_start_value" type=float default=1.)<line_sep>parser.add_argument("--epsilon_end_value" type=float default=0.1)<line_sep>parser.add_argument("--epsilon_duration" type=int default=1000000)<line_sep># Logging/debugging/checkpoint related (helps a lot with experimentation) parser.add_argument("--console_log_freq" type=int help="Log to console after this many env steps (None = no logging)" default=10000)<line_sep>parser.add_argument("--log_freq" type=int help="Log metrics to tensorboard after this many env steps (None = no logging)" default=10000)<line_sep>parser.add_argument("--episode_log_freq" type=int help="Log metrics to tensorboard after this many episodes (None = no logging)" default=5)<line_sep>parser.add_argument("--checkpoint_freq" type=int help="Save checkpoint model after this many env steps (None = no checkpointing)" default=10000)<line_sep>parser.add_argument("--grads_log_freq" type=int help="Log grad norms after this many weight update steps (None = no logging)" default=2500)<line_sep>parser.add_argument("--debug" action='store_true' help='Train in debugging mode')<line_sep>args=parser.parse_args()<line_sep># Wrapping training configuration into a dictionary training_config=dict()<for_stmt>arg vars(args)<block_start>training_config[arg]=getattr(args arg)<block_end><return>training_config<block_end><if_stmt>__name__<eq>'__main__'# Train the DQN model <block_start>train_dqn(get_training_args())<block_end>
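# --- Hedged illustration (not part of the original file): the TD-target arithmetic
# from ActorLearner.learn_from_experience on tiny hand-made tensors, so the shapes
# and the (1 - done) masking are easy to follow. ---
<def_stmt>_td_target_demo <block_start>"""Illustrative only: prints TD targets for two dummy transitions (gamma=0.99)."""<line_sep>rewards=torch.tensor([[1.] [0.]])# shape (B, 1)
<line_sep>done_flags=torch.tensor([[0.] [1.]])# second transition is terminal
<line_sep>next_state_max_q=torch.tensor([[2.] [5.]])# max_a' Q_target(s', a')
<line_sep>targets=rewards+(1-done_flags)<times>0.99<times>next_state_max_q<line_sep>print(targets)# tensor([[2.9800], [0.0000]]) -- a terminal state keeps only its reward
<block_end>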
<import_stmt>ctypes<import_stmt>windows.generated_def<as>gdef<import_from_stmt>..apiproxy ApiProxy NeededParameter<import_from_stmt>..error fail_on_zero<class_stmt>Ktmw32Proxy(ApiProxy)<block_start>APIDLL="Ktmw32"<line_sep>default_error_check=staticmethod(fail_on_zero)<block_end>@Ktmw32Proxy()<def_stmt>CommitTransaction TransactionHandle<block_start><return>CommitTransaction.ctypes_function(TransactionHandle)<block_end>@Ktmw32Proxy()<def_stmt>CreateTransaction lpTransactionAttributes UOW CreateOptions IsolationLevel IsolationFlags Timeout Description<block_start><return>CreateTransaction.ctypes_function(lpTransactionAttributes UOW CreateOptions IsolationLevel IsolationFlags Timeout Description)<block_end>@Ktmw32Proxy()<def_stmt>RollbackTransaction TransactionHandle<block_start><return>RollbackTransaction.ctypes_function(TransactionHandle)<block_end>@Ktmw32Proxy()<def_stmt>OpenTransaction dwDesiredAccess TransactionId<block_start><return>OpenTransaction.ctypes_function(dwDesiredAccess TransactionId)<block_end>
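# --- Hedged usage sketch (not part of the original module): create and commit a
# kernel transaction. The literal arguments (no security attributes, no UOW,
# default options/isolation, infinite timeout, a demo description) follow the
# Win32 CreateTransaction documentation and are assumptions for illustration. ---
<def_stmt>demo_transaction <block_start>handle=CreateTransaction(<none> <none> 0 0 0 0 "demo transaction")<line_sep>CommitTransaction(handle)<block_end>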
# https://www.algoexpert.io/questions/Search%20In%20Sorted%20Matrix
# O(n + m) time | O(1) space
# where 'n' is the number of rows and 'm' is the number of columns
<def_stmt>search_in_sorted_matrix matrix target<block_start>row=0<line_sep>col=len(matrix[0])-1<while_stmt>row<l>len(matrix)<and>col<ge>0<block_start><if_stmt>matrix[row][col]<g>target<block_start>col<augsub>1<block_end><elif_stmt>matrix[row][col]<l>target<block_start>row<augadd>1<block_end><else_stmt><block_start><return>[row col]<block_end><block_end><return>[-1 -1]<block_end>
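# --- Hedged usage demo (not part of the original snippet): the grid is sorted
# along both its rows and its columns, as the algorithm requires. ---
<if_stmt>__name__<eq>"__main__"<block_start>grid=[[1 4 7 12] [2 5 19 31] [40 50 60 70]]<line_sep>print(search_in_sorted_matrix(grid 19))# [1, 2]
<line_sep>print(search_in_sorted_matrix(grid 3))# [-1, -1]
<block_end>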
<import_stmt>datetime<import_from_stmt>decimal Decimal BasicContext<import_from_stmt>email.utils parsedate<import_stmt>pytz<line_sep>ISO8601_DATE_FORMAT='%Y-%m-%d'<line_sep>ISO8601_DATETIME_FORMAT='%Y-%m-%dT%H:%M:%SZ'<def_stmt>iso8601_date s<block_start>""" Parses an ISO 8601 date string and returns a UTC date object or the string if the parsing failed. :param s: ISO 8601-formatted date string (2015-01-25) :return: """<try_stmt><block_start><return>datetime.datetime.strptime(s ISO8601_DATE_FORMAT).replace(tzinfo=pytz.utc).date()<block_end><except_stmt>(TypeError ValueError)<block_start><return>s<block_end><block_end><def_stmt>iso8601_datetime s<block_start>""" Parses an ISO 8601 datetime string and returns a UTC datetime object, or the string if parsing failed. :param s: ISO 8601-formatted datetime string (2015-01-25T12:34:56Z) :return: datetime or str """<try_stmt><block_start><return>datetime.datetime.strptime(s ISO8601_DATETIME_FORMAT).replace(tzinfo=pytz.utc)<block_end><except_stmt>(TypeError ValueError)<block_start><return>s<block_end><block_end><def_stmt>rfc2822_datetime s<block_start>""" Parses an RFC 2822 date string and returns a UTC datetime object, or the string if parsing failed. :param s: RFC 2822-formatted string date :return: datetime or str """<line_sep>date_tuple=parsedate(s)<if_stmt>date_tuple<is><none><block_start><return><none><block_end><return>datetime.datetime(*date_tuple[:6]).replace(tzinfo=pytz.utc)<block_end><def_stmt>decimal d<block_start>""" Parses a decimal string into a Decimal :param d: decimal string :return: Decimal """<if_stmt><not>d<block_start><return>d<block_end><return>Decimal(d BasicContext)<block_end><def_stmt>integer i<block_start>""" Parses an integer string into an int :param i: integer string :return: int """<try_stmt><block_start><return>int(i)<block_end><except_stmt>(TypeError ValueError)<block_start><return>i<block_end><block_end>
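# --- Hedged usage demo (not part of the original module); the sample inputs are
# illustrative only. ---
<if_stmt>__name__<eq>"__main__"<block_start>print(iso8601_datetime("2015-01-25T12:34:56Z"))# 2015-01-25 12:34:56+00:00
<line_sep>print(rfc2822_datetime("Sun, 25 Jan 2015 12:34:56 +0000"))# 2015-01-25 12:34:56+00:00
<line_sep>print(decimal("0.33"))# 0.33
<line_sep>print(integer("42") integer("not a number"))# 42 not a number
<block_end>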
# Collects all used URLs in a policy
<import_from_stmt>securityheaders.checkers InfoCollector Finding FindingType FindingSeverity<import_from_stmt>securityheaders.models ModelFactory<class_stmt>InfoURLCollector(InfoCollector)<block_start><def_stmt>check self headers opt_options=dict()<block_start>findings=[]<line_sep>headernames=ModelFactory().getheadernames()<for_stmt>header headernames<block_start>hdr=ModelFactory().getheader(header)<try_stmt><block_start>obj=self.extractheader(headers hdr)<if_stmt>obj<and>obj.parsedstring<block_start><if_stmt>hasattr(obj 'getdirectives')<and>hasattr(obj 'geturls')<block_start><for_stmt>directive obj.getdirectives()<block_start>urls=obj.geturls([directive])<if_stmt><not>urls<block_start>urls=[]<block_end><for_stmt>url urls<block_start>findings.append(Finding(obj.headerkey FindingType.INFO_URL str(url) FindingSeverity.NONE directive str(url)))<block_end><block_end><block_end><block_end><block_end><except_stmt>Exception<block_start><pass><block_end><block_end><return>findings<block_end><block_end>
<import_from_stmt>....vc.tests.contexts CITIZENSHIP_V1 CREDENTIALS_V1 EXAMPLES_V1 ODRL SCHEMA_ORG SECURITY_V1 SECURITY_V2 <import_from_stmt>. TEST_EURO_HEALTH TEST_SIGN_OBJ0 TEST_SIGN_OBJ1 TEST_SIGN_OBJ2 TEST_VALIDATE_ERROR_OBJ2 TEST_VERIFY_ERROR TEST_VERIFY_OBJ0 TEST_VERIFY_OBJ1 TEST_VERIFY_OBJ2 <line_sep>DOCUMENTS={TEST_SIGN_OBJ0["doc"]["id"]:TEST_SIGN_OBJ0["doc"] TEST_SIGN_OBJ1["doc"]["id"]:TEST_SIGN_OBJ1["doc"] TEST_VERIFY_ERROR["doc"]["id"]:TEST_VERIFY_ERROR["doc"] TEST_VERIFY_OBJ0["doc"]["id"]:TEST_VERIFY_OBJ0["doc"] TEST_VERIFY_OBJ1["doc"]["id"]:TEST_VERIFY_OBJ1["doc"] "https://w3id.org/citizenship/v1":CITIZENSHIP_V1 "https://www.w3.org/2018/credentials/v1":CREDENTIALS_V1 "https://www.w3.org/2018/credentials/examples/v1":EXAMPLES_V1 "https://www.w3.org/ns/odrl.jsonld":ODRL "http://schema.org/":SCHEMA_ORG "https://w3id.org/security/v1":SECURITY_V1 "https://w3id.org/security/v2":SECURITY_V2 ("https://essif-lab.pages.grnet.gr/interoperability/"<concat>"eidas-generic-use-case/contexts/ehic-v1.jsonld"):TEST_EURO_HEALTH }<def_stmt>custom_document_loader url:str options:dict# Check if full url (with fragments is in document map) <block_start><if_stmt>url<in>DOCUMENTS<block_start><return>{"contentType":"application/ld+json" "contextUrl":<none> "document":DOCUMENTS[url] "documentUrl":url }<block_end># Otherwise look if it is present without fragment without_fragment=url.split("#")[0]<if_stmt>without_fragment<in>DOCUMENTS<block_start><return>{"contentType":"application/ld+json" "contextUrl":<none> "document":DOCUMENTS[without_fragment] "documentUrl":url }<block_end><raise>Exception(f"No custom context support for {url}")<block_end>
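# --- Hedged usage demo (not part of the original module): fragments are stripped
# before the fallback lookup, so a "#key-1" suffix still resolves. ---
<if_stmt>__name__<eq>"__main__"<block_start>loaded=custom_document_loader("https://w3id.org/security/v2#key-1" {})<line_sep>print(loaded["documentUrl"])# https://w3id.org/security/v2#key-1
<block_end>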
<import_from_stmt>MicroTokenizer.training.train train_from_configure<def_stmt>train output_dir train_data configure_file=<none><block_start>train_from_configure([train_data] output_dir configure_file=configure_file)<block_end><if_stmt>__name__<eq>"__main__"<block_start><import_stmt>plac<line_sep>print(plac.call(train))<block_end>