import codecs
import pickle
import pandas as pd
import itertools
import os
from pathlib import Path
from collections import Counter
from sklearn.model_selection import train_test_split
import gluonnlp as nlp


def load_data_from_txt(file_full_name):
    with codecs.open(file_full_name, "r", "utf-8") as io:
        lines = io.readlines()

        # There was a parsing problem, so the three variables below were introduced!
        prev_line = ""
        save_flag = False
        count = 0
        sharp_lines = []

        for line in lines:
            if prev_line == "\n" or prev_line == "":
                save_flag = True
            if line[:3] == "## " and save_flag is True:
                count += 1
                sharp_lines.append(line[3:])
            if count == 3:
                count = 0
                save_flag = False
            prev_line = line

        list_of_source_no, list_of_source_str, list_of_target_str = (
            sharp_lines[0::3], sharp_lines[1::3], sharp_lines[2::3])

    return list_of_source_no, list_of_source_str, list_of_target_str


def main():
    cwd = Path.cwd()
    data_in = cwd / "data_in"
    train_data_in = data_in / "NER-master" / "말뭉치 - 형태소_개체명"
    list_of_file_name = [file_name for file_name in os.listdir(train_data_in) if '.txt' in file_name]
    list_of_full_file_path = [train_data_in / file_name for file_name in list_of_file_name]
    print("num of files: ", len(list_of_full_file_path))

    list_of_total_source_no, list_of_total_source_str, list_of_total_target_str = [], [], []
    for i, full_file_path in enumerate(list_of_full_file_path):
        list_of_source_no, list_of_source_str, list_of_target_str = load_data_from_txt(file_full_name=full_file_path)
        list_of_total_source_str.extend(list_of_source_str)
        list_of_total_target_str.extend(list_of_target_str)

    print("list_of_total_source_str: ", list_of_total_source_str[0])
    print("list_of_total_target_str: ", list_of_total_target_str[0])
    print("list_of_total_source_str: ", list_of_total_source_str[-10:])
    print("list_of_total_target_str: ", list_of_total_target_str[-10:])
    print("len(list_of_total_source_str): ", len(list_of_total_source_str))
    print("len(list_of_total_target_str): ", len(list_of_total_target_str))
    assert len(list_of_total_source_str) == len(list_of_total_target_str)

    corpus_full_path = '/var/tmp/corpus.txt'
    print("corpus_full_path:", corpus_full_path)
    with open(corpus_full_path, 'w', encoding='utf-8') as io:
        # write the aggregated corpus (the original iterated over `list_of_source_str`,
        # which only held the last file's sentences)
        for line in list_of_total_source_str:
            io.write(line)

    # This vocab cannot actually be used, because a pretrained KoBERT model will be loaded later
    # https://github.com/google/sentencepiece/issues/4
    # what is hard_vocab_limit?
    import sentencepiece as spm
    templates = '--input={} --model_prefix={} --vocab_size={} --hard_vocab_limit=false --user_defined_symbols=[CLS],[SEP],[MASK] --pad_id=0 --bos_id=1 --eos_id=2 --unk_id=3'
    prefix = 'sentencePiece'
    vocab_size = 8000
    cmd = templates.format(corpus_full_path, prefix, vocab_size)
    spm.SentencePieceTrainer.Train(cmd)

    # Load model
    sp = spm.SentencePieceProcessor()
    sp_model_path = '{}.model'.format(prefix)
    sp.Load(sp_model_path)
    print(sp.pad_id())   # result: 0
    print(sp.bos_id())   # result: 1
    print(sp.eos_id())   # result: 2
    print(sp.unk_id())   # result: 3

    tokenizer = nlp.data.SentencepieceTokenizer(path=sp_model_path)
    detokenizer = nlp.data.SentencepieceDetokenizer(path=sp_model_path)
    print(tokenizer)
    print(tokenizer("안녕하세요 ㅋㅋ"))
    print(detokenizer(tokenizer("안녕하세요 ㅋㅋ")))

    list_of_source_tokens = [tokenizer(source_str) for source_str in list_of_total_source_str]
    count_tokens = Counter(itertools.chain.from_iterable(list_of_source_tokens))
    print("list_of_tokens:", list_of_source_tokens)
    print("count_tokens: ", count_tokens)

    reserved_tokens = ['[CLS]', '[SEP]', '[MASK]']
    vocab = nlp.Vocab(counter=count_tokens, bos_token=None, eos_token=None, reserved_tokens=reserved_tokens)
    print(vocab.unknown_token)
    print(vocab.padding_token)
    print(vocab.token_to_idx)

    import json
    with open(data_in / 'token_to_index.json', 'w', encoding='utf-8') as io:
        json.dump(vocab.token_to_idx, io, ensure_ascii=False, indent=4)
    with open(data_in / 'vocab.pkl', mode='wb') as io:
        pickle.dump(vocab, io)
    with open(data_in / 'list_of_source_tokens.pkl', mode='wb') as io:
        pickle.dump(list_of_source_tokens, io)
    # with open(data_in / 'list_of_label.pkl', mode='wb') as io:
    #     pickle.dump(list_of_label, io)


if __name__ == '__main__':
    main()
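# A minimal follow-up sketch (assumed, not part of the original script): reading back the
# artifacts saved above and converting a tokenized sentence to indices. It assumes the
# same data_in layout and that gluonnlp's Vocab.to_indices is available.
import pickle
from pathlib import Path

data_in = Path.cwd() / "data_in"
with open(data_in / 'vocab.pkl', 'rb') as io:
    vocab = pickle.load(io)
with open(data_in / 'list_of_source_tokens.pkl', 'rb') as io:
    list_of_source_tokens = pickle.load(io)

# map the first tokenized sentence to integer indices
print(vocab.to_indices(list_of_source_tokens[0]))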
# BSD 3-Clause License; see https://github.com/scikit-hep/uproot4/blob/main/LICENSE

from __future__ import absolute_import

import pytest
import skhep_testdata

import uproot


def test():
    with open(skhep_testdata.data_path("uproot-Zmumu.root"), "rb") as f:
        assert uproot.open({f: "events"})["px1"].array(library="np")[:10].tolist() == [
            -41.1952876442,
            35.1180497674,
            35.1180497674,
            34.1444372454,
            22.7835819537,
            -19.8623073126,
            -19.8623073126,
            -20.1773731496,
            71.1437106445,
            51.0504859191,
        ]
import argparse
import collections

import numpy as np

parser = argparse.ArgumentParser(
    description='Convert T5 predictions into a TREC-formatted run.')
parser.add_argument('--predictions', type=str, required=True,
                    help='T5 predictions file.')
parser.add_argument('--query_run_ids', type=str, required=True,
                    help='File containing query doc id pairs paired with the T5\'s predictions file.')
parser.add_argument('--output', type=str, required=True,
                    help='run file in the TREC format.')
args = parser.parse_args()

examples = collections.defaultdict(dict)
with open(args.query_run_ids) as f_query_run_ids, open(args.predictions) as f_pred:
    for line_query_doc_id, line_pred in zip(f_query_run_ids, f_pred):
        query_id, doc_id_a, doc_id_b = line_query_doc_id.strip().split()
        doc_id_a = doc_id_a.split("#")[0]
        doc_id_b = doc_id_b.split("#")[0]
        _, score = line_pred.strip().split()
        score = float(score)
        if doc_id_a not in examples[query_id]:
            examples[query_id][doc_id_a] = 0
        if doc_id_b not in examples[query_id]:
            examples[query_id][doc_id_b] = 0
        examples[query_id][doc_id_a] += np.exp(score)
        examples[query_id][doc_id_b] += 1 - np.exp(score)

with open(args.output, 'w') as fout:
    for query_id, doc_ids_scores in examples.items():
        doc_ids_scores = [(doc_id, scores) for doc_id, scores in doc_ids_scores.items()]
        doc_ids_scores.sort(key=lambda x: x[1], reverse=True)
        for rank, (doc_id, score) in enumerate(doc_ids_scores):
            print(2 * (len(doc_ids_scores) - 1))
            fout.write(f'{query_id} Q0 {doc_id} {rank + 1} {score / (2 * (len(doc_ids_scores) - 1))} duot5\n')
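# A small standalone sketch (assumed, not part of the original script) of the aggregation
# used above: each pairwise probability that doc A beats doc B adds exp(score) to A and
# 1 - exp(score) to B; each doc appears in 2*(n-1) ordered pairs, hence the normalisation.
# The pair scores below are made-up toy values.
import numpy as np

pair_scores = {('d1', 'd2'): np.log(0.9), ('d2', 'd1'): np.log(0.2),
               ('d1', 'd3'): np.log(0.8), ('d3', 'd1'): np.log(0.3),
               ('d2', 'd3'): np.log(0.6), ('d3', 'd2'): np.log(0.5)}
agg = {d: 0.0 for d in ('d1', 'd2', 'd3')}
for (a, b), s in pair_scores.items():
    agg[a] += np.exp(s)        # evidence that a is more relevant than b
    agg[b] += 1 - np.exp(s)    # complementary evidence for b
n = len(agg)
normalised = {d: v / (2 * (n - 1)) for d, v in agg.items()}
print(sorted(normalised.items(), key=lambda kv: kv[1], reverse=True))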
""" Miscelaneous Loud ML helpers """<import_stmt>datetime<import_stmt>dateutil.parser<import_stmt>hashlib<import_stmt>json<import_stmt>numpy<as>np<import_stmt>math<import_stmt>itertools<import_from_stmt>uuid getnode<import_from_stmt>jinja2 Environment meta<import_stmt>loudml<import_from_stmt>loudml errors <line_sep>QUOTE_ESCAPE_TRANS=str.maketrans({"'":"\\'" })<line_sep>DOUBLEQUOTE_ESCAPE_TRANS=str.maketrans({'"':'\\"' })<def_stmt>clear_fields obj fields include_fields<block_start><if_stmt>include_fields<block_start>out={key:obj.get(key)<for>key set(fields)}<line_sep>obj.clear()<line_sep>obj.update(out)<block_end><else_stmt><block_start>out={key:obj.get(key)<for>key (set(obj.keys())-set(fields))}<line_sep>obj.clear()<line_sep>obj.update(out)<block_end><block_end><def_stmt>escape_quotes string<block_start>""" Escape simple quotes """<line_sep><return>string.translate(QUOTE_ESCAPE_TRANS)<block_end><def_stmt>escape_doublequotes string<block_start>""" Escaping double quotes """<line_sep><return>string.translate(DOUBLEQUOTE_ESCAPE_TRANS)<block_end><def_stmt>build_agg_name measurement field<block_start><return>"agg_%s-%s"%(measurement field)<block_end><def_stmt>parse_timedelta delta min=<none> max=<none> min_included=<true> max_included=<true> <block_start>""" Parse time delta """<if_stmt>isinstance(delta str)<and>len(delta)<g>0<block_start>unit=delta[-1]<if_stmt>unit<in>'0123456789'<block_start>unit='s'<line_sep>value=delta<block_end><else_stmt><block_start>value=delta[:-1]<block_end><block_end><else_stmt><block_start>unit='s'<line_sep>value=delta<block_end><try_stmt><block_start>value=float(value)<block_end><except_stmt>ValueError<block_start><raise>errors.Invalid("invalid time delta value")<block_end><if_stmt>unit<eq>'M'<block_start>value<augmul>30<line_sep>unit='d'<block_end><elif_stmt>unit<eq>'y'<block_start>value<augmul>365<line_sep>unit='d'<block_end>unit={'s':'seconds' 'm':'minutes' 'h':'hours' 'd':'days' 'w':'weeks' }.get(unit)<if_stmt>unit<is><none><block_start><raise>errors.Invalid("invalid time delta unit")<block_end>message="time delta must be {} {} seconds"<if_stmt>min<is><not><none><block_start><if_stmt>min_included<block_start><if_stmt>value<l>min<block_start><raise>errors.Invalid(message.format(">=" min))<block_end><block_end><else_stmt><block_start><if_stmt>value<le>min<block_start><raise>errors.Invalid(message.format(">" min))<block_end><block_end><block_end><if_stmt>max<is><not><none><block_start><if_stmt>max_included<block_start><if_stmt>value<g>max<block_start><raise>errors.Invalid(message.format("<=" max))<block_end><block_end><else_stmt><block_start><if_stmt>value<ge>max<block_start><raise>errors.Invalid(message.format("<" max))<block_end><block_end><block_end><return>datetime.timedelta(**{unit:value})<block_end><def_stmt>ts_to_datetime ts<block_start>""" Convert timestamp to datetime """<line_sep><return>datetime.datetime.fromtimestamp(ts datetime.timezone.utc)<block_end><def_stmt>ts_to_str ts<block_start>""" Convert timestamp to string """<line_sep><return>datetime_to_str(ts_to_datetime(ts))<block_end><def_stmt>str_to_datetime string<block_start>""" Convert string (ISO or relative) to timestamp """<if_stmt>string.startswith("now")<block_start>now=datetime.datetime.now()<if_stmt>len(string)<eq>3<block_start><return>now<block_end><return>now+parse_timedelta(string[3:])<block_end><else_stmt><block_start><return>dateutil.parser.parse(string)<block_end><block_end><def_stmt>str_to_ts string<block_start>""" Convert string to timestamp 
"""<line_sep><return>str_to_datetime(string).timestamp()<block_end><def_stmt>make_datetime mixed<block_start>""" Build a datetime object from a mixed input (second timestamp or string) """<try_stmt><block_start><return>ts_to_datetime(float(mixed))<block_end><except_stmt>ValueError<as>exn<block_start><if_stmt>isinstance(mixed str)<block_start><return>str_to_datetime(mixed)<block_end><else_stmt><block_start><raise>exn<block_end><block_end><block_end><def_stmt>make_ts mixed<block_start>""" Build a timestamp from a mixed input (second timestamp or ISO string or relative time) """<try_stmt><block_start><return>float(mixed)<block_end><except_stmt>ValueError<block_start><return>str_to_ts(mixed)<block_end><block_end><def_stmt>datetime_to_str dt<block_start>""" Convert datetime to string """<line_sep><return>"%s.%03dZ"%(dt.strftime("%Y-%m-%dT%H:%M:%S") dt.microsecond/1000)<block_end><def_stmt>dt_get_daytime dt<block_start>""" Return daytime of a datetime """<line_sep><return>(dt.timestamp()/3600)%24<block_end><def_stmt>dt_get_weekday dt<block_start>""" Return weekday of a datetime """<line_sep><return>dt.isoweekday()<block_end><class_stmt>DateRange<block_start><def_stmt>__init__ self from_date to_date<block_start>self.from_ts=make_ts(from_date)<line_sep>self.to_ts=make_ts(to_date)<if_stmt>self.to_ts<l>self.from_ts<block_start><raise>errors.Invalid("invalid date range: {}".format(self))<block_end><block_end>@classmethod<def_stmt>build_date_range cls from_date to_date bucket_interval<block_start>""" Fixup date range to be sure that is a multiple of bucket_interval return timestamps """<line_sep>from_ts=make_ts(from_date)<line_sep>to_ts=make_ts(to_date)<line_sep>from_ts=math.floor(from_ts/bucket_interval)<times>bucket_interval<line_sep>to_ts=math.ceil(to_ts/bucket_interval)<times>bucket_interval<line_sep><return>cls(from_ts to_ts)<block_end>@property<def_stmt>from_str self<block_start><return>ts_to_str(self.from_ts)<block_end>@property<def_stmt>to_str self<block_start><return>ts_to_str(self.to_ts)<block_end><def_stmt>__str__ self<block_start><return>"{}-{}".format(self.from_str self.to_str )<block_end><block_end><def_stmt>parse_addr addr default_port=<none><block_start>addr=addr.split(':')<line_sep><return>{'host':'localhost'<if>len(addr[0])<eq>0<else>addr[0] 'port':default_port<if>len(addr)<eq>1<else>int(addr[1]) }<block_end><def_stmt>make_bool mixed<block_start>""" Convert value to boolean """<if_stmt>mixed<is><none><block_start><return><false><block_end><if_stmt>isinstance(mixed bool)<block_start><return>mixed<block_end><try_stmt><block_start><return>int(mixed)<ne>0<block_end><except_stmt>ValueError<block_start><pass><block_end><if_stmt>isinstance(mixed str)<block_start>mixed=mixed.lower()<if_stmt>mixed<in>['' 'false' 'no']<block_start><return><false><block_end><if_stmt>mixed<in>['true' 'yes']<block_start><return><true><block_end><block_end><raise>ValueError<block_end><def_stmt>get_date_ranges from_ts max_ts span interval<block_start><while_stmt>(from_ts+span)<l>max_ts<block_start>to_ts=from_ts+span<line_sep><yield>ts_to_str(from_ts) ts_to_str(to_ts)<line_sep>from_ts<augadd>interval<block_end><block_end><def_stmt>load_hook hook_name hook_data model storage source<block_start>hook_type=hook_data.get('type')<line_sep>hook_cls=loudml.load_entry_point('loudml.hooks' hook_type)<if_stmt>hook_cls<is><none><block_start><raise>errors.NotFound("unknown hook type '{}'".format(hook_type))<block_end><return>hook_cls(hook_name hook_data.get('config') model storage source )<block_end><def_stmt>parse_constraint 
constraint<block_start><try_stmt><block_start>feature,_type,threshold=constraint.split(':')<block_end><except_stmt>ValueError<block_start><raise>errors.Invalid("invalid format for 'constraint' parameter")<block_end><if_stmt>_type<not><in>('low' 'high')<block_start><raise>errors.Invalid("invalid threshold type for 'constraint' parameter")<block_end><try_stmt><block_start>threshold=float(threshold)<block_end><except_stmt>ValueError<block_start><raise>errors.Invalid("invalid threshold for 'constraint' parameter")<block_end><return>{'feature':feature 'type':_type 'threshold':threshold }<block_end>#http://stackoverflow.com/questions/4284991/parsing-nested-parentheses-in-python-grab-content-by-level # noqa <def_stmt>parse_expression string<block_start>"""Generate parenthesized contents in string as pairs (level, contents)."""<line_sep>stack=[]<for_stmt>i,c enumerate(string)<block_start><if_stmt>c<eq>'('<block_start>stack.append(i)<block_end><elif_stmt>c<eq>')'<and>stack<block_start>start=stack.pop()<line_sep><yield>(len(stack) string[start+1:i])<block_end><block_end><block_end><def_stmt>nan_to_none x<block_start>""" Convert value to None if its NaN """<line_sep><return><none><if>x<is>np.nan<or>np.isnan(x)<else>x<block_end><def_stmt>list_from_np array<block_start>""" Convert numpy array into a jsonifiable list """<line_sep><return>[nan_to_none(x)<for>x array]<block_end><def_stmt>hash_dict data<block_start>ctx=hashlib.sha1()<line_sep>ctx.update(json.dumps(data sort_keys=<true>).encode("utf-8"))<line_sep><return>ctx.hexdigest()<block_end><def_stmt>chunks iterable size=1<block_start>iterator=iter(iterable)<for_stmt>first iterator# stops when iterator is depleted <block_start><def_stmt>chunk # construct generator for next chunk <block_start><yield>first# yield element from for loop <for_stmt>more itertools.islice(iterator size-1)<block_start><yield>more# yield more elements from the iterator <block_end><block_end><yield>chunk()# in outer generator, yield next chunk <block_end><block_end><def_stmt>my_host_id <block_start>""" Compute host identifier. Identifier is based on: - identifier computed by Python uuid library (usually MAC address) - MD5 hashing It is NOT based on: - system UUID in DMI entries (requires root privileges and may not be avalaible) - root filesystem UUID (requires root privileges) """<line_sep>m=hashlib.md5()<line_sep>m.update(str(getnode()).encode('ascii'))<line_sep><return>m.hexdigest()<block_end><def_stmt>find_undeclared_variables settings<block_start>env=Environment(autoescape=<true>)# autoescape added via Sonarlint ast=env.parse(json.dumps(settings))<line_sep><return>meta.find_undeclared_variables(ast)<block_end>
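# A brief illustration (assumed, not part of the original module) of the bucket-alignment
# arithmetic used by DateRange.build_date_range above: both ends of the range are rounded
# outward to whole bucket_interval seconds. The timestamps below are arbitrary examples.
import math

bucket_interval = 60
from_ts, to_ts = 1609459210.0, 1609459542.0
from_ts = math.floor(from_ts / bucket_interval) * bucket_interval   # 1609459200.0
to_ts = math.ceil(to_ts / bucket_interval) * bucket_interval        # 1609459560.0
print(from_ts, to_ts)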
import dash_bootstrap_components as dbc

carousel = dbc.Carousel(
    items=[
        {"key": "1", "src": "/static/images/slide1.svg"},
        {"key": "2", "src": "/static/images/slide2.svg"},
        {"key": "3", "src": "/static/images/slide3.svg"},
    ],
    controls=True,
    indicators=True,
)
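# A minimal usage sketch (assumed, not part of the original snippet) showing how such a
# carousel is typically mounted in a Dash app with the Bootstrap stylesheet loaded.
from dash import Dash, html
import dash_bootstrap_components as dbc

app = Dash(__name__, external_stylesheets=[dbc.themes.BOOTSTRAP])
app.layout = html.Div([
    dbc.Carousel(
        items=[{"key": "1", "src": "/static/images/slide1.svg"}],
        controls=True,
        indicators=True,
    )
])

if __name__ == "__main__":
    app.run_server(debug=True)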
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.

import torch

from .stage0 import Stage0
from .stage1 import Stage1
from .stage2 import Stage2
from .stage3 import Stage3


class GNMTSplit(torch.nn.Module):
    def __init__(self):
        super(GNMTSplit, self).__init__()
        self.stage0 = Stage0()
        self.stage1 = Stage1()
        self.stage2 = Stage2()
        self.stage3 = Stage3()

    def forward(self, input0, input1, input2):
        (out0, out2, out1, out3) = self.stage0(input0, input1, input2)
        (out12, out13, out4, out5, out6) = self.stage1(out0, out2, out1, out3)
        (out14, out15, out16, out17) = self.stage2(out12, out13, out4, out5, out6)
        out18 = self.stage3(out12, out14, out15, out16, out17)
        return out18

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, torch.nn.Conv2d):
                torch.nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.BatchNorm2d):
                torch.nn.init.constant_(m.weight, 1)
                torch.nn.init.constant_(m.bias, 0)
            elif isinstance(m, torch.nn.Linear):
                torch.nn.init.normal_(m.weight, 0, 0.01)
                torch.nn.init.constant_(m.bias, 0)
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function, unicode_literals

from gratipay.models.package import NPM, Package
from gratipay.testing import Harness


class Tests(Harness):

    def setUp(self):
        self.make_package()

    def test_trailing_slash_redirects(self):
        response = self.client.GxT('/on/npm/foo/')
        assert response.code == 302
        assert response.headers['Location'] == '/on/npm/foo'

    def test_anon_gets_signin_page_from_unclaimed(self):
        body = self.client.GET('/on/npm/foo').body
        assert 'foo</a> npm package on Gratipay:' in body

    def test_auth_gets_send_confirmation_page_from_unclaimed(self):
        self.make_participant('bob', claimed_time='now')
        body = self.client.GET('/on/npm/foo', auth_as='bob').body
        assert 'foo</a> npm package:' in body
        assert '<EMAIL>' in body

    def test_auth_gets_multiple_options_if_present(self):
        self.make_package(NPM, 'bar', 'Bar', ['<EMAIL>', '<EMAIL>'])
        self.make_participant('bob', claimed_time='now')
        body = self.client.GET('/on/npm/bar', auth_as='bob').body
        assert '<EMAIL>' in body
        assert '<EMAIL>' in body

    def test_auth_gets_something_if_no_emails(self):
        self.make_package(NPM, 'bar', 'Bar', [])
        self.make_participant('bob', claimed_time='now')
        body = self.client.GET('/on/npm/bar', auth_as='bob').body
        assert "No email addresses on file" in body

    def claim_package(self):
        foo = Package.from_names('npm', 'foo')
        alice = self.make_participant('alice', claimed_time='now')
        alice.start_email_verification('<EMAIL>', foo)
        nonce = alice.get_email('<EMAIL>').nonce
        alice.finish_email_verification('<EMAIL>', nonce)
        team = alice.get_teams()[0]
        assert team.package == foo
        return team.slug

    def test_package_redirects_to_project_if_claimed(self):
        self.claim_package()
        response = self.client.GxT('/on/npm/foo')
        assert response.code == 302
        assert response.headers['Location'] == '/foo/'

    def test_package_served_as_project_if_claimed(self):
        self.claim_package()
        assert 'owned by' in self.client.GET('/foo/').body


class Bulk(Harness):

    def setUp(self):
        self.make_package()

    def test_anon_gets_payment_flow(self):
        body = self.client.GET('/on/npm/').body
        assert 'Paste a package.json' in body
        assert '0 out of all 1 npm package' in body
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for the ParallelInterleaveDataset serialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np

from tensorflow.python.data.experimental.kernel_tests.serialization import dataset_serialization_test_base
from tensorflow.python.data.experimental.ops import interleave_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test


class ParallelInterleaveDatasetSerializationTest(
    dataset_serialization_test_base.DatasetSerializationTestBase):

  def setUp(self):
    self.input_values = np.array([4, 5, 6], dtype=np.int64)
    self.num_repeats = 2
    self.num_outputs = np.sum(self.input_values) * 2

  def _build_ds(self, cycle_length, block_length, sloppy=False):
    return (dataset_ops.Dataset.from_tensor_slices(self.input_values)
            .repeat(self.num_repeats)
            .apply(interleave_ops.parallel_interleave(
                lambda x: dataset_ops.Dataset.range(10 * x, 11 * x),
                cycle_length, block_length, sloppy)))

  def testSerializationCore(self):
    # cycle_length > 1, block_length > 1
    cycle_length = 2
    block_length = 3
    self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
                        self.num_outputs)
    # cycle_length = 1
    cycle_length = 1
    block_length = 3
    self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
                        self.num_outputs)
    # block_length = 1
    cycle_length = 2
    block_length = 1
    self.run_core_tests(lambda: self._build_ds(cycle_length, block_length),
                        self.num_outputs)

  def testSerializationWithSloppy(self):
    break_points = self.gen_break_points(self.num_outputs, 10)
    expected_outputs = np.repeat(
        np.concatenate([np.arange(10 * x, 11 * x) for x in self.input_values]),
        self.num_repeats).tolist()

    def run_test(cycle_length, block_length):
      actual = self.gen_outputs(
          lambda: self._build_ds(cycle_length, block_length, True),
          break_points, self.num_outputs)
      self.assertSequenceEqual(sorted(actual), expected_outputs)

    # cycle_length > 1, block_length > 1
    run_test(2, 3)
    # cycle_length = 1
    run_test(1, 3)
    # block_length = 1
    run_test(2, 1)

  def testSparseCore(self):

    def _map_fn(i):
      return sparse_tensor.SparseTensorValue(
          indices=[[0, 0], [1, 1]], values=(i * [1, -1]), dense_shape=[2, 2])

    def _interleave_fn(x):
      return dataset_ops.Dataset.from_tensor_slices(
          sparse_ops.sparse_to_dense(x.indices, x.dense_shape, x.values))

    def _build_dataset():
      return dataset_ops.Dataset.range(10).map(_map_fn).apply(
          interleave_ops.parallel_interleave(_interleave_fn, 1))

    self.run_core_tests(_build_dataset, 20)


if __name__ == '__main__':
  test.main()
# Copyright 2020 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Simple client to send profiling request to ModelServer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import tensorflow as tf

from tensorflow.python.profiler import profiler_client


def main(argv):
  server = argv[1] if len(argv) > 1 else 'localhost:8500'
  logdir = argv[2] if len(argv) > 2 else '/tmp'
  duration_ms = argv[3] if len(argv) > 3 else 2000
  profiler_client.trace(server, logdir, duration_ms)


if __name__ == '__main__':
  tf.compat.v1.app.run()
from dexy.doc import Doc
from tests.utils import assert_in_output
from tests.utils import wrap
from nose.exc import SkipTest


def test_shint_filter():
    with wrap() as wrapper:
        src = """
### @export "touch"
touch newfile.txt

### @export "ls"
ls
"""
        doc = Doc("example.sh|idio|shint|pyg", wrapper, [], contents=src)
        wrapper.run_docs(doc)
        assert list(doc.output_data().keys()) == ['1', 'touch', 'ls']


SCALA = """object HelloWorld {
  def main(args: Array[String]) {
    println("Hello, world!")
  }
}
"""


def test_scala_repl():
    raise SkipTest()
    with wrap() as wrapper:
        doc = Doc("HelloWorld.scala|scalai", wrapper, [], contents=SCALA)
        wrapper.run_docs(doc)
        assert "defined module HelloWorld" in str(doc.output_data())


RUST = """fn main() {
    io::println("hello?");
}"""


def test_rust_interactive():
    raise SkipTest("Need to get rust interactive filter working.")
    with wrap() as wrapper:
        doc = Doc("example.rs|rusti", wrapper, [], contents="1+1")
        wrapper.run_docs(doc)
        assert "rusti> 1+1\n2" in str(doc.output_data())


def test_rust():
    with wrap() as wrapper:
        doc = Doc("example.rs|rustc", wrapper, [], contents=RUST)
        wrapper.run_docs(doc)
        assert str(doc.output_data()) == "hello?\n"


PYTHON_CONTENT = """
x = 6
y = 7
"""


def test_python_filter_record_vars():
    with wrap() as wrapper:
        doc = Doc("example.py|pycon", wrapper, [],
                  pycon={'record-vars': True},
                  contents=PYTHON_CONTENT)
        wrapper.run_docs(doc)
        assert "doc:example.py-vars.json" in wrapper.nodes


def test_matlab_filter():
    raise SkipTest()
    assert_in_output('matlabint', "fprintf (1, 'Hello, world\\n')\n", "< M A T L A B (R) >")


def test_clj_filter():
    assert_in_output('cljint', '1+1', "user=> 1+1")


def test_ksh_filter():
    assert_in_output('kshint', 'ls', "example.txt")


def test_php_filter():
    assert_in_output('phpint', '1+1', "php > 1+1")


def test_rhino_filter():
    assert_in_output('rhinoint', '1+1', "js> 1+1")


def test_irb_filter():
    assert_in_output('irb', "puts 'hello'", ">> puts 'hello'")


def test_pycon_filter_single_section():
    assert_in_output('pycon', "print 'hello'", ">>> print 'hello'")


def test_ipython_filter():
    assert_in_output('ipython', "print 'hello'", ">>> print 'hello'")


def test_r_filter():
    assert_in_output('r', '1+1', '> 1+1')


def test_pycon_filter():
    with wrap() as wrapper:
        src = """
### @export "vars"
x = 6
y = 7

### @export "multiply"
x*y
"""
        node = Doc("example.py|idio|pycon", wrapper, [], contents=src)
        wrapper.run_docs(node)
        assert list(node.output_data().keys()) == ['1', 'vars', 'multiply']
        assert str(node.output_data()['vars']) == """
>>> x = 6
>>> y = 7"""
        assert str(node.output_data()['multiply']) == """
>>> x*y
42"""
"""make scheduler_params a separate JSONB field Revision ID: f5f55452fa58 Revises: <PASSWORD> Create Date: 2021-09-28 16:48:42.834962 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy.dialects postgresql<line_sep># revision identifiers, used by Alembic. revision='f5f55452fa58'<line_sep>down_revision='<PASSWORD>'<line_sep>branch_labels=<none><line_sep>depends_on=<none><def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.add_column('kpi' sa.Column('scheduler_params' postgresql.JSONB(astext_type=sa.Text()) nullable=<true>))<line_sep># ### end Alembic commands ### <block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ### <block_start>op.drop_column('kpi' 'scheduler_params')<line_sep># ### end Alembic commands ### <block_end>
# UK Holidays

import pandas as pd
from pandas import DateOffset, Timestamp
from pandas.tseries.holiday import Holiday, MO, previous_friday, weekend_to_monday

from pandas_market_calendars.market_calendar import MONDAY, TUESDAY

# New Year's Eve
LSENewYearsEve = Holiday(
    "New Year's Eve",
    month=12,
    day=31,
    observance=previous_friday,
)

# New Year's Day
LSENewYearsDay = Holiday(
    "New Year's Day",
    month=1,
    day=1,
    observance=weekend_to_monday,
)

# The Early May bank holiday has two exceptions, based on the 50th and 75th anniversaries of VE-Day:
# 1995-05-01 Early May bank holiday removed for VE-Day 50th anniversary
# 2020-05-04 Early May bank holiday removed for VE-Day 75th anniversary

# Early May bank holiday pre-1995
MayBank_pre_1995 = Holiday(
    "Early May Bank Holiday",
    month=5,
    offset=DateOffset(weekday=MO(1)),
    day=1,
    end_date=Timestamp('1994-12-31'),
)

# Early May bank holiday post-1995 and pre-2020
MayBank_post_1995_pre_2020 = Holiday(
    "Early May Bank Holiday",
    month=5,
    offset=DateOffset(weekday=MO(1)),
    day=1,
    start_date=Timestamp('1996-01-01'),
    end_date=Timestamp('2019-12-31'),
)

# Early May bank holiday post-2020
MayBank_post_2020 = Holiday(
    "Early May Bank Holiday",
    month=5,
    offset=DateOffset(weekday=MO(1)),
    day=1,
    start_date=Timestamp('2021-01-01'),
)

# The Spring bank holiday has three exceptions, based on the Golden, Diamond and Platinum Jubilees:
# 2002-05-27 Spring bank holiday removed for Golden Jubilee
# 2012-05-28 Spring bank holiday removed for Diamond Jubilee
# 2022-05-31 Spring bank holiday removed for Platinum Jubilee

# Spring bank holiday
SpringBank_pre_2002 = Holiday(
    "Spring Bank Holiday",
    month=5,
    day=31,
    offset=DateOffset(weekday=MO(-1)),
    end_date=Timestamp('2001-12-31'),
)

SpringBank_post_2002_pre_2012 = Holiday(
    "Spring Bank Holiday",
    month=5,
    day=31,
    offset=DateOffset(weekday=MO(-1)),
    start_date=Timestamp('2003-01-01'),
    end_date=Timestamp('2011-12-31'),
)

SpringBank_post_2012_pre_2022 = Holiday(
    "Spring Bank Holiday",
    month=5,
    day=31,
    offset=DateOffset(weekday=MO(-1)),
    start_date=Timestamp('2013-01-01'),
    end_date=Timestamp('2021-12-31'),
)

SpringBank_post_2022 = Holiday(
    "Spring Bank Holiday",
    month=5,
    day=31,
    offset=DateOffset(weekday=MO(-1)),
    start_date=Timestamp('2022-01-01'),
)

# Summer bank holiday
SummerBank = Holiday(
    "Summer Bank Holiday",
    month=8,
    day=31,
    offset=DateOffset(weekday=MO(-1)),
)

# Christmas Eve
ChristmasEve = Holiday(
    'Christmas Eve',
    month=12,
    day=24,
    observance=previous_friday,
)

# Christmas
Christmas = Holiday(
    "Christmas",
    month=12,
    day=25,
)

# If Christmas Day is a Saturday, then Monday the 27th is a holiday
# If Christmas Day is a Sunday, then Tuesday the 27th is a holiday
WeekendChristmas = Holiday(
    "Weekend Christmas",
    month=12,
    day=27,
    days_of_week=(MONDAY, TUESDAY),
)

# Boxing Day
BoxingDay = Holiday(
    "Boxing Day",
    month=12,
    day=26,
)

# If Boxing Day is a Saturday, then Monday the 28th is a holiday
# If Boxing Day is a Sunday, then Tuesday the 28th is a holiday
WeekendBoxingDay = Holiday(
    "Weekend Boxing Day",
    month=12,
    day=28,
    days_of_week=(MONDAY, TUESDAY),
)

# One-off holiday additions and removals in England
UniqueCloses = []

# VE-Day Anniversary
UniqueCloses.append(pd.Timestamp("1995-05-08", tz='UTC'))  # 50th Anniversary
UniqueCloses.append(pd.Timestamp("2020-05-08", tz='UTC'))  # 75th Anniversary

# Queen Elizabeth II Jubilees
# Silver Jubilee
UniqueCloses.append(pd.Timestamp("1977-06-07", tz='UTC'))

# Golden Jubilee
UniqueCloses.append(pd.Timestamp("2002-06-03", tz='UTC'))
UniqueCloses.append(pd.Timestamp("2002-06-04", tz='UTC'))

# Diamond Jubilee
UniqueCloses.append(pd.Timestamp("2012-06-04", tz='UTC'))
UniqueCloses.append(pd.Timestamp("2012-06-05", tz='UTC'))

# Platinum Jubilee
UniqueCloses.append(pd.Timestamp("2022-06-02", tz='UTC'))
UniqueCloses.append(pd.Timestamp("2022-06-03", tz='UTC'))

# Royal Weddings
UniqueCloses.append(pd.Timestamp("1973-11-14", tz='UTC'))  # Wedding Day of <NAME> and <NAME>
UniqueCloses.append(pd.Timestamp("1981-07-29", tz='UTC'))  # Wedding Day of <NAME> and <NAME>
UniqueCloses.append(pd.Timestamp("2011-04-29", tz='UTC'))  # Wedding Day of <NAME> and <NAME>

# Miscellaneous
UniqueCloses.append(pd.Timestamp("1999-12-31", tz='UTC'))  # Eve of 3rd Millennium A.D.
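# A hedged usage sketch (not part of the original module): the Holiday rules defined above
# can be combined into a pandas AbstractHolidayCalendar to list closures in a date range.
# The class name LSEExampleCalendar and the chosen subset of rules are illustrative only,
# and the snippet assumes the definitions above are in scope.
from pandas.tseries.holiday import AbstractHolidayCalendar


class LSEExampleCalendar(AbstractHolidayCalendar):
    rules = [LSENewYearsDay, MayBank_post_2020, SpringBank_post_2022,
             SummerBank, Christmas, WeekendChristmas, BoxingDay, WeekendBoxingDay]


print(LSEExampleCalendar().holidays(start='2023-01-01', end='2023-12-31'))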
<import_stmt>logging<import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>xskillscore<as>xs<import_from_stmt>climpred.exceptions CoordinateError<import_from_stmt>climpred.prediction compute_hindcast<def_stmt>test_same_inits_initializations hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime caplog<block_start>"""Tests that inits are identical at all leads for `same_inits` alignment."""<with_stmt>caplog.at_level(logging.INFO)<block_start>compute_hindcast(hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime alignment="same_inits" )<for_stmt>i,record enumerate(caplog.record_tuples)<block_start><if_stmt>i<ge>2<block_start>print(record)<assert_stmt>"inits: 1954-01-01 00:00:00-2007-01-01 00:00:00"<in>record[2]<block_end><block_end><block_end><block_end><def_stmt>test_same_inits_verification_dates hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime caplog<block_start>"""Tests that appropriate verifs are being used at each lead for `same_inits` alignment."""<with_stmt>caplog.at_level(logging.INFO)<block_start>FIRST_INIT,LAST_INIT=1954 2007<line_sep>compute_hindcast(hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime alignment="same_inits" )<line_sep>nleads=hind_ds_initialized_1d_cftime["lead"].size<for_stmt>i,record zip(np.arange(nleads+2) caplog.record_tuples )<block_start><if_stmt>i<ge>2<block_start>print(record)<assert_stmt>(f"verifs: {FIRST_INIT+i}-01-01 00:00:00-{LAST_INIT+i}-01-01"<in>record[2])<block_end><block_end><block_end><block_end>@pytest.mark.parametrize("alignment" ["same_inits" "same_verifs"])<def_stmt>test_disjoint_verif_time small_initialized_da small_verif_da alignment<block_start>"""Tests that alignment works with disjoint time in the verification data, i.e., non-continuous time sampling to verify against."""<line_sep>hind=small_initialized_da<line_sep>verif=small_verif_da.drop_sel(time=1992)<line_sep>actual=compute_hindcast(hind verif alignment=alignment metric="mse")<assert_stmt>actual.notnull().all()<line_sep># hindcast inits: [1990, 1991, 1992, 1993] # verif times: [1990, 1991, 1993, 1994] a=hind.sel(init=[1990 1992 1993]).rename({"init":"time"})<line_sep>b=verif.sel(time=[1991 1993 1994])<line_sep>a["time"]=b["time"]<line_sep>expected=xs.mse(a b "time")<assert_stmt>actual<eq>expected<block_end>@pytest.mark.parametrize("alignment" ["same_inits" "same_verifs"])<def_stmt>test_disjoint_inits small_initialized_da small_verif_da alignment<block_start>"""Tests that alignment works with disjoint inits in the verification data, i.e., non-continuous initializing to verify with."""<line_sep>hind=small_initialized_da.drop_sel(init=1991)<line_sep>verif=small_verif_da<line_sep>actual=compute_hindcast(hind verif alignment=alignment metric="mse")<assert_stmt>actual.notnull().all()<line_sep># hindcast inits: [1990, 1992, 1993] # verif times: [1990, 1991, 1992, 1993, 1994] a=hind.rename({"init":"time"})<line_sep>b=verif.sel(time=[1991 1993 1994])<line_sep>a["time"]=b["time"]<line_sep>expected=xs.mse(a b "time")<assert_stmt>actual<eq>expected<block_end><def_stmt>test_same_verifs_verification_dates hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime caplog<block_start>"""Tests that verifs are identical at all leads for `same_verifs` alignment."""<with_stmt>caplog.at_level(logging.INFO)<block_start>compute_hindcast(hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime alignment="same_verifs" )<for_stmt>i,record enumerate(caplog.record_tuples)<block_start><if_stmt>i<ge>2<block_start>print(record)<assert_stmt>"verifs: 1964-01-01 00:00:00-2017-01-01 
00:00:00"<in>record[2]<block_end><block_end><block_end><block_end><def_stmt>test_same_verifs_initializations hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime caplog<block_start>"""Tests that appropriate verifs are being used at each lead for `same_inits` alignment."""<with_stmt>caplog.at_level(logging.INFO)<block_start>FIRST_INIT,LAST_INIT=1964 2017<line_sep>compute_hindcast(hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime alignment="same_verifs" )<line_sep>nleads=hind_ds_initialized_1d_cftime["lead"].size<for_stmt>i,record zip(np.arange(nleads+2) caplog.record_tuples )<block_start><if_stmt>i<ge>2<block_start>print(record)<assert_stmt>(f"inits: {FIRST_INIT-i}-01-01 00:00:00-{LAST_INIT-i}-01-01 00:00:00"<in>record[2])<block_end><block_end><block_end><block_end><def_stmt>test_same_verifs_raises_error_when_not_possible hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime<block_start>"""Tests that appropriate error is raised when a common set of verification dates cannot be found with the supplied initializations."""<line_sep>hind=hind_ds_initialized_1d_cftime.isel(lead=slice(0 3) init=[1 3 5 7 9])<with_stmt>pytest.raises(CoordinateError)<block_start>compute_hindcast(hind reconstruction_ds_1d_cftime alignment="same_verifs")<block_end><block_end><def_stmt>test_maximize_alignment_inits hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime caplog<block_start>"""Tests that appropriate inits are selected for `maximize` alignment."""<with_stmt>caplog.at_level(logging.INFO)<block_start>compute_hindcast(hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime alignment="maximize" )<line_sep># Add dummy values for the first two lines since they are just metadata. <for_stmt>i,record zip(np.concatenate(([0 0] hind_ds_initialized_1d_cftime.lead.values)) caplog.record_tuples )<block_start><if_stmt>i<ge>1<block_start>print(record)<assert_stmt>(f"inits: 1954-01-01 00:00:00-{2016-i}-01-01 00:00:00"<in>record[2])<block_end><block_end><block_end><block_end><def_stmt>test_maximize_alignment_verifs hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime caplog<block_start>"""Tests that appropriate verifs are selected for `maximize` alignment."""<with_stmt>caplog.at_level(logging.INFO)<block_start>compute_hindcast(hind_ds_initialized_1d_cftime reconstruction_ds_1d_cftime alignment="maximize" )<line_sep># Add dummy values for the first two lines since they are just metadata. <for_stmt>i,record zip(np.concatenate(([0 0] hind_ds_initialized_1d_cftime.lead.values)) caplog.record_tuples )<block_start><if_stmt>i<ge>1<block_start>print(record)<assert_stmt>(f"verifs: {1955+i}-01-01 00:00:00-2017-01-01 00:00:00"<in>record[2])<block_end><block_end><block_end><block_end>
'''
Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
SPDX-License-Identifier: MIT-0
'''

import json
import urllib.request
import os
import time

from neptune_python_utils.endpoints import Endpoints


class BulkLoad:

    def __init__(self, source, format='csv', role=None, region=None, endpoints=None):
        self.source = source
        self.format = format

        if role is None:
            assert ('NEPTUNE_LOAD_FROM_S3_ROLE_ARN' in os.environ), 'role is missing.'
            self.role = os.environ['NEPTUNE_LOAD_FROM_S3_ROLE_ARN']
        else:
            self.role = role

        if region is None:
            assert ('AWS_REGION' in os.environ), 'region is missing.'
            self.region = os.environ['AWS_REGION']
        else:
            self.region = region

        if endpoints is None:
            self.endpoints = Endpoints()
        else:
            self.endpoints = endpoints

    def __load_from(self, source, format, role, region):
        return {
            'source': source,
            'format': format,
            'iamRoleArn': role,
            'region': region,
            'failOnError': 'FALSE'
        }

    def __load(self, loader_url, data):
        jsondataasbytes = json.dumps(data).encode('utf8')
        req = urllib.request.Request(loader_url, data=jsondataasbytes,
                                     headers={'Content-Type': 'application/json'})
        response = urllib.request.urlopen(req)
        jsonresponse = json.loads(response.read().decode('utf8'))
        return jsonresponse['payload']['loadId']

    def load_async(self):
        localised_source = self.source.replace('${AWS_REGION}', self.region)
        loader_url = self.endpoints.loader_endpoint()

        json_payload = self.__load_from(localised_source, self.format, self.role, self.region)

        print('''curl -X POST \\
    -H 'Content-Type: application/json' \\
    {} -d \'{}\''''.format(loader_url, json.dumps(json_payload, indent=4)))

        load_id = self.__load(loader_url, json_payload)
        return BulkLoadStatus(self.endpoints.load_status_endpoint(load_id))

    def load(self, interval=2):
        status = self.load_async()
        print('status_uri: {}'.format(status.uri()))
        status.wait(interval)


class BulkLoadStatus:

    def __init__(self, status_uri):
        self.status_uri = status_uri

    def status(self):
        req = urllib.request.Request(self.status_uri)
        response = urllib.request.urlopen(req)
        jsonresponse = json.loads(response.read().decode('utf8'))
        status = jsonresponse['payload']['overallStatus']['status']
        return (status, jsonresponse)

    def uri(self):
        return self.status_uri

    def wait(self, interval=2):
        while True:
            status, jsonresponse = self.status()
            if status == 'LOAD_COMPLETED':
                print('load completed')
                break
            if status == 'LOAD_IN_PROGRESS':
                print('loading... {} records inserted'.format(
                    jsonresponse['payload']['overallStatus']['totalRecords']))
                time.sleep(interval)
            else:
                raise Exception(jsonresponse)
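# An assumed usage sketch (not part of the original module), based only on the BulkLoad API
# shown above and assuming the class definitions are in scope. The S3 URI and IAM role ARN
# below are placeholders, not real resources.
bulkload = BulkLoad(
    source='s3://my-bucket-${AWS_REGION}/data/vertices.csv',          # placeholder bucket
    format='csv',
    role='arn:aws:iam::123456789012:role/NeptuneLoadFromS3',          # placeholder role ARN
    region='us-east-1')
bulkload.load(interval=5)  # polls the load status every 5 seconds until completion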
<import_from_stmt>amuse.support.core late<import_from_stmt>amuse.support exceptions<import_stmt>numpy<import_from_stmt>amuse.support.core memoize<import_from_stmt>amuse.support.core MultitonMetaClass<class_stmt>system(object)<block_start>ALL={}<def_stmt>__init__ self name<block_start>self.name=name<line_sep>self.bases=[]<line_sep>self.mapping_from_base_name_to_base={}<line_sep>self.ALL[self.name]=self<line_sep>self.index=len(self.ALL)<block_end><def_stmt>reference_string self<block_start><return>"{0}.get({1!r})".format('system' self.name)<block_end><def_stmt>add_base self unit<block_start>unit.system=self<line_sep>unit.index=len(self.bases)<line_sep>self.bases.append(unit)<line_sep>self.mapping_from_base_name_to_base[unit.quantity]=unit<block_end><def_stmt>base self name<block_start><return>self.mapping_from_base_name_to_base[name]<block_end>@classmethod<def_stmt>get cls name<block_start><try_stmt><block_start><return>cls.ALL[name]<block_end><except_stmt>KeyError<as>ex<block_start><import_from_stmt>amuse.units nbody_system<import_from_stmt>amuse.units si<line_sep><return>cls.ALL[name]<block_end><block_end><def_stmt>__reduce__ self<block_start><return>(get_system_with_name (self.name ))<block_end><def_stmt>__str__ self<block_start><return>self.name<block_end><block_end><class_stmt>unit(object)<block_start>""" Abstract base class for unit objects. Two classes of units are defined: base units The base units in a given system of units. For SI, these are meter, kilogram, second, ampere, kelvin, mole and candele. See the si module :mod:`amuse.units.si` derived units Derived units are created by dividing or multiplying with a number or with another unit. For example, to get a velocity unit we can devine vel = 1000 * m / s Units can also be named, by creating a named unit. """<line_sep>__array_priority__=100<def_stmt>__mul__ self other<block_start><if_stmt>isinstance(other unit)<block_start><return>mul_unit(self other)<block_end><else_stmt><block_start><return>other<times>self<block_end><block_end># return factor_unit(other, self) <def_stmt>__truediv__ self other<block_start><if_stmt>isinstance(other unit)<block_start><return>div_unit(self other)<block_end><else_stmt><block_start><return>(1.0/other)<times>self<block_end><block_end># return factor_unit(1.0 / other, self) <def_stmt>__rmul__ self other<block_start><if_stmt>other<eq>1<block_start><return>self<block_end><else_stmt><block_start><return>factor_unit(other self)<block_end><block_end><def_stmt>__ror__ self value<block_start>"""Create a new Quantity object. 
:argument value: numeric value of the quantity, can be a number or a sequence (list or ndarray) :returns: new ScalarQuantity or VectorQuantity object with this unit Examples >>> from amuse.units import units >>> 100 | units.kg quantity<100 kg> """<line_sep><return>self.new_quantity(value)<block_end><def_stmt>__rtruediv__ self other<block_start><return>factor_unit(other pow_unit(-1 self))<block_end><def_stmt>__div__ self other<block_start><return>self.__truediv__(other)<block_end><def_stmt>__rdiv__ self other<block_start><return>self.__rtruediv__(other)<block_end><def_stmt>__pow__ self other<block_start><if_stmt>other<eq>1<block_start><return>self<block_end><else_stmt><block_start><return>pow_unit(other self)<block_end><block_end><def_stmt>__call__ self x<block_start><return>self.new_quantity(x)<block_end><def_stmt>__eq__ self other<block_start><if_stmt>self<is>other<block_start><return><true><block_end><elif_stmt>isinstance(other unit)<block_start><return>self.base<eq>other.base<and>self.factor<eq>other.factor<block_end><else_stmt><block_start><return><false><block_end><block_end><def_stmt>__ne__ self other<block_start><if_stmt>isinstance(other unit)<block_start><if_stmt>(isinstance(self base_unit)<and>isinstance(other base_unit))<or>isinstance(self nonnumeric_unit)<or>isinstance(other nonnumeric_unit)<block_start><return>NotImplemented<block_end><return>self.base<ne>other.base<and>self.factor<ne>other.factor<block_end><else_stmt><block_start><return><true><block_end><block_end><def_stmt>__hash__ self<block_start><return>self._hash<block_end>@late<def_stmt>_hash self<block_start><return>hash(id(self))<block_end>@property<def_stmt>dtype self<block_start><return><none><block_end>@property<def_stmt>number self<block_start><return>1.0<block_end>@property<def_stmt>unit self<block_start><return>self<block_end><def_stmt>is_zero self<block_start><return><false><block_end><def_stmt>iskey self<block_start><return><false><block_end><def_stmt>new_quantity self value<block_start>"""Create a new Quantity object. 
:argument value: numeric value of the quantity, can be a number or a sequence (list or ndarray) :returns: new ScalarQuantity or VectorQuantity object with this unit """<import_from_stmt>amuse.units quantities<line_sep><return>quantities.new_quantity(value self)<block_end><def_stmt>to_simple_form self<block_start>"""Convert unit to a form with only one factor and powers :result: Unit with only a factor and power terms >>> from amuse.units import units >>> N = (units.m * units.kg) / (units.s * units.s) >>> N unit<m * kg / (s * s)> >>> J = N * units.m >>> J unit<m * kg / (s * s) * m> >>> J.to_simple_form() unit<m**2 * kg * s**-2> """<if_stmt><not>self.base<block_start><return>none_unit('none' 'none')<times>self.factor<block_end>result=self.factor<for_stmt>n,base self.base<block_start><if_stmt>n<eq>1<block_start><if_stmt>result<eq>1<block_start>result=base<block_end><else_stmt><block_start>result=result<times>base<block_end><block_end><else_stmt><block_start>result=result<times>(base<power>n)<block_end><block_end><return>result<block_end><def_stmt>to_reduced_form self<block_start>"""Convert unit to a reduced (simpler) form """<if_stmt><not>self.base<block_start><return>none_unit('none' 'none')<times>self.factor<block_end>total_factor=1<line_sep>combined_unit=<none><for_stmt>factor,power,unit self.get_parts_with_power()<block_start>total_factor<augmul>factor<if_stmt>power<eq>0<block_start><pass><block_end><else_stmt><block_start><if_stmt>combined_unit<is><none><block_start>combined_unit=unit<power>power<block_end><else_stmt><block_start>combined_unit=combined_unit<times>(unit<power>power)<block_end><block_end><block_end><if_stmt>total_factor<eq>1<block_start><return>combined_unit<block_end><else_stmt><block_start><return>factor_unit(total_factor combined_unit)<block_end><block_end><def_stmt>to_factor_and_reduced_form self<block_start>"""Convert unit to a reduced (simpler) form """<if_stmt><not>self.base<block_start><return>none_unit('none' 'none')<times>self.factor<block_end>total_factor=1<line_sep>combined_unit=<none><for_stmt>factor,power,unit self.get_parts_with_power()<block_start>total_factor<augmul>factor<if_stmt>power<eq>0<block_start><pass><block_end><else_stmt><block_start><if_stmt>combined_unit<is><none><block_start>combined_unit=unit<power>power<block_end><else_stmt><block_start>combined_unit=combined_unit<times>(unit<power>power)<block_end><block_end><block_end><return>total_factor combined_unit<block_end><def_stmt>are_bases_equal self other<block_start><if_stmt>len(self.base)<ne>len(other.base)<block_start><return><false><block_end><for_stmt>n1,unit1 sorted(self.base key=<lambda>x:x[1].index)<block_start>found=<false><for_stmt>n2,unit2 other.base<block_start><if_stmt>unit1<eq>unit2<block_start><if_stmt><not>n2<eq>n1<block_start><return><false><block_end>found=<true><line_sep><break><block_end><block_end><if_stmt><not>found<block_start><return><false><block_end><block_end><return><true><block_end><def_stmt>_compare_bases self other eps=<none><block_start><if_stmt>len(self.base)<ne>len(other.base)<block_start><return><false><block_end><if_stmt>eps<is><none><block_start>eps=numpy.finfo(numpy.double).eps<block_end><for_stmt>(n1 unit1),(n2 unit2) zip(self.base 
other.base)<block_start><if_stmt><not>unit1<eq>unit2<block_start><return><false><block_end><if_stmt>n1<eq>n2<block_start><continue><block_end><else_stmt><block_start><if_stmt>abs(n1-n2)<l>eps<block_start><continue><block_end><if_stmt>abs(n2)<g>abs(n1)<block_start>relativeError=abs((n1-n2)<times>1.0/n2)<block_end><else_stmt><block_start>relativeError=abs((n1-n2)<times>1.0/n1)<block_end><if_stmt>relativeError<le>eps<block_start><continue><block_end><else_stmt><block_start><return><false><block_end><block_end><block_end><return><true><block_end>@memoize<def_stmt>conversion_factor_from self x<block_start><if_stmt>x.base<is><none><block_start><return>self.factor<times>1.0<block_end><elif_stmt>self._compare_bases(x)<block_start>this_factor=self.factor<times>1.0<line_sep>other_factor=x.factor<line_sep><return>1<times>(this_factor<eq>other_factor)<or>this_factor/other_factor<block_end><else_stmt><block_start><raise>IncompatibleUnitsException(x self)<block_end><block_end><def_stmt>in_ self x<block_start>"""Express this quantity in the given unit :argument unit: The unit to express this quantity in :result: A Quantity object Examples >>> from amuse.units import units >>> l = 1 | units.AU >>> l.in_(units.km) quantity<149597870.691 km> """<line_sep><return>self.as_quantity_in(x)<block_end><def_stmt>as_quantity_in self unit<block_start>"""Express this unit as a quantity in the given unit :argument unit: The unit to express this unit in :result: A Quantity object Examples >>> from amuse.units import units >>> ton = 1000 * units.kg >>> ton.as_quantity_in(units.kg) quantity<1000.0 kg> """<import_from_stmt>amuse.units quantities<if_stmt>isinstance(unit quantities.Quantity)<block_start><raise>exceptions.AmuseException("Cannot expres a unit in a quantity")<block_end>factor=self.conversion_factor_from(unit)<line_sep><return>quantities.new_quantity(factor unit)<block_end><def_stmt>value_in self unit<block_start>""" Return a numeric value of this unit in the given unit. Works only when the units are compatible, i.e. from tonnage to kg's. A number is returned without any unit information. :argument unit: wanted unit of the value :returns: number in the given unit >>> from amuse.units import units >>> x = units.km >>> x.value_in(units.m) 1000.0 """<line_sep><return>self.conversion_factor_from(unit)<block_end><def_stmt>__repr__ self<block_start><return>'unit<'+str(self)+'>'<block_end><def_stmt>combine_bases self base1 base2<block_start>indexed1=[<none>]<times>7<for_stmt>n1,unit1 base1<block_start>indexed1[unit1.index]=(n1 unit1)<block_end>indexed2=[<none>]<times>7<for_stmt>n2,unit2 base2<block_start>indexed2[unit2.index]=(n2 unit2)<block_end>result=[]<for_stmt>sub1,sub2 zip(indexed1 indexed2)<block_start><if_stmt><not>sub1<is><none><block_start><if_stmt><not>sub2<is><none><block_start><if_stmt>sub1[1]<eq>sub2[1]<block_start>result.append((sub1[0] sub2[0] sub1[1]))<block_end><else_stmt><block_start><raise>exceptions.AmuseException("Cannot combine units from "<concat>"different systems: {0} and {1}".format(sub1[1] sub2[1]))<block_end><block_end><else_stmt><block_start>result.append((sub1[0] 0 sub1[1]))<block_end><block_end><elif_stmt><not>sub2<is><none><block_start>result.append((0 sub2[0] sub2[1]))<block_end><block_end><return>result<block_end><def_stmt>has_same_base_as self other<block_start>"""Determine if the base of other is the same as the base of self. :argument other: unit to compare base to :result: True, if bases are compatiple. 
>>> from amuse.units import units >>> mps = units.m / units.s >>> kph = units.km / units.hour >>> mps.has_same_base_as(kph) True >>> mps.has_same_base_as(units.km) False """<line_sep><return>other.base<eq>self.base<block_end><def_stmt>base_unit self<block_start><if_stmt><not>self.base<block_start><return>none_unit('none' 'none')<block_end>unit=1<for_stmt>n,base self.base<block_start><if_stmt>n<eq>1<block_start>unit=unit<times>base<block_end><else_stmt><block_start>unit=unit<times>(base<power>n)<block_end><block_end><return>unit<block_end><def_stmt>is_non_numeric self<block_start><return><false><block_end><def_stmt>is_generic self<block_start><return><false><block_end><def_stmt>is_none self<block_start><return><false><block_end><def_stmt>get_parts_with_power self<block_start>""" The parts of this units as a list of tuple with factor, power and unit """<line_sep><return>((1.0 1 self) )<block_end><def_stmt>convert_result_value self method definition value<block_start><return>self.new_quantity(value)<block_end><def_stmt>convert_argument_value self method definition value<block_start><return>value.value_in(self)<block_end><def_stmt>append_result_value self method definition value result<block_start>result.append(self.convert_result_value(method definition value))<block_end><def_stmt>to_array_of_floats self<block_start>"""Represent a unit as an array of 8 64-bit floats. First float represents the factor, the other 7 the power of each base unit. Cannot be used for non numeric units """<line_sep>result=numpy.zeros(9 dtype=numpy.float64)<if_stmt><not>self.base<block_start><return>result<block_end>result[0]=self.factor<for_stmt>n,base self.base<block_start>result[base.index+2]=n<line_sep>result[1]=base.system.index<block_end><return>result<block_end><def_stmt>describe_array_of_floats self<block_start>"""Create a human readable description of the array of floats """<if_stmt><not>self.base<block_start><return>'not a numerical unit'<block_end>parts=['factor']<line_sep>parts.extend(['-']<times>8)<for_stmt>n,base self.base<block_start><if_stmt>n<ne>0<block_start>parts[base.index+2]=str(base)<block_end><else_stmt><block_start>parts[base.index+2]='-'<block_end>parts[1]=str(base.system)<block_end><return>', '.join(parts)<block_end>@property<def_stmt>base_system self<block_start>base=self.base<line_sep>system=self.base[0][1].system<for_stmt>b base<block_start><if_stmt>system<ne>b[1].system<block_start><raise>Exception("inconsistent unit found")<block_end><block_end><return>self.base[0][1].system<block_end><block_end><class_stmt>base_unit(unit)<block_start>""" base_unit objects are orthogonal, indivisable units of a sytem of units. A system of units contains a set of base units :argument quantity: name of the base quantity, for example *length* :argument name: name of the unit, for example *meter* :argument symbol: symbol of the unit, for example *m* :argument system: system of units object >>> cgs = system("cgs") >>> cm = base_unit("length", "centimetre", "cm", cgs) >>> cm unit<cm> """<def_stmt>__init__ self quantity name symbol system<block_start>self.quantity=quantity<line_sep>self.name=name<line_sep>self.symbol=symbol<line_sep>self.system=system<line_sep>system.add_base(self)<block_end><def_stmt>__str__ self<block_start><return>self.symbol<block_end><def_stmt>__hash__ self<block_start><return>self._hash<block_end>@property<def_stmt>factor self<block_start>""" The multiplication factor of a unit. For example, factor is 1000 for km. 
"""<line_sep><return>1<block_end>@late<def_stmt>base self<block_start>""" The base represented as a list of tuples. Each tuple consists of an power and a unit. """<line_sep><return>((1 self) )<block_end><def_stmt>reference_string self<block_start><return>'{0}.base({1!r})'.format(self.system.reference_string() self.quantity)<block_end><def_stmt>__reduce__ self<block_start><return>(get_base_unit_with_name (self.system self.quantity ))<block_end><def_stmt>__eq__ self other<block_start><if_stmt>self<is>other<block_start><return><true><block_end><elif_stmt>isinstance(other base_unit)<block_start><return>NotImplemented<block_end><else_stmt><block_start><return><false><block_end><block_end><block_end><class_stmt>no_system(object)<block_start>ALL={}<line_sep>@classmethod<def_stmt>set cls unit<block_start>cls.ALL[unit.name]=unit<block_end>@classmethod<def_stmt>get cls name<block_start><return>cls.ALL[name]<block_end><block_end><class_stmt>none_unit(unit metaclass=MultitonMetaClass)<block_start><def_stmt>__init__ self name symbol<block_start>self.name=name<line_sep>self.symbol=symbol<line_sep>no_system.set(self)<block_end><def_stmt>__str__ self<block_start><return>self.symbol<block_end><def_stmt>reference_string self<block_start><return>'no_system.get({0!r})'.format(self.name)<block_end>@late<def_stmt>factor self<block_start><return>1<block_end>@late<def_stmt>base self<block_start><return>()<block_end><def_stmt>get_parts_with_power self<block_start><return>()<block_end><def_stmt>is_none self<block_start><return><true><block_end><block_end><class_stmt>zero_unit(none_unit)<block_start><def_stmt>__init__ self<block_start>none_unit.__init__(self 'zero' 'zero')<block_end><def_stmt>__str__ self<block_start><return>self.symbol<block_end><def_stmt>is_zero self<block_start><return><true><block_end>@late<def_stmt>base self<block_start><return><none><block_end><def_stmt>get_parts_with_power self<block_start><return><none><block_end><def_stmt>conversion_factor_from self x<block_start><if_stmt>x.base<is><none><block_start><return>1.0<block_end><else_stmt><block_start><return>x.factor<block_end><block_end><block_end><class_stmt>key_unit(none_unit)<block_start><def_stmt>iskey self<block_start><return><true><block_end>@property<def_stmt>dtype self<block_start><return>numpy.dtype('uint64')<block_end><block_end><class_stmt>nonnumeric_unit(unit)<block_start>""" nonnumeric_unit objects are indivisable units not connected to any system of units. nonnumeric_units cannot be used to derive new units from. nonnumeric_units have no physical meaning. 
"""<def_stmt>__init__ self name symbol<block_start>self.name=name<line_sep>self.symbol=symbol<line_sep>no_system.set(self)<block_end><def_stmt>__str__ self<block_start><return>self.symbol<block_end><def_stmt>reference_string self<block_start><return>'no_system.get({0!r})'.format(self.name)<block_end><def_stmt>__mul__ self other<block_start><if_stmt>other<eq>1<block_start><return>self<block_end><raise>exceptions.AmuseException("Cannot derive other units from a non numeric unit")<block_end><def_stmt>__truediv__ self other<block_start><raise>exceptions.AmuseException("Cannot derive other units from a non numeric unit")<block_end><def_stmt>__rmul__ self other<block_start><if_stmt>other<eq>1<block_start><return>self<block_end><raise>exceptions.AmuseException("Cannot derive other units from a non numeric unit")<block_end><def_stmt>__rtruediv__ self other<block_start><if_stmt>other<eq>1<block_start><return>self<block_end><raise>exceptions.AmuseException("Cannot derive other units from a non numeric unit")<block_end><def_stmt>__pow__ self other<block_start><if_stmt>other<eq>1<block_start><return>self<block_end><raise>exceptions.AmuseException("Cannot derive other units from a non numeric unit")<block_end><def_stmt>__div__ self other<block_start><return>self.__truediv__(other)<block_end><def_stmt>__rdiv__ self other<block_start><return>self.__rtruediv__(other)<block_end><def_stmt>is_non_numeric self<block_start><return><true><block_end>@property<def_stmt>factor self<block_start><return>1<block_end>@property<def_stmt>base self<block_start><return>((1 self) )<block_end><def_stmt>value_to_string self value<block_start><return><none><block_end><def_stmt>is_valid_value self value<block_start><return><false><block_end><def_stmt>__eq__ self other<block_start><if_stmt>self<is>other<block_start><return><true><block_end><elif_stmt>isinstance(other nonnumeric_unit)<block_start><return>NotImplemented<block_end><else_stmt><block_start><return><false><block_end><block_end><block_end><class_stmt>string_unit(nonnumeric_unit)<block_start>""" String unit objects define quantities with a string value. These have no physical meaning, but are needed for some legacy codes. For example the path of a file. """<def_stmt>__init__ self name symbol<block_start>nonnumeric_unit.__init__(self name symbol)<block_end><def_stmt>value_to_string self value<block_start><return>''<if>value<is><none><else>value<block_end><def_stmt>is_valid_value self value<block_start><return>value<is><none><or>isinstance(value str)<block_end>@property<def_stmt>dtype self<block_start><return>numpy.dtype('S256')<block_end><block_end><class_stmt>enumeration_unit(nonnumeric_unit)<block_start>DEFINED={}<line_sep>""" Enumeration unit objects define a fixed set of quantities. A quantity with a enumeration_unit can only have a value defined in the set of values of the enumeration_unit. :argument possible_values: A sequence or iterable with all the possible values. If None the possible values are integers ranging from 0 to the length of the names_for_values argument :argument names_for_values: A sequence of strings defining a display name for each value. If None the names are the string vales of the values in the possible_values arguments Examples >>> my_unit = enumeration_unit('my_unit','my_unit', [1,2,5], ["star","gas","planet"]) >>> 2 | my_unit quantity<2 - gas> >>> list(my_unit.quantities()) [quantity<1 - star>, quantity<2 - gas>, quantity<5 - planet>] >>> 3 | my_unit Traceback (most recent call last): ... 
AmuseException: <3> is not a valid value for unit<my_unit> Or, with default values: >>> my_unit = enumeration_unit('my_unit','my_unit', None, ["star","gas","planet"]) >>> 2 | my_unit quantity<2 - planet> >>> list(my_unit.quantities()) [quantity<0 - star>, quantity<1 - gas>, quantity<2 - planet>] """<def_stmt>__init__ self name symbol possible_values=<none> names_for_values=<none><block_start>nonnumeric_unit.__init__(self name symbol)<line_sep>self.possible_values=self._initial_list_of_possible_values(possible_values names_for_values)<line_sep>self.names_for_values=self._initial_names_for_values(possible_values names_for_values)<if_stmt><not>len(self.possible_values)<eq>len(self.names_for_values)<block_start><raise>exceptions.AmuseException("Must provide equal lenght list for values({0}) and names({1})".format(len(self.possible_values) len(self.names_for_values)))<block_end>self.mapping_from_values_to_names=self._initial_mapping_from_values_to_names()<line_sep>self.DEFINED[name]=self<block_end><def_stmt>_initial_list_of_possible_values self possible_values names_for_values<block_start><if_stmt>possible_values<is><none><block_start><if_stmt>names_for_values<is><none><block_start><raise>exceptions.AmuseException("Must provide a list of values and / or a list of names for each value")<block_end><return>list(range(len(names_for_values)))<block_end><else_stmt><block_start><return>list(possible_values)<block_end><block_end><def_stmt>_initial_mapping_from_values_to_names self<block_start>result={}<for_stmt>value,name zip(self.possible_values self.names_for_values)<block_start>result[value]=name<block_end><return>result<block_end><def_stmt>_initial_names_for_values self possible_values names_for_values<block_start><if_stmt>names_for_values<is><none><block_start><if_stmt>possible_values<is><none><block_start><raise>exceptions.AmuseException("Must provide a list of values and / or a list of names for each value")<block_end><return>[str(x)<for>x possible_values]<block_end><else_stmt><block_start><return>list(names_for_values)<block_end><block_end><def_stmt>__hash__ self<block_start><return>self._hash<block_end><def_stmt>is_valid_value self value<block_start><return>value<in>self.mapping_from_values_to_names<block_end><def_stmt>value_to_string self value<block_start><return>self.mapping_from_values_to_names[value]<block_end><def_stmt>quantities self<block_start><for_stmt>x self.possible_values<block_start><yield>x|self<block_end><block_end><def_stmt>__call__ self string<block_start>index=self.names_for_values.index(string)<if_stmt>index<g>0<block_start><return>self.possible_values[index]|self<block_end><else_stmt><block_start><raise>exceptions.AmuseException("{0} is not a valid name for {1} enumeration type".format(string self.name))<block_end><block_end>@property<def_stmt>dtype self<block_start><return>numpy.dtype('int32')<block_end>@classmethod<def_stmt>get cls name<block_start><try_stmt><block_start><return>cls.DEFINED[name]<block_end><except_stmt>KeyError<as>ex<block_start><import_from_stmt>amuse.units nbody_system<import_from_stmt>amuse.units si<line_sep><return>cls.DEFINED[name]<block_end><block_end><def_stmt>__reduce__ self<block_start><return>(get_enumeration_unit_with_name (self.name ))<block_end><block_end><class_stmt>named_unit(unit)<block_start>""" A named_unit object defines an alias for another unit. When printing a named_unit, the symbol is shown and not the unit parts. For all other operations the named_units works exactly like the aliased unit. 
:argument name: Long name or description of the unit :argument symbol: Short name to show when printing units or quantities :argument unit: The unit to alias >>> from amuse.units import si >>> 60.0 * si.s unit<60.0 * s> >>> minute = named_unit("minute","min", 60*si.s) >>> minute unit<min> >>> (20.0 | (60.0 * si.s)).as_quantity_in(minute) quantity<20.0 min> """<def_stmt>__init__ self name symbol unit<block_start>self.name=name<line_sep>self.symbol=symbol<line_sep>self.local_unit=unit<block_end><def_stmt>__str__ self<block_start><return>self.symbol<block_end><def_stmt>reference_string self<block_start><return>self.to_simple_form().reference_string()<block_end>@late<def_stmt>factor self<block_start><return>self.local_unit.factor<block_end>@late<def_stmt>base self<block_start><return>self.local_unit.base<block_end><def_stmt>is_none self<block_start><return>self.local_unit.is_none()<block_end><block_end><class_stmt>derived_unit(unit metaclass=MultitonMetaClass)<block_start>""" Abstract base class of derived units. New units can be derived from base_units. Each operation on a unit creates a new derived_unit. """<line_sep><pass><block_end><class_stmt>factor_unit(derived_unit)<block_start>""" A factor_unit object defines a unit multiplied by a number. Do not call this method directly, factor_unit objects are supposed to be created by multiplying a number with a unit. :argument unit: The unit to derive from. :argument factor: The multiplication factor. >>> from amuse.units import si >>> minute = 60.0 * si.s >>> minute.as_quantity_in(si.s) quantity<60.0 s> >>> hour = 60.0 * minute >>> hour unit<60.0 * 60.0 * s> >>> hour.as_quantity_in(si.s) quantity<3600.0 s> """<def_stmt>__init__ self factor unit name=<none> symbol=<none><block_start>self.name=name<line_sep>self.symbol=symbol<line_sep>self.local_factor=factor<line_sep>self.local_unit=unit<block_end><def_stmt>__str__ self<block_start><if_stmt>self.symbol<is><none><block_start><return>str(self.local_factor)+' * '+str(self.local_unit)<block_end><return>self.symbol+str(self.local_unit)<block_end><def_stmt>reference_string self<block_start><return>'('+str(self.local_factor)+' * '+self.local_unit.reference_string()+')'<block_end>@late<def_stmt>factor self<block_start><return>self.local_factor<times>self.local_unit.factor<block_end>@late<def_stmt>base self<block_start><return>self.local_unit.base<block_end><def_stmt>get_parts_with_power self<block_start>local_unit_parts=self.local_unit.get_parts_with_power()<line_sep>result=[]<line_sep>is_first=<true><for_stmt>factor,power,unit local_unit_parts<block_start><if_stmt>is_first<block_start>factor<augmul>self.local_factor<line_sep>is_first=<false><block_end>result.append((factor power unit))<block_end><return>result<block_end><block_end><class_stmt>mul_unit(derived_unit)<block_start>""" A mul_unit object defines a unit multiplied by another unit. Do not call this method directly, mul_unit objects are supposed to be created by multiplying units. :argument left_hand: Left hand side of the multiplication. :argument right_hand: Right hand side of the multiplication. 
>>> from amuse.units import si >>> area = si.m * si.m >>> area unit<m * m> >>> hectare = (100 * si.m) * (100 * si.m) >>> hectare.as_quantity_in(area) quantity<10000.0 m * m> """<def_stmt>__init__ self left_hand right_hand<block_start>self.left_hand=left_hand<line_sep>self.right_hand=right_hand<block_end><def_stmt>__str__ self<block_start><return>str(self.left_hand)+' * '+str(self.right_hand)<block_end><def_stmt>reference_string self<block_start><return>'('+self.left_hand.reference_string()+' * '+self.right_hand.reference_string()+')'<block_end>@late<def_stmt>factor self<block_start><return>self.left_hand.factor<times>self.right_hand.factor<block_end>@late<def_stmt>base self<block_start><return>tuple([x<for>x [(x[0]+x[1] x[2])<for>x self.combine_bases(self.left_hand.base self.right_hand.base)]<if>x[0]<ne>0])<block_end><def_stmt>get_parts_with_power self<block_start>lhs_parts=list(self.left_hand.get_parts_with_power())<line_sep>rhs_parts=list(self.right_hand.get_parts_with_power())<line_sep>result=[]<for_stmt>lhs_factor,lhs_power,lhs_unit lhs_parts<block_start>rhs_index=0<line_sep>found_match=<false><for_stmt>rhs_factor,rhs_power,rhs_unit rhs_parts<block_start><if_stmt>lhs_unit<is>rhs_unit<block_start>result.append((lhs_factor<times>rhs_factor lhs_power+rhs_power lhs_unit ))<line_sep>found_match=<true><del_stmt>rhs_parts[rhs_index]<line_sep><break><block_end>rhs_index<augadd>1<block_end><if_stmt><not>found_match<block_start>result.append((lhs_factor lhs_power lhs_unit ))<block_end><block_end><for_stmt>rhs_factor,rhs_power,rhs_unit rhs_parts<block_start>result.append((rhs_factor rhs_power rhs_unit ))<block_end><return>result<block_end><block_end><class_stmt>pow_unit(derived_unit)<block_start>""" A pow_unit object defines a unit as another unit to a specified power. Do not call this method directly, pow_unit objects are supposed to be created by taking powers of units. :argument power: Power of the unit :argument unit: The unit to derive from >>> from amuse.units import si >>> area = si.m**2 >>> area unit<m**2> >>> area.as_quantity_in(si.m * si.m) quantity<1 m * m> >>> hectare = (100 * si.m) ** 2 >>> hectare.as_quantity_in(area) quantity<10000.0 m**2> """<def_stmt>__init__ self power unit<block_start>self.power=power<line_sep>self.local_unit=unit<block_end><def_stmt>__str__ self<block_start><if_stmt>isinstance(self.local_unit derived_unit)<block_start><return>'('+str(self.local_unit)+')**'+str(self.power)<block_end><else_stmt><block_start><return>str(self.local_unit)+'**'+str(self.power)<block_end><block_end><def_stmt>reference_string self<block_start><return>'('+self.local_unit.reference_string()+'**'+str(self.power)+')'<block_end>@late<def_stmt>base self<block_start><return>tuple([x<for>x [(x[0]<times>self.power x[1])<for>x self.local_unit.base]<if>x[0]<ne>0])<block_end>@late<def_stmt>factor self<block_start><return>self.local_unit.factor<power>self.power<block_end><def_stmt>get_parts_with_power self<block_start>result=[]<for_stmt>factor,power,unit self.local_unit.get_parts_with_power()<block_start>result.append((factor<power>self.power power<times>self.power unit ))<block_end><return>result<block_end><block_end><class_stmt>div_unit(derived_unit)<block_start>""" A div_unit object defines a unit multiplied by another unit. Do not call this method directly, div_unit objects are supposed to be created by dividing units. :argument left_hand: Left hand side of the multiplication. :argument right_hand: Right hand side of the multiplication. 
>>> from amuse.units import si >>> speed = si.m / si.s >>> speed unit<m / s> >>> speed_with_powers = si.m * si.s ** -1 >>> speed.as_quantity_in(speed_with_powers) quantity<1 m * s**-1> """<def_stmt>__init__ self left_hand right_hand<block_start>self.left_hand=left_hand<line_sep>self.right_hand=right_hand<block_end><def_stmt>__str__ self<block_start><if_stmt>isinstance(self.right_hand derived_unit)<block_start><return>str(self.left_hand)+' / ('+str(self.right_hand)+')'<block_end><else_stmt><block_start><return>str(self.left_hand)+' / '+str(self.right_hand)+''<block_end><block_end><def_stmt>reference_string self<block_start><return>'('+self.left_hand.reference_string()+'/'+self.right_hand.reference_string()+')'<block_end>@late<def_stmt>factor self<block_start><return>self.left_hand.factor<times>1.0/self.right_hand.factor<block_end>@late<def_stmt>base self<block_start><return>tuple([x<for>x [(x[0]-x[1] x[2])<for>x self.combine_bases(self.left_hand.base self.right_hand.base)]<if>x[0]<ne>0])<block_end><def_stmt>get_parts_with_power self<block_start>lhs_parts=list(self.left_hand.get_parts_with_power())<line_sep>rhs_parts=list(self.right_hand.get_parts_with_power())<line_sep>result=[]<for_stmt>lhs_factor,lhs_power,lhs_unit lhs_parts<block_start>rhs_index=0<line_sep>found_match=<false><for_stmt>rhs_factor,rhs_power,rhs_unit rhs_parts<block_start><if_stmt>lhs_unit<is>rhs_unit<block_start>result.append((lhs_factor/rhs_factor lhs_power-rhs_power lhs_unit ))<line_sep>found_match=<true><del_stmt>rhs_parts[rhs_index]<line_sep><break><block_end>rhs_index<augadd>1<block_end><if_stmt><not>found_match<block_start>result.append((lhs_factor lhs_power lhs_unit ))<block_end><block_end><for_stmt>rhs_factor,rhs_power,rhs_unit rhs_parts<block_start>result.append((1.0/rhs_factor -rhs_power rhs_unit ))<block_end><return>result<block_end><block_end><class_stmt>UnitException(exceptions.AmuseException)<block_start>formatstring="Unit exception: {0}"<block_end><class_stmt>IncompatibleUnitsException(exceptions.AmuseException)<block_start>formatstring="Cannot express {1} in {0}, the units do not have the same bases"<def_stmt>__init__ self *arguments<block_start>Exception.__init__(self)<line_sep>self.arguments=arguments<block_end><block_end><def_stmt>get_system_with_name name<block_start><return>system.get(name)<block_end><def_stmt>get_enumeration_unit_with_name name<block_start><return>enumeration_unit.get(name)<block_end><def_stmt>get_base_unit_with_name system name<block_start><return>system.base(name)<block_end><class_stmt>UnitWithSpecificDtype(named_unit)<block_start><def_stmt>__init__ self unit dtype<block_start>self.specific_dtype=dtype<line_sep>symbol=str(unit)+"_"+str(dtype)<line_sep>named_unit.__init__(self symbol symbol unit)<block_end>@property<def_stmt>dtype self<block_start><return>self.specific_dtype<block_end><block_end>@memoize<def_stmt>unit_with_specific_dtype unit dtype<block_start><if_stmt>unit<is><none><or>dtype<is><none><block_start><return>unit<block_end><return>UnitWithSpecificDtype(unit dtype)<block_end>
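# --- Illustrative usage sketch (not part of the module above) ---
# A minimal sketch of the unit algebra defined above, reusing only the
# amuse.units.si base units that the doctests already import; the values in
# the trailing comments are expected results, not captured output.
from amuse.units import si

minute = 60.0 * si.s        # factor_unit wrapping the base unit s
speed = si.m / si.s         # div_unit, base ((1, m), (-1, s))
area = si.m ** 2            # pow_unit

print(minute.as_quantity_in(si.s))            # quantity<60.0 s>
print((20.0 | minute).as_quantity_in(si.s))   # 20 minutes expressed in seconds
try:
    si.m.value_in(si.s)                       # different bases
except Exception as e:                        # IncompatibleUnitsException
    print(e)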
<import_from_future_stmt> absolute_import division print_function<import_from_stmt>libtbx easy_run<import_stmt>libtbx.load_env<import_stmt>os.path<import_stmt>time<line_sep># taken from phenix_regression/refinement/ncs/tst_ncs_0.py pdb_str="""\ CRYST1 100.000 100.000 100.000 90.00 90.00 90.00 P 1 ATOM 1 N ALA A 1 27.344 16.348 30.784 1.00 10.00 N ATOM 2 CA ALA A 1 26.429 15.281 31.335 1.00 10.00 C ATOM 3 C ALA A 1 26.610 14.025 30.603 1.00 10.00 C ATOM 4 O ALA A 1 26.479 13.979 29.356 1.00 10.00 O ATOM 5 CB ALA A 1 24.874 15.800 31.300 1.00 10.00 C ATOM 1 N ALA A 2 26.812 12.925 31.345 1.00 10.00 N ATOM 2 CA ALA A 2 27.084 11.577 30.797 1.00 10.00 C ATOM 3 C ALA A 2 25.856 10.737 30.707 1.00 10.00 C ATOM 4 O ALA A 2 25.741 9.860 29.891 1.00 10.00 O ATOM 5 CB ALA A 2 28.151 10.950 31.721 1.00 10.00 C ATOM 1 N ALA A 3 25.009 10.973 31.714 1.00 10.00 N ATOM 2 CA ALA A 3 23.621 10.543 31.560 1.00 10.00 C ATOM 3 C ALA A 3 23.023 11.008 30.214 1.00 10.00 C ATOM 4 O ALA A 3 22.786 10.233 29.249 1.00 10.00 O ATOM 5 CB ALA A 3 22.760 11.040 32.654 1.00 10.00 C ATOM 1 N ALA A 4 22.798 12.304 30.175 1.00 10.00 N ATOM 2 CA ALA A 4 22.329 13.084 28.981 1.00 10.00 C ATOM 3 C ALA A 4 23.116 12.816 27.721 1.00 10.00 C ATOM 4 O ALA A 4 22.533 12.805 26.670 1.00 10.00 O ATOM 5 CB ALA A 4 22.372 14.607 29.318 1.00 10.00 C ATOM 1 N ALA A 5 24.448 12.622 27.823 1.00 10.00 N ATOM 2 CA ALA A 5 25.228 12.407 26.573 1.00 10.00 C ATOM 3 C ALA A 5 25.222 10.947 26.143 1.00 10.00 C ATOM 4 O ALA A 5 25.386 10.664 24.983 1.00 10.00 O ATOM 5 CB ALA A 5 26.634 12.906 26.746 1.00 10.00 C ATOM 1 N ALA A 6 24.976 10.048 27.071 1.00 10.00 N ATOM 2 CA ALA A 6 24.857 8.614 26.805 1.00 10.00 C ATOM 3 C ALA A 6 23.537 8.349 26.054 1.00 10.00 C ATOM 4 O ALA A 6 23.439 7.570 25.057 1.00 10.00 O ATOM 5 CB ALA A 6 24.874 7.845 28.114 1.00 10.00 C ATOM 1 N ALA A 7 22.542 9.039 26.580 1.00 10.00 N ATOM 2 CA ALA A 7 21.228 8.903 25.942 1.00 10.00 C ATOM 3 C ALA A 7 21.329 9.698 24.628 1.00 10.00 C ATOM 4 O ALA A 7 20.707 9.383 23.632 1.00 10.00 O ATOM 5 CB ALA A 7 20.146 9.465 26.862 1.00 10.00 C ATOM 1 N ALA A 8 22.181 10.696 24.613 1.00 10.00 N ATOM 2 CA ALA A 8 22.526 11.372 23.378 1.00 10.00 C ATOM 3 C ALA A 8 23.351 10.555 22.448 1.00 10.00 C ATOM 4 O ALA A 8 23.618 10.883 21.252 1.00 10.00 O ATOM 5 CB ALA A 8 23.168 12.697 23.693 1.00 10.00 C ATOM 1 N ALA A 9 23.864 9.423 22.961 1.00 10.00 N ATOM 2 CA ALA A 9 24.785 8.541 22.264 1.00 10.00 C ATOM 3 C ALA A 9 24.057 7.451 21.484 1.00 10.00 C ATOM 4 O ALA A 9 24.127 7.381 20.257 1.00 10.00 O ATOM 5 CB ALA A 9 25.815 7.975 23.249 1.00 10.00 C ATOM 1 N ALA A 10 23.518 6.548 22.264 1.00 10.00 N ATOM 2 CA ALA A 10 22.629 5.525 21.690 1.00 10.00 C ATOM 3 C ALA A 10 21.549 6.308 21.009 1.00 10.00 C ATOM 4 O ALA A 10 21.114 5.933 19.930 1.00 10.00 O ATOM 5 CB ALA A 10 22.057 4.714 22.784 1.00 10.00 C ATOM 1 N ALA A 11 21.120 7.452 21.541 1.00 10.00 N ATOM 2 CA ALA A 11 20.186 8.260 20.874 1.00 10.00 C ATOM 3 C ALA A 11 20.978 9.215 19.937 1.00 10.00 C ATOM 4 O ALA A 11 20.386 10.177 19.507 1.00 10.00 O ATOM 5 CB ALA A 11 19.295 9.031 21.867 1.00 10.00 C ATOM 1 N ALA A 12 22.222 8.932 19.598 1.00 10.00 N ATOM 2 CA ALA A 12 22.896 9.709 18.563 1.00 10.00 C ATOM 3 C ALA A 12 22.924 8.925 17.308 1.00 10.00 C ATOM 4 O ALA A 12 22.982 9.445 16.193 1.00 10.00 O ATOM 5 CB ALA A 12 24.294 10.138 18.994 1.00 10.00 C ATOM 1 N ALA A 13 22.951 7.633 17.508 1.00 10.00 N ATOM 2 CA ALA A 13 22.709 6.629 16.554 1.00 10.00 C ATOM 3 C ALA A 13 21.275 6.673 16.206 1.00 10.00 C ATOM 4 O ALA A 13 20.870 6.521 
15.092 1.00 10.00 O ATOM 5 CB ALA A 13 23.077 5.254 17.025 1.00 10.00 C ATOM 1 N ALA A 14 20.471 6.929 17.226 1.00 10.00 N ATOM 2 CA ALA A 14 19.039 6.992 17.025 1.00 10.00 C ATOM 3 C ALA A 14 18.676 8.380 16.528 1.00 10.00 C ATOM 4 O ALA A 14 17.748 8.556 15.761 1.00 10.00 O ATOM 5 CB ALA A 14 18.240 6.715 18.272 1.00 10.00 C ATOM 1 N ALA A 15 19.381 9.390 17.055 1.00 10.00 N ATOM 2 CA ALA A 15 19.204 10.743 16.669 1.00 10.00 C ATOM 3 C ALA A 15 19.407 10.807 15.174 1.00 10.00 C ATOM 4 O ALA A 15 18.402 10.987 14.424 1.00 10.00 O ATOM 5 CB ALA A 15 20.190 11.665 17.493 1.00 10.00 C ATOM 1 N ALA A 16 20.702 10.653 14.831 1.00 10.00 N ATOM 2 CA ALA A 16 21.206 10.546 13.480 1.00 10.00 C ATOM 3 C ALA A 16 20.484 9.612 12.585 1.00 10.00 C ATOM 4 O ALA A 16 20.380 9.918 11.386 1.00 10.00 O ATOM 5 CB ALA A 16 22.631 10.174 13.475 1.00 10.00 C ATOM 1 N ALA A 17 20.064 8.475 13.175 1.00 10.00 N ATOM 2 CA ALA A 17 19.355 7.473 12.426 1.00 10.00 C ATOM 3 C ALA A 17 17.924 7.807 12.064 1.00 10.00 C ATOM 4 O ALA A 17 17.535 7.721 10.871 1.00 10.00 O ATOM 5 CB ALA A 17 19.359 6.123 13.216 1.00 10.00 C ATOM 1 N ALA A 18 17.152 8.115 13.031 1.00 10.00 N ATOM 2 CA ALA A 18 15.835 8.594 12.861 1.00 10.00 C ATOM 3 C ALA A 18 15.811 9.835 11.861 1.00 10.00 C ATOM 4 O ALA A 18 15.020 9.889 10.868 1.00 10.00 O ATOM 5 CB ALA A 18 15.272 8.918 14.234 1.00 10.00 C ATOM 1 N ALA A 19 16.661 10.845 12.100 1.00 10.00 N ATOM 2 CA ALA A 19 16.435 12.061 11.275 1.00 10.00 C ATOM 3 C ALA A 19 17.004 11.815 9.833 1.00 10.00 C ATOM 4 O ALA A 19 16.334 12.117 8.857 1.00 10.00 O ATOM 5 CB ALA A 19 17.059 13.242 11.866 1.00 10.00 C ATOM 1 N ALA A 20 18.191 11.200 9.841 1.00 10.00 N ATOM 2 CA ALA A 20 19.091 11.247 8.697 1.00 10.00 C ATOM 3 C ALA A 20 19.549 9.835 8.231 1.00 10.00 C ATOM 4 O ALA A 20 20.670 9.692 7.663 1.00 10.00 O ATOM 5 CB ALA A 20 20.326 12.105 9.035 1.00 10.00 C ATOM 1 N ALA A 21 18.654 8.850 8.523 1.00 10.00 N ATOM 2 CA ALA A 21 18.827 7.437 8.168 1.00 10.00 C ATOM 3 C ALA A 21 17.565 6.607 8.282 1.00 10.00 C ATOM 4 O ALA A 21 16.485 6.992 7.820 1.00 10.00 O ATOM 5 CB ALA A 21 19.888 6.838 8.983 1.00 10.00 C TER ATOM 1 N ALA B 1 16.348 17.420 35.897 1.00 50.00 N ATOM 2 CA ALA B 1 16.783 16.083 36.351 1.00 50.00 C ATOM 3 C ALA B 1 16.794 15.172 35.139 1.00 50.00 C ATOM 4 O ALA B 1 16.167 15.477 34.133 1.00 50.00 O ATOM 5 CB ALA B 1 15.785 15.534 37.468 1.00 50.00 C ATOM 1 N ALA B 2 17.491 14.058 35.255 1.00 50.00 N ATOM 2 CA ALA B 2 17.790 13.267 34.127 1.00 50.00 C ATOM 3 C ALA B 2 16.716 12.232 33.688 1.00 50.00 C ATOM 4 O ALA B 2 16.676 11.869 32.543 1.00 50.00 O ATOM 5 CB ALA B 2 19.125 12.656 34.415 1.00 50.00 C ATOM 1 N ALA B 3 15.904 11.687 34.605 1.00 50.00 N ATOM 2 CA ALA B 3 14.798 10.901 34.173 1.00 50.00 C ATOM 3 C ALA B 3 13.740 11.723 33.536 1.00 50.00 C ATOM 4 O ALA B 3 13.398 11.501 32.356 1.00 50.00 O ATOM 5 CB ALA B 3 14.148 10.176 35.403 1.00 50.00 C ATOM 1 N ALA B 4 13.239 12.708 34.247 1.00 50.00 N ATOM 2 CA ALA B 4 12.158 13.487 33.709 1.00 50.00 C ATOM 3 C ALA B 4 12.674 14.248 32.495 1.00 50.00 C ATOM 4 O ALA B 4 11.935 14.376 31.526 1.00 50.00 O ATOM 5 CB ALA B 4 11.553 14.432 34.712 1.00 50.00 C ATOM 1 N ALA B 5 13.947 14.627 32.479 1.00 50.00 N ATOM 2 CA ALA B 5 14.416 15.490 31.405 1.00 50.00 C ATOM 3 C ALA B 5 14.960 14.730 30.186 1.00 50.00 C ATOM 4 O ALA B 5 14.575 14.940 29.054 1.00 50.00 O ATOM 5 CB ALA B 5 15.464 16.431 31.928 1.00 50.00 C ATOM 1 N ALA B 6 15.867 13.827 30.546 1.00 50.00 N ATOM 2 CA ALA B 6 16.575 12.918 29.615 1.00 50.00 C ATOM 3 C ALA B 6 15.465 12.002 
28.975 1.00 50.00 C ATOM 4 O ALA B 6 15.450 11.709 27.742 1.00 50.00 O ATOM 5 CB ALA B 6 17.632 12.157 30.362 1.00 50.00 C ATOM 1 N ALA B 7 14.542 11.597 29.783 1.00 50.00 N ATOM 2 CA ALA B 7 13.529 10.701 29.277 1.00 50.00 C ATOM 3 C ALA B 7 12.175 11.364 28.835 1.00 50.00 C ATOM 4 O ALA B 7 11.466 10.770 27.969 1.00 50.00 O ATOM 5 CB ALA B 7 13.161 9.644 30.376 1.00 50.00 C ATOM 1 N ALA B 8 11.753 12.455 29.452 1.00 50.00 N ATOM 2 CA ALA B 8 10.536 13.193 28.972 1.00 50.00 C ATOM 3 C ALA B 8 10.919 13.923 27.670 1.00 50.00 C ATOM 4 O ALA B 8 10.171 14.036 26.729 1.00 50.00 O ATOM 5 CB ALA B 8 10.032 14.139 30.014 1.00 50.00 C ATOM 1 N ALA B 9 12.185 14.247 27.579 1.00 50.00 N ATOM 2 CA ALA B 9 12.754 14.849 26.385 1.00 50.00 C ATOM 3 C ALA B 9 12.892 13.859 25.320 1.00 50.00 C ATOM 4 O ALA B 9 12.234 13.980 24.290 1.00 50.00 O ATOM 5 CB ALA B 9 14.108 15.448 26.695 1.00 50.00 C ATOM 1 N ALA B 10 13.655 12.794 25.566 1.00 50.00 N ATOM 2 CA ALA B 10 13.831 11.803 24.529 1.00 50.00 C ATOM 3 C ALA B 10 12.551 10.987 24.319 1.00 50.00 C ATOM 4 O ALA B 10 12.514 10.237 23.390 1.00 50.00 O ATOM 5 CB ALA B 10 15.024 10.750 24.992 1.00 50.00 C ATOM 1 N ALA B 11 11.558 11.184 25.126 1.00 50.00 N ATOM 2 CA ALA B 11 10.334 10.457 24.931 1.00 50.00 C ATOM 3 C ALA B 11 9.326 11.284 24.168 1.00 50.00 C ATOM 4 O ALA B 11 8.566 10.707 23.476 1.00 50.00 O ATOM 5 CB ALA B 11 9.644 10.042 26.251 1.00 50.00 C ATOM 1 N ALA B 12 9.277 12.611 24.334 1.00 50.00 N ATOM 2 CA ALA B 12 8.354 13.375 23.644 1.00 50.00 C ATOM 3 C ALA B 12 9.019 13.546 22.264 1.00 50.00 C ATOM 4 O ALA B 12 8.400 13.891 21.317 1.00 50.00 O ATOM 5 CB ALA B 12 8.056 14.678 24.287 1.00 50.00 C ATOM 1 N ALA B 13 10.333 13.339 22.264 1.00 50.00 N ATOM 2 CA ALA B 13 11.239 13.471 21.127 1.00 50.00 C ATOM 3 C ALA B 13 11.096 12.161 20.325 1.00 50.00 C ATOM 4 O ALA B 13 11.145 12.175 19.123 1.00 50.00 O ATOM 5 CB ALA B 13 12.584 13.665 21.596 1.00 50.00 C ATOM 1 N ALA B 14 11.051 11.078 21.086 1.00 50.00 N ATOM 2 CA ALA B 14 10.953 9.771 20.454 1.00 50.00 C ATOM 3 C ALA B 14 9.550 9.463 20.117 1.00 50.00 C ATOM 4 O ALA B 14 9.233 8.571 19.367 1.00 50.00 O ATOM 5 CB ALA B 14 11.461 8.697 21.413 1.00 50.00 C ATOM 1 N ALA B 15 8.669 10.215 20.743 1.00 50.00 N ATOM 2 CA ALA B 15 7.282 10.010 20.486 1.00 50.00 C ATOM 3 C ALA B 15 6.825 10.982 19.376 1.00 50.00 C ATOM 4 O ALA B 15 5.855 10.783 18.619 1.00 50.00 O ATOM 5 CB ALA B 15 6.367 10.306 21.797 1.00 50.00 C ATOM 1 N ALA B 16 7.511 12.143 19.430 1.00 50.00 N ATOM 2 CA ALA B 16 7.233 13.302 18.551 1.00 50.00 C ATOM 3 C ALA B 16 7.912 13.082 17.205 1.00 50.00 C ATOM 4 O ALA B 16 7.492 13.573 16.111 1.00 50.00 O ATOM 5 CB ALA B 16 7.762 14.594 19.165 1.00 50.00 C ATOM 1 N ALA B 17 9.071 12.427 17.269 1.00 50.00 N ATOM 2 CA ALA B 17 9.595 11.771 16.091 1.00 50.00 C ATOM 3 C ALA B 17 8.883 10.519 15.763 1.00 50.00 C ATOM 4 O ALA B 17 8.890 10.193 14.597 1.00 50.00 O ATOM 5 CB ALA B 17 11.046 11.518 16.265 1.00 50.00 C ATOM 1 N ALA B 18 8.315 9.809 16.722 1.00 50.00 N ATOM 2 CA ALA B 18 7.515 8.647 16.448 1.00 50.00 C ATOM 3 C ALA B 18 6.253 9.063 15.707 1.00 50.00 C ATOM 4 O ALA B 18 5.559 8.173 15.198 1.00 50.00 O ATOM 5 CB ALA B 18 7.129 7.915 17.695 1.00 50.00 C ATOM 1 N ALA B 19 5.866 10.332 15.772 1.00 50.00 N ATOM 2 CA ALA B 19 4.686 10.808 15.089 1.00 50.00 C ATOM 3 C ALA B 19 5.011 11.578 13.803 1.00 50.00 C ATOM 4 O ALA B 19 4.291 11.514 12.837 1.00 50.00 O ATOM 5 CB ALA B 19 3.854 11.710 15.960 1.00 50.00 C ATOM 1 N ALA B 20 6.176 12.195 13.822 1.00 50.00 N ATOM 2 CA ALA B 20 6.614 13.121 
12.789 1.00 50.00 C ATOM 3 C ALA B 20 7.933 12.759 12.098 1.00 50.00 C ATOM 4 O ALA B 20 8.620 13.613 11.585 1.00 50.00 O ATOM 5 CB ALA B 20 6.823 14.498 13.449 1.00 50.00 C ATOM 1 N ALA B 21 8.284 11.511 12.050 1.00 50.00 N ATOM 2 CA ALA B 21 9.513 11.117 11.323 1.00 50.00 C ATOM 3 C ALA B 21 9.313 9.628 11.029 1.00 50.00 C ATOM 4 O ALA B 21 9.731 8.751 11.795 1.00 50.00 O ATOM 5 CB ALA B 21 10.799 11.332 12.178 1.00 50.00 C TER """<def_stmt>exercise_04 prefix="tst_mi_map_test_04"<block_start>""" Run with reference map. Check if working with NCS in the model. Without symmetry. """<line_sep># without cryst pdb_file=open("%s_start.pdb"%prefix "w")<line_sep>pdb_file.write(pdb_str)<line_sep>pdb_file.close()<line_sep>cmd=" ".join(["phenix.model_idealization" "%s_start.pdb"%prefix "use_map_for_reference=True" "loop_idealization.number_of_ccd_trials=1" "number_of_refinement_cycles=1" "n_macro=1" "debug=True" ">%s.log"%prefix])<line_sep>print(cmd)<assert_stmt><not>easy_run.call(cmd)<assert_stmt>os.path.isfile("%s_start.pdb_all_idealized.pdb"%prefix)<line_sep>res_log=open("%s.log"%prefix "r")<line_sep>log_lines=res_log.readlines()<line_sep># NCS constraints with map are not implemented yet <for_stmt>l [# "Using ncs\n", "Using map as reference\n" " Minimizing... (NCS)\n" # "Ramachandran outliers: 0.00 0.00 0.00 0.00 0.00\n", "All done.\n"]<block_start><assert_stmt>l<in>log_lines "'%s' not in log file."%l<block_end>res_log.close()<block_end><if_stmt>(__name__<eq>"__main__")<block_start>t0=time.time()<if_stmt>(<not>libtbx.env.has_module(name="probe"))<block_start>print("Skipping: probe not configured")<block_end><else_stmt><block_start>exercise_04()<block_end>print("Time: %.2f"%(time.time()-t0))<line_sep>print("OK")<block_end>
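# --- Illustrative sketch ---
# exercise_04 greps the captured log for a fixed set of whole lines. The same
# check can be factored into a small helper for similar regression tests; the
# helper name and the file/line arguments below are placeholders, not part of
# the test above.
def assert_lines_in_log(log_path, expected_lines):
    with open(log_path, "r") as f:
        log_lines = f.readlines()
    for l in expected_lines:
        assert l in log_lines, "'%s' not in log file." % l

# assert_lines_in_log("tst_mi_map_test_04.log",
#                     ["Using map as reference\n", "All done.\n"])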
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>PhysicsTools.SelectorUtils.centralIDRegistry central_id_registry<import_from_stmt>PhysicsTools.SelectorUtils.trivialCutFlow_cff *<line_sep>trivialCutFlowMD5=central_id_registry.getMD5FromName(trivialCutFlow.idName)<line_sep>egmPatElectronIDs=cms.EDProducer("VersionedPatElectronIdProducer" physicsObjectSrc=cms.InputTag('patElectrons') physicsObjectIDs=cms.VPSet(cms.PSet(idDefinition=trivialCutFlow idMD5=cms.string(trivialCutFlowMD5))))<line_sep>
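# --- Illustrative sketch ---
# How a fragment like the one above is typically attached to a cms.Process,
# reusing the cms import and the egmPatElectronIDs producer defined above;
# the process name, input source and path are assumptions, not part of the
# original configuration fragment.
process = cms.Process("ElectronIDTest")
process.source = cms.Source("PoolSource",
    fileNames=cms.untracked.vstring("file:patTuple.root"))   # placeholder input
process.egmPatElectronIDs = egmPatElectronIDs
process.p = cms.Path(process.egmPatElectronIDs)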
<import_stmt>json<line_sep>commcare_build_config=json.loads("""{ "_id": "config--commcare-builds", "doc_type": "CommCareBuildConfig", "preview": { "version": "1.2.1", "build_number": null, "latest": true }, "defaults": [{ "version": "1.2.1", "build_number": null, "latest": true }, { "version": "2.0.0", "build_number": null, "latest": true }], "application_versions": ["1.0", "2.0"], "menu": [ { "build": { "version": "1.1.1", "build_number": null, "latest": true }, "label": "CommCare 1.1.1" }, { "build": { "version": "1.2.1", "build_number": null, "latest": true }, "label": "CommCare 1.2.1" }, { "build": { "version": "1.3.0", "build_number": null, "latest": true }, "label": "CommCare 1.3 (RC5)" }, { "build": { "version": "2.0.0", "build_number": null, "latest": true }, "label": "CommCare 2.0 (unstable)" } ], "ID": "config--commcare-builds" }""")<line_sep>
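# --- Illustrative sketch ---
# Once json.loads has run, the structure above can be inspected directly; for
# example, listing the menu labels and the default versions it defines.
menu_labels = [item["label"] for item in commcare_build_config["menu"]]
default_versions = [d["version"] for d in commcare_build_config["defaults"]]
# menu_labels      -> ['CommCare 1.1.1', 'CommCare 1.2.1',
#                      'CommCare 1.3 (RC5)', 'CommCare 2.0 (unstable)']
# default_versions -> ['1.2.1', '2.0.0']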
<import_from_stmt>staffjoy.resource Resource<class_stmt>Session(Resource)<block_start>"""User session"""<line_sep>PATH="users/{user_id}/sessions/{session_id}"<line_sep>ID_NAME="session_id"<block_end>
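# --- Illustrative sketch ---
# PATH is a URL template that the Resource base class presumably fills in with
# route parameters; the concrete mechanism lives in staffjoy.resource, so the
# direct .format() call below only illustrates the template itself, and the
# example ids are placeholders.
print(Session.PATH.format(user_id=123, session_id="abc123"))
# -> users/123/sessions/abc123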
# -*- coding: UTF-8 -*- <import_from_stmt>contextlib contextmanager<import_stmt>gc<import_from_stmt>multiprocessing Process<import_stmt>subprocess<import_stmt>unittest<import_from_stmt>py4j.java_gateway JavaGateway GatewayParameters CallbackServerParameters DEFAULT_PORT DEFAULT_PYTHON_PROXY_PORT <import_from_stmt>py4j.clientserver ClientServer JavaParameters PythonParameters <import_from_stmt>py4j.tests.java_gateway_test PY4J_JAVA_PATH check_connection sleep <import_from_stmt>py4j.tests.py4j_callback_recursive_example HelloState<import_from_stmt>py4j.tests.instrumented InstrJavaGateway InstrumentedPythonPing register_creation CREATED FINALIZED MEMORY_HOOKS InstrClientServer <def_stmt>start_instrumented_gateway_server <block_start>subprocess.call(["java" "-Xmx512m" "-cp" PY4J_JAVA_PATH "py4j.instrumented.InstrumentedApplication"])<block_end><def_stmt>start_instrumented_clientserver <block_start>subprocess.call(["java" "-Xmx512m" "-cp" PY4J_JAVA_PATH "py4j.instrumented.InstrumentedClientServerApplication"])<block_end><def_stmt>start_gateway_server_example_app_process start_gateway_server=<true># XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED <block_start><if_stmt>start_gateway_server<block_start>p=Process(target=start_instrumented_gateway_server)<block_end><else_stmt><block_start>p=Process(target=start_instrumented_clientserver)<block_end>p.start()<line_sep>sleep()<line_sep>check_connection()<line_sep><return>p<block_end>@contextmanager<def_stmt>gateway_server_example_app_process start_gateway_server=<true><block_start>p=start_gateway_server_example_app_process(start_gateway_server)<try_stmt><block_start><yield>p<block_end><finally_stmt><block_start>p.join()<block_end><block_end><class_stmt>HelloState2(HelloState)<block_start><def_stmt>__init__ self run_gc=<true><block_start>self.gateway=<none><line_sep>self.run_gc=run_gc<line_sep>super(HelloState2 self).__init__()<line_sep>register_creation(self)<block_end><def_stmt>_play_with_jvm self<block_start>al=self.gateway.jvm.java.util.ArrayList()<line_sep>al.append("Hello World")<line_sep>obj=self.gateway.jvm.py4j.instrumented.InstrumentedObject("test")<line_sep>al.append(obj)<line_sep><return>str(al)<block_end><def_stmt>sayHello self int_value=<none> string_value=<none><block_start>self._play_with_jvm()<if_stmt>self.run_gc<block_start>python_gc()<block_end><return>super(HelloState2 self).sayHello(int_value string_value)<block_end><class_stmt>Java<block_start>implements=["py4j.examples.IHello"]<block_end><block_end><def_stmt>assert_python_memory test size<block_start>test.assertEqual(size len(CREATED))<line_sep>test.assertEqual(size len(FINALIZED))<line_sep>test.assertEqual(set(CREATED) set(FINALIZED))<block_end><def_stmt>python_gc <block_start>"""Runs the gc three times to ensure that all circular reference are correctly removed. 
"""<for_stmt>i range(3)<block_start>gc.collect()<block_end><block_end><class_stmt>GatewayServerTest(unittest.TestCase)<block_start><def_stmt>tearDown self<block_start>MEMORY_HOOKS.clear()<line_sep>CREATED.clear()<line_sep>FINALIZED.clear()<block_end><def_stmt>testPythonToJava self<block_start><def_stmt>work_with_object gateway<block_start>obj=gateway.jvm.py4j.instrumented.InstrumentedObject("test")<line_sep><return>str(obj)<block_end><def_stmt>internal_work <block_start>gateway2=InstrJavaGateway(gateway_parameters=GatewayParameters(port=DEFAULT_PORT+5))<line_sep>sleep()<line_sep>work_with_object(gateway2)<line_sep>python_gc()<line_sep>sleep()<line_sep>gateway2.shutdown()<block_end><with_stmt>gateway_server_example_app_process()<block_start>gateway=JavaGateway()<line_sep>gateway.entry_point.startServer2()<line_sep>internal_work()<line_sep>python_gc()<line_sep>gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=gateway.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=gateway.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 4 objects: GatewayServer, GatewayConnection, CallbackClient, # InstrumentedObject self.assertEqual(4 len(createdSet))<line_sep>self.assertEqual(4 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>gateway.shutdown()<line_sep># 4 objects: JavaGateway, GatewayClient, GatewayProperty, # GatewayConnection assert_python_memory(self 4)<block_end><block_end><def_stmt>testPythonToJavaToPython self<block_start><def_stmt>play_with_ping gateway<block_start>ping=InstrumentedPythonPing()<line_sep>pingpong=gateway.jvm.py4j.examples.PingPong()<line_sep>total=pingpong.start(ping)<line_sep><return>total<block_end><def_stmt>internal_work <block_start>gateway2=InstrJavaGateway(gateway_parameters=GatewayParameters(port=DEFAULT_PORT+5) callback_server_parameters=CallbackServerParameters(port=DEFAULT_PYTHON_PROXY_PORT+5))<line_sep>sleep()<line_sep>play_with_ping(gateway2)<line_sep>python_gc()<line_sep>sleep()<line_sep>gateway2.shutdown()<block_end><with_stmt>gateway_server_example_app_process()<block_start>gateway=JavaGateway()<line_sep>gateway.entry_point.startServer2()<line_sep>internal_work()<line_sep>python_gc()<line_sep>gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=gateway.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=gateway.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 9 objects: GatewayServer, 4 GatewayConnection, CallbackClient, # 3 CallbackConnection self.assertEqual(9 len(createdSet))<line_sep>self.assertEqual(9 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>gateway.shutdown()<line_sep># 11 objects: JavaGateway, CallbackSerer, GatewayClient, # GatewayProperty, PythonPing, 4 GatewayConnection, # 3 CallbackConnection. 
Notice the symmetry assert_python_memory(self 12)<block_end><block_end><def_stmt>testPythonToJavaToPythonClose self<block_start><def_stmt>play_with_ping gateway<block_start>ping=InstrumentedPythonPing()<line_sep>pingpong=gateway.jvm.py4j.examples.PingPong()<line_sep>total=pingpong.start(ping)<line_sep><return>total<block_end><def_stmt>internal_work assert_memory<block_start>gateway2=InstrJavaGateway(gateway_parameters=GatewayParameters(port=DEFAULT_PORT+5) callback_server_parameters=CallbackServerParameters(port=DEFAULT_PYTHON_PROXY_PORT+5))<line_sep>sleep()<line_sep>play_with_ping(gateway2)<line_sep>python_gc()<line_sep>sleep()<line_sep>gateway2.close(close_callback_server_connections=<true> keep_callback_server=<true>)<line_sep>sleep()<line_sep>assert_memory()<line_sep>gateway2.shutdown()<line_sep>sleep()<block_end><with_stmt>gateway_server_example_app_process()<block_start>gateway=JavaGateway()<line_sep>gateway.entry_point.startServer2()<def_stmt>perform_memory_tests <block_start>python_gc()<line_sep>gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=gateway.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=gateway.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 10 objects: GatewayServer, 4 GatewayConnection, # CallbackClient, 4 CallbackConnection self.assertEqual(10 len(createdSet))<line_sep># 13 objects: JavaGateway, CallbackSerer, GatewayClient, # GatewayProperty, PythonPing, 4 GatewayConnection, # 4 CallbackConnection. Notice the symmetry between callback # and gateway connections. self.assertEqual(13 len(CREATED))<line_sep># 4 gateway connections, 3 callback connections. # There is still one callback connection staying around # following Java finalization that called back Python. self.assertEqual(7 len(finalizedSet))<line_sep># Same amount of connections for the Python side self.assertEqual(7 len(FINALIZED))<block_end>internal_work(perform_memory_tests)<line_sep>python_gc()<line_sep>gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>gateway.shutdown()<line_sep># 14 objects: JavaGateway, CallbackSerer, GatewayClient, # GatewayProperty, PythonPing, 5 GatewayConnection, # 4 CallbackConnection. Notice the symmetry # One more gateway connection created because we called shutdown # after close (which requires a connection to send a shutdown # command). 
assert_python_memory(self 14)<block_end><block_end><def_stmt>testJavaToPythonToJavaCleanGC self<block_start><def_stmt>internal_work gateway<block_start>hello_state=HelloState2()<line_sep>gateway2=InstrJavaGateway(gateway_parameters=GatewayParameters(port=DEFAULT_PORT+5) callback_server_parameters=CallbackServerParameters(port=DEFAULT_PYTHON_PROXY_PORT+5) python_server_entry_point=hello_state)<line_sep>hello_state.gateway=gateway2<line_sep>sleep()<line_sep>gateway.entry_point.startServerWithPythonEntry(<true>)<line_sep>sleep()<line_sep>gateway2.shutdown()<line_sep># Check that Java correctly called Python self.assertEqual(2 len(hello_state.calls))<line_sep>self.assertEqual((<none> <none>) hello_state.calls[0])<line_sep>self.assertEqual((2 "Hello World") hello_state.calls[1])<block_end><with_stmt>gateway_server_example_app_process()<block_start>gateway=JavaGateway()<line_sep>internal_work(gateway)<line_sep>python_gc()<line_sep>gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=gateway.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=gateway.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 6 objects: 2 InstrumentedObject (sayHello called twice), 1 # InstrGatewayServer, 1 CallbackClient, 1 CallbackConnection, 1 # GatewayConnection self.assertEqual(6 len(createdSet))<line_sep>self.assertEqual(6 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>gateway.shutdown()<line_sep># 7 objects: JavaGateway, GatewayClient, CallbackServer, # GatewayProperty, HelloState, GatewayConnection, # CallbackConnection assert_python_memory(self 7)<block_end><block_end><def_stmt>testJavaToPythonToJavaNoGC self<block_start><def_stmt>internal_work gateway<block_start>hello_state=HelloState2(run_gc=<false>)<line_sep>gateway2=InstrJavaGateway(gateway_parameters=GatewayParameters(port=DEFAULT_PORT+5) callback_server_parameters=CallbackServerParameters(port=DEFAULT_PYTHON_PROXY_PORT+5) python_server_entry_point=hello_state)<line_sep>hello_state.gateway=gateway2<line_sep>sleep()<line_sep>gateway.entry_point.startServerWithPythonEntry(<true>)<line_sep>sleep()<line_sep>gateway2.shutdown()<line_sep># Check that Java correctly called Python self.assertEqual(2 len(hello_state.calls))<line_sep>self.assertEqual((<none> <none>) hello_state.calls[0])<line_sep>self.assertEqual((2 "Hello World") hello_state.calls[1])<block_end><with_stmt>gateway_server_example_app_process()<block_start>gateway=JavaGateway()<line_sep># We disable gc to test whether a shut down on one side will # garbage collect everything. 
gc.disable()<line_sep>internal_work(gateway)<line_sep>gc.enable()<line_sep>python_gc()<line_sep>gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=gateway.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=gateway.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 6 objects: 2 InstrumentedObject (sayHello called twice), 1 # InstrGatewayServer, 1 CallbackClient, 1 CallbackConnection, 1 # GatewayConnection self.assertEqual(6 len(createdSet))<line_sep>self.assertEqual(6 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>gateway.shutdown()<line_sep># 7 objects: JavaGateway, GatewayClient, CallbackServer, # GatewayProperty, HelloState, GatewayConnection, # CallbackConnection assert_python_memory(self 7)<block_end><block_end><def_stmt>testJavaToPythonToJavaCleanGCNoShutdown self<block_start><def_stmt>internal_work gateway<block_start>hello_state=HelloState2()<line_sep>gateway2=InstrJavaGateway(gateway_parameters=GatewayParameters(port=DEFAULT_PORT+5) callback_server_parameters=CallbackServerParameters(port=DEFAULT_PYTHON_PROXY_PORT+5) python_server_entry_point=hello_state)<line_sep>hello_state.gateway=gateway2<line_sep>sleep()<line_sep>gateway.entry_point.startServerWithPythonEntry(<false>)<line_sep>sleep()<line_sep>gateway2.shutdown()<line_sep># Check that Java correctly called Python self.assertEqual(2 len(hello_state.calls))<line_sep>self.assertEqual((<none> <none>) hello_state.calls[0])<line_sep>self.assertEqual((2 "Hello World") hello_state.calls[1])<block_end><with_stmt>gateway_server_example_app_process()<block_start>gateway=JavaGateway()<line_sep>internal_work(gateway)<line_sep>python_gc()<line_sep>gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=gateway.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=gateway.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 6 objects: 2 InstrumentedObject (sayHello called twice), 1 # InstrGatewayServer, 1 CallbackClient, 1 CallbackConnection, 1 # GatewayConnection self.assertEqual(6 len(createdSet))<line_sep>self.assertEqual(6 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>gateway.shutdown()<line_sep># 7 objects: JavaGateway, GatewayClient, CallbackServer, # GatewayProperty, HelloState, GatewayConnection, # CallbackConnection assert_python_memory(self 7)<block_end><block_end><def_stmt>testJavaToPythonToJavaNoGCNoShutdown self<block_start><def_stmt>internal_work gateway<block_start>hello_state=HelloState2(run_gc=<false>)<line_sep>gateway2=InstrJavaGateway(gateway_parameters=GatewayParameters(port=DEFAULT_PORT+5) callback_server_parameters=CallbackServerParameters(port=DEFAULT_PYTHON_PROXY_PORT+5) python_server_entry_point=hello_state)<line_sep>hello_state.gateway=gateway2<line_sep>sleep()<line_sep>gateway.entry_point.startServerWithPythonEntry(<false>)<line_sep>sleep()<line_sep>gateway2.shutdown()<line_sep># Check that Java correctly called Python self.assertEqual(2 len(hello_state.calls))<line_sep>self.assertEqual((<none> <none>) hello_state.calls[0])<line_sep>self.assertEqual((2 "Hello World") hello_state.calls[1])<block_end><with_stmt>gateway_server_example_app_process()<block_start>gateway=JavaGateway()<line_sep># We disable gc to test whether a shut down on one side will # garbage collect everything. 
gc.disable()<line_sep>internal_work(gateway)<line_sep>gc.enable()<line_sep>python_gc()<line_sep>gateway.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=gateway.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=gateway.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 6 objects: 2 InstrumentedObject (sayHello called twice), 1 # InstrGatewayServer, 1 CallbackClient, 1 CallbackConnection, 1 # GatewayConnection self.assertEqual(6 len(createdSet))<line_sep>self.assertEqual(6 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>gateway.shutdown()<line_sep># 7 objects: JavaGateway, GatewayClient, CallbackServer, # GatewayProperty, HelloState, GatewayConnection, # CallbackConnection assert_python_memory(self 7)<block_end><block_end><block_end><class_stmt>ClientServerTest(unittest.TestCase)<block_start><def_stmt>tearDown self<block_start>MEMORY_HOOKS.clear()<line_sep>CREATED.clear()<line_sep>FINALIZED.clear()<block_end><def_stmt>testPythonToJava self<block_start><def_stmt>work_with_object clientserver<block_start>obj=clientserver.jvm.py4j.instrumented.InstrumentedObject("test")<line_sep><return>str(obj)<block_end><def_stmt>internal_work <block_start>clientserver2=InstrClientServer(JavaParameters(port=DEFAULT_PORT+5) PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5))<line_sep>sleep()<line_sep>work_with_object(clientserver2)<line_sep>python_gc()<line_sep>sleep()<line_sep>clientserver2.shutdown()<block_end><with_stmt>gateway_server_example_app_process(<false>)<block_start>clientserver=ClientServer()<line_sep>clientserver.entry_point.startServer2()<line_sep>internal_work()<line_sep>python_gc()<line_sep>clientserver.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 5 objects: ClientServer, ClientServerConnection, PythonClient, # JavaServer, InstrumentedObject self.assertEqual(5 len(createdSet))<line_sep>self.assertEqual(5 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>clientserver.shutdown()<line_sep># 5 objects: ClientServer, ClientServerConnection, PythonClient, # JavaServer, GatewayProperty assert_python_memory(self 5)<block_end><block_end><def_stmt>testPythonToJavaToPython self<block_start><def_stmt>play_with_ping clientserver<block_start>ping=InstrumentedPythonPing()<line_sep>pingpong=clientserver.jvm.py4j.examples.PingPong()<line_sep>total=pingpong.start(ping)<line_sep><return>total<block_end><def_stmt>internal_work <block_start>clientserver2=InstrClientServer(JavaParameters(port=DEFAULT_PORT+5) PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5))<line_sep>sleep()<line_sep>play_with_ping(clientserver2)<line_sep>python_gc()<line_sep>sleep()<line_sep>clientserver2.shutdown()<block_end><with_stmt>gateway_server_example_app_process(<false>)<block_start>clientserver=ClientServer()<line_sep>clientserver.entry_point.startServer2()<line_sep>internal_work()<line_sep>python_gc()<line_sep>clientserver.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 4 objects: ClientServer, 
ClientServerConnection, JavaServer, # PythonClient self.assertEqual(4 len(createdSet))<line_sep>self.assertEqual(4 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>clientserver.shutdown()<line_sep># 6 objects: ClientServer, PythonServer, JavaClient, # GatewayProperty, PythonPing, ClientServerConnection <block_end><block_end><def_stmt>testPythonToJavaToPythonClose self<block_start><def_stmt>play_with_ping clientserver<block_start>ping=InstrumentedPythonPing()<line_sep>pingpong=clientserver.jvm.py4j.examples.PingPong()<line_sep>total=pingpong.start(ping)<line_sep><return>total<block_end><def_stmt>internal_work assert_memory<block_start>clientserver2=InstrClientServer(JavaParameters(port=DEFAULT_PORT+5) PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5))<line_sep>sleep()<line_sep>play_with_ping(clientserver2)<line_sep>python_gc()<line_sep>sleep()<line_sep>clientserver2.close(close_callback_server_connections=<true> keep_callback_server=<true>)<line_sep>sleep()<line_sep>assert_memory()<line_sep>clientserver2.shutdown()<line_sep>sleep()<block_end><with_stmt>gateway_server_example_app_process(<false>)<block_start>clientserver=ClientServer()<line_sep>clientserver.entry_point.startServer2()<def_stmt>perform_memory_tests <block_start>python_gc()<line_sep>clientserver.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 6 objects: ClientServer, JavaServer, # PythonClient, 3 ClientServerConnection. self.assertEqual(6 len(createdSet))<line_sep># Should be 2: ClientServer, 1 ClientServerConnection # But for some reasons, Java refuses to collect the # clientserverconnection even though there are no strong # references. 
self.assertEqual(1 len(finalizedSet))<line_sep># 8 objects: ClientServer, PythonServer, JavaClient, # GatewayProperty, PythonPing, 3 ClientServerConnection self.assertEqual(8 len(CREATED))<line_sep># PythonPing + ClientServerConnection self.assertEqual(2 len(FINALIZED))<block_end>internal_work(perform_memory_tests)<line_sep>python_gc()<line_sep>clientserver.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>clientserver.shutdown()<line_sep># 9 objects: ClientServer, PythonServer, JavaClient, # GatewayProperty, PythonPing, 4 ClientServerConnection assert_python_memory(self 9)<block_end><block_end><def_stmt>testJavaToPythonToJavaCleanGC self<block_start><def_stmt>internal_work clientserver<block_start>hello_state=HelloState2()<line_sep>clientserver2=InstrClientServer(JavaParameters(port=DEFAULT_PORT+5) PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5) python_server_entry_point=hello_state)<line_sep>hello_state.gateway=clientserver2<line_sep>sleep()<line_sep>clientserver.entry_point.startServerWithPythonEntry(<true>)<line_sep>sleep()<line_sep>clientserver2.shutdown()<line_sep># Check that Java correctly called Python self.assertEqual(2 len(hello_state.calls))<line_sep>self.assertEqual((<none> <none>) hello_state.calls[0])<line_sep>self.assertEqual((2 "Hello World") hello_state.calls[1])<block_end><with_stmt>gateway_server_example_app_process(<false>)<block_start>clientserver=ClientServer()<line_sep>internal_work(clientserver)<line_sep>python_gc()<line_sep>clientserver.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 7 objects: 2 InstrumentedObject (sayHello called twice), 1 # JavaServer, 1 PythonClient, 1 ClientServer, 2 # ClientServerConnection (1 to call sayHello) self.assertEqual(6 len(createdSet))<line_sep>self.assertEqual(6 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>clientserver.shutdown()<line_sep># 8 objects: ClientServer (ok), PythonServer (ok), JavaClient, # GatewayProperty, HelloState (ok), 3 ClientServer Connections (1) assert_python_memory(self 7)<block_end><block_end><def_stmt>testJavaToPythonToJavaNoGC self<block_start><def_stmt>internal_work clientserver<block_start>hello_state=HelloState2()<line_sep>clientserver2=InstrClientServer(JavaParameters(port=DEFAULT_PORT+5) PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5) python_server_entry_point=hello_state)<line_sep>hello_state.gateway=clientserver2<line_sep>sleep()<line_sep>clientserver.entry_point.startServerWithPythonEntry(<true>)<line_sep>sleep()<line_sep>clientserver2.shutdown()<line_sep># Check that Java correctly called Python self.assertEqual(2 len(hello_state.calls))<line_sep>self.assertEqual((<none> <none>) hello_state.calls[0])<line_sep>self.assertEqual((2 "Hello World") hello_state.calls[1])<block_end><with_stmt>gateway_server_example_app_process(<false>)<block_start>clientserver=ClientServer()<line_sep># We disable gc to test whether a shut down on one side will # garbage collect everything. 
gc.disable()<line_sep>internal_work(clientserver)<line_sep>gc.enable()<line_sep>python_gc()<line_sep>clientserver.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 7 objects: 2 InstrumentedObject (sayHello called twice), 1 # JavaServer, 1 PythonClient, 1 ClientServer, 2 # ClientServerConnection (1 to call sayHello) self.assertEqual(6 len(createdSet))<line_sep>self.assertEqual(6 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>clientserver.shutdown()<line_sep># 8 objects: ClientServer (ok), PythonServer (ok), JavaClient, # GatewayProperty, HelloState (ok), 3 ClientServer Connections (2) assert_python_memory(self 7)<block_end><block_end><def_stmt>testJavaToPythonToJavaCleanGCNoShutdown self<block_start><def_stmt>internal_work clientserver<block_start>hello_state=HelloState2()<line_sep>clientserver2=InstrClientServer(JavaParameters(port=DEFAULT_PORT+5) PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5) python_server_entry_point=hello_state)<line_sep>hello_state.gateway=clientserver2<line_sep>sleep()<line_sep>clientserver.entry_point.startServerWithPythonEntry(<false>)<line_sep>sleep()<line_sep>clientserver2.shutdown()<line_sep># Check that Java correctly called Python self.assertEqual(2 len(hello_state.calls))<line_sep>self.assertEqual((<none> <none>) hello_state.calls[0])<line_sep>self.assertEqual((2 "Hello World") hello_state.calls[1])<block_end><with_stmt>gateway_server_example_app_process(<false>)<block_start>clientserver=ClientServer()<line_sep># We disable gc to test whether a shut down on one side will # garbage collect everything. 
internal_work(clientserver)<line_sep>python_gc()<line_sep>clientserver.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 8 objects: 2 InstrumentedObject (sayHello called twice), 1 # JavaServer, 1 PythonClient, 1 ClientServer, 3 # ClientServerConnection (1 to call sayHello, # 1 that receives shutdown command) self.assertEqual(7 len(createdSet))<line_sep>self.assertEqual(7 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>clientserver.shutdown()<line_sep># 8 objects: ClientServer (ok), PythonServer (ok), JavaClient, # GatewayProperty, HelloState (ok), 3 ClientServer Connections (2) assert_python_memory(self 7)<block_end><block_end><def_stmt>testJavaToPythonToJavaNoGCNoShutdown self<block_start><def_stmt>internal_work clientserver<block_start>hello_state=HelloState2()<line_sep>clientserver2=InstrClientServer(JavaParameters(port=DEFAULT_PORT+5) PythonParameters(port=DEFAULT_PYTHON_PROXY_PORT+5) python_server_entry_point=hello_state)<line_sep>hello_state.gateway=clientserver2<line_sep>sleep()<line_sep>clientserver.entry_point.startServerWithPythonEntry(<false>)<line_sep>sleep()<line_sep>clientserver2.shutdown()<line_sep># Check that Java correctly called Python self.assertEqual(2 len(hello_state.calls))<line_sep>self.assertEqual((<none> <none>) hello_state.calls[0])<line_sep>self.assertEqual((2 "Hello World") hello_state.calls[1])<block_end><with_stmt>gateway_server_example_app_process(<false>)<block_start>clientserver=ClientServer()<line_sep># We disable gc to test whether a shut down on one side will # garbage collect everything. gc.disable()<line_sep>internal_work(clientserver)<line_sep>gc.enable()<line_sep>python_gc()<line_sep>clientserver.jvm.py4j.instrumented.MetricRegistry.forceFinalization()<line_sep>sleep()<line_sep>createdSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getCreatedObjectsKeySet()<line_sep>finalizedSet=clientserver.jvm.py4j.instrumented.MetricRegistry.getFinalizedObjectsKeySet()<line_sep># 7 objects: 2 InstrumentedObject (sayHello called twice), 1 # JavaServer, 1 PythonClient, 1 ClientServer, 3 # ClientServerConnection (1 to call sayHello, # 1 that receives shutdown command) self.assertEqual(7 len(createdSet))<line_sep>self.assertEqual(7 len(finalizedSet))<line_sep>self.assertEqual(createdSet finalizedSet)<line_sep>clientserver.shutdown()<line_sep># 8 objects: ClientServer (ok), PythonServer (ok), JavaClient, # GatewayProperty, HelloState (ok), 3 ClientServer Connections (2) assert_python_memory(self 7)<block_end><block_end><block_end>
""" Compose multiple datasets in a single loader. """<import_stmt>numpy<as>np<import_from_stmt>copy deepcopy<import_from_stmt>torch.utils.data Dataset<import_from_stmt>dataset.wireframe_dataset WireframeDataset<import_from_stmt>dataset.holicity_dataset HolicityDataset<class_stmt>MergeDataset(Dataset)<block_start><def_stmt>__init__ self mode config=<none><block_start>super(MergeDataset self).__init__()<line_sep># Initialize the datasets self._datasets=[]<line_sep>spec_config=deepcopy(config)<for_stmt>i,d enumerate(config['datasets'])<block_start>spec_config['dataset_name']=d<line_sep>spec_config['gt_source_train']=config['gt_source_train'][i]<line_sep>spec_config['gt_source_test']=config['gt_source_test'][i]<if_stmt>d<eq>"wireframe"<block_start>self._datasets.append(WireframeDataset(mode spec_config))<block_end><elif_stmt>d<eq>"holicity"<block_start>spec_config['train_split']=config['train_splits'][i]<line_sep>self._datasets.append(HolicityDataset(mode spec_config))<block_end><else_stmt><block_start><raise>ValueError("Unknown dataset: "+d)<block_end><block_end>self._weights=config['weights']<block_end><def_stmt>__getitem__ self item<block_start>dataset=self._datasets[np.random.choice(range(len(self._datasets)) p=self._weights)]<line_sep><return>dataset[np.random.randint(len(dataset))]<block_end><def_stmt>__len__ self<block_start><return>np.sum([len(d)<for>d self._datasets])<block_end><block_end>
# Generated by Django 1.10.7 on 2017-05-23 11:39 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('data_interfaces' '0009_scheduling_integration') ]<line_sep>operations=[migrations.AddField(model_name='automaticupdaterule' name='workflow' field=models.CharField(default='CASE_UPDATE' max_length=126) preserve_default=<false> ) ]<block_end>
# -*- coding: utf-8 -*- <import_stmt>os<import_stmt>platform<import_stmt>pytest<import_stmt>yaml<import_stmt>giraffez<import_from_stmt>giraffez.constants *<import_from_stmt>giraffez.errors *<import_from_stmt>giraffez.types Columns<import_from_stmt>giraffez.utils *<line_sep>@pytest.mark.usefixtures('config' 'tmpfiles')<class_stmt>TestConfig(object)<block_start><def_stmt>test_get_set_list_value self tmpfiles<block_start><with_stmt>giraffez.Config(tmpfiles.conf "w" tmpfiles.key)<as>config<block_start>value=config.get_value("test")<assert_stmt>value<eq>{}<line_sep>value=config.get_value("connections.default")<assert_stmt>value<eq>"db1"<line_sep>config.set_value("connections.default" "db2")<line_sep>value=config.get_value("connections.default")<assert_stmt>value<eq>"db2"<line_sep>value=config.list_value(decrypt=<false>)<block_end><block_end><def_stmt>test_get_multi_value self tmpfiles<block_start><with_stmt>giraffez.Config(tmpfiles.conf "w" tmpfiles.key)<as>config<block_start>value=config.get_value("connections")<block_end><block_end><def_stmt>test_get_trailing_dot self tmpfiles<block_start><with_stmt>giraffez.Config(tmpfiles.conf "w" tmpfiles.key)<as>config<block_start>value1=config.get_value("connections")<line_sep>value2=config.get_value("connections.")<assert_stmt>value1<eq>value2<block_end><block_end><def_stmt>test_unset_value self tmpfiles<block_start>expected_dsn="db2"<with_stmt>giraffez.Config(tmpfiles.conf "w" tmpfiles.key)<as>config<block_start>config.unset_value("connections.db1")<line_sep>value=config.get_value("connections.db1")<assert_stmt>value<eq>{}<block_end><block_end><def_stmt>test_read_only self tmpfiles<block_start><with_stmt>pytest.raises(ConfigReadOnly)<block_start><with_stmt>giraffez.Config(tmpfiles.conf "r" tmpfiles.key)<as>config<block_start>config.set_value("connections.default" "db2")<line_sep>config.write()<block_end><block_end><block_end><def_stmt>test_config_conf_missing self tmpfiles<block_start><with_stmt>pytest.raises(ConfigNotFound)<block_start><with_stmt>giraffez.Config("None" "r" tmpfiles.key)<as>config<block_start><pass><block_end><block_end><block_end><def_stmt>test_config_key_missing self tmpfiles<block_start><with_stmt>pytest.raises(KeyNotFound)<block_start><with_stmt>giraffez.Config(tmpfiles.conf "r" "None")<as>config<block_start><pass><block_end><block_end><block_end><def_stmt>test_config_conf_bad_permissions self tmpfiles# Tests for permissions on linux or unix-like system only. Windows # requires the use of Windows-only APIs to determine and set the # permissions on files. <block_start><if_stmt>platform.system()<eq>'Windows'<block_start><return><block_end><with_stmt>pytest.raises(ConfigurationError)<block_start>os.chmod(tmpfiles.conf 0o655)<with_stmt>giraffez.Config(tmpfiles.conf "r" tmpfiles.key)<as>config<block_start><pass><block_end><block_end>os.chmod(tmpfiles.conf 0o600)<block_end><def_stmt>test_config_key_bad_permissions self tmpfiles# Tests for permissions on linux or unix-like system only. Windows # requires the use of Windows-only APIs to determine and set the # permissions on files. 
<block_start><if_stmt>platform.system()<eq>'Windows'<block_start><return><block_end><with_stmt>pytest.raises(ConfigurationError)<block_start>os.chmod(tmpfiles.key 0o655)<with_stmt>giraffez.Config(tmpfiles.conf "r" tmpfiles.key)<as>config<block_start><pass><block_end><block_end>os.chmod(tmpfiles.key 0o400)<block_end><def_stmt>test_config_connections self tmpfiles<block_start><with_stmt>giraffez.Config(tmpfiles.conf "r" tmpfiles.key)<as>config<block_start>connections=config.connections<line_sep>dsn=config.get_connection("db1")<assert_stmt>dsn.get("host")<eq><none><block_end><block_end><def_stmt>test_config_lock self tmpfiles<block_start><with_stmt>giraffez.Config(tmpfiles.conf "r" tmpfiles.key)<as>config<block_start>giraffez.Config.lock_connection(tmpfiles.conf "db1" key=tmpfiles.key)<line_sep>giraffez.Config.lock_connection(tmpfiles.conf "db1" key=tmpfiles.key)<with_stmt>pytest.raises(ConnectionLock)<block_start>giraffez.Config.lock_connection(tmpfiles.conf "db1" key=tmpfiles.key)<block_end>config.reload()<line_sep>lock_value=config.get_value("connections.db1.lock")<assert_stmt>lock_value<eq>2<line_sep>giraffez.Config.unlock_connection(tmpfiles.conf "db1" key=tmpfiles.key)<line_sep>config.reload()<line_sep>lock_value=config.get_value("connections.db1.lock")<assert_stmt>lock_value<eq>{}<block_end><block_end><def_stmt>test_secret_decrypt self tmpfiles<block_start>expected_username="user123"<line_sep>expected_password="<PASSWORD>"<with_stmt>giraffez.Config(tmpfiles.conf "w" tmpfiles.key)<as>config<block_start>config.set_value("connections.db1.username" expected_username)<line_sep>config.set_value("connections.db1.password" expected_password)<line_sep>config.write()<block_end><with_stmt>giraffez.Secret(tmpfiles.conf "r" tmpfiles.key)<as>secret<block_start>username,password=secret("connections.db1.username, connections.db1.password")<assert_stmt>expected_username<eq>username<assert_stmt>expected_password<eq>password<block_end><with_stmt>giraffez.Secret(tmpfiles.conf "w" tmpfiles.key)<as>secret<block_start>secret.set("db1.username" expected_username)<line_sep>secret.set("db1.password" expected_password)<line_sep>username,password=secret("db1.username, db1.password")<assert_stmt>expected_username<eq>username<assert_stmt>expected_password<eq>password<block_end><block_end><block_end>
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['FlexibleServerConfigurationArgs' 'FlexibleServerConfiguration']<line_sep>@pulumi.input_type<class_stmt>FlexibleServerConfigurationArgs<block_start><def_stmt>__init__ __self__ * resource_group_name:pulumi.Input[str] server_name:pulumi.Input[str] value:pulumi.Input[str] name:Optional[pulumi.Input[str]]=<none><block_start>""" The set of arguments for constructing a FlexibleServerConfiguration resource. :param pulumi.Input[str] resource_group_name: The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created. :param pulumi.Input[str] server_name: Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. :param pulumi.Input[str] value: Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values. :param pulumi.Input[str] name: Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created. """<line_sep>pulumi.set(__self__ "resource_group_name" resource_group_name)<line_sep>pulumi.set(__self__ "server_name" server_name)<line_sep>pulumi.set(__self__ "value" value)<if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><block_end>@[email protected](name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Input[str]<block_start>""" The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:pulumi.Input[str]<block_start>pulumi.set(self "resource_group_name" value)<block_end>@[email protected](name="serverName")<def_stmt>server_name self<arrow>pulumi.Input[str]<block_start>""" Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "server_name")<block_end>@server_name.setter<def_stmt>server_name self value:pulumi.Input[str]<block_start>pulumi.set(self "server_name" value)<block_end>@[email protected]<def_stmt>value self<arrow>pulumi.Input[str]<block_start>""" Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values. """<line_sep><return>pulumi.get(self "value")<block_end>@value.setter<def_stmt>value self value:pulumi.Input[str]<block_start>pulumi.set(self "value" value)<block_end>@[email protected]<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>""" Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created. 
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end><block_end>@pulumi.input_type<class_stmt>_FlexibleServerConfigurationState<block_start><def_stmt>__init__ __self__ * name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> server_name:Optional[pulumi.Input[str]]=<none> value:Optional[pulumi.Input[str]]=<none><block_start>""" Input properties used for looking up and filtering FlexibleServerConfiguration resources. :param pulumi.Input[str] name: Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created. :param pulumi.Input[str] server_name: Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. :param pulumi.Input[str] value: Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values. """<if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>resource_group_name<is><not><none><block_start>pulumi.set(__self__ "resource_group_name" resource_group_name)<block_end><if_stmt>server_name<is><not><none><block_start>pulumi.set(__self__ "server_name" server_name)<block_end><if_stmt>value<is><not><none><block_start>pulumi.set(__self__ "value" value)<block_end><block_end>@[email protected]<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>""" Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@[email protected](name="resourceGroupName")<def_stmt>resource_group_name self<arrow>Optional[pulumi.Input[str]]<block_start>""" The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@resource_group_name.setter<def_stmt>resource_group_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "resource_group_name" value)<block_end>@[email protected](name="serverName")<def_stmt>server_name self<arrow>Optional[pulumi.Input[str]]<block_start>""" Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "server_name")<block_end>@server_name.setter<def_stmt>server_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "server_name" value)<block_end>@[email protected]<def_stmt>value self<arrow>Optional[pulumi.Input[str]]<block_start>""" Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values. 
"""<line_sep><return>pulumi.get(self "value")<block_end>@value.setter<def_stmt>value self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "value" value)<block_end><block_end><class_stmt>FlexibleServerConfiguration(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> server_name:Optional[pulumi.Input[str]]=<none> value:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start>""" Sets a MySQL Flexible Server Configuration value on a MySQL Flexible Server. ## Disclaimers > **Note:** Since this resource is provisioned by default, the Azure Provider will not check for the presence of an existing resource prior to attempting to create it. ## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_flexible_server = azure.mysql.FlexibleServer("exampleFlexibleServer", resource_group_name=azurerm_resource_group["test"]["name"], location=azurerm_resource_group["test"]["location"], administrator_login="adminTerraform", administrator_password="<PASSWORD>!", sku_name="GP_Standard_D2ds_v4") example_flexible_server_configuration = azure.mysql.FlexibleServerConfiguration("exampleFlexibleServerConfiguration", resource_group_name=example_resource_group.name, server_name=azurerm_mysql_server["example"]["name"], value="600") ``` ## Import MySQL Flexible Server Configurations can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:mysql/flexibleServerConfiguration:FlexibleServerConfiguration interactive_timeout /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/configurations/interactive_timeout ``` :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] name: Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created. :param pulumi.Input[str] server_name: Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. :param pulumi.Input[str] value: Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values. """<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:FlexibleServerConfigurationArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>""" Sets a MySQL Flexible Server Configuration value on a MySQL Flexible Server. ## Disclaimers > **Note:** Since this resource is provisioned by default, the Azure Provider will not check for the presence of an existing resource prior to attempting to create it. 
## Example Usage ```python import pulumi import pulumi_azure as azure example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe") example_flexible_server = azure.mysql.FlexibleServer("exampleFlexibleServer", resource_group_name=azurerm_resource_group["test"]["name"], location=azurerm_resource_group["test"]["location"], administrator_login="adminTerraform", administrator_password="<PASSWORD>!", sku_name="GP_Standard_D2ds_v4") example_flexible_server_configuration = azure.mysql.FlexibleServerConfiguration("exampleFlexibleServerConfiguration", resource_group_name=example_resource_group.name, server_name=azurerm_mysql_server["example"]["name"], value="600") ``` ## Import MySQL Flexible Server Configurations can be imported using the `resource id`, e.g. ```sh $ pulumi import azure:mysql/flexibleServerConfiguration:FlexibleServerConfiguration interactive_timeout /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/group1/providers/Microsoft.DBforMySQL/flexibleServers/flexibleServer1/configurations/interactive_timeout ``` :param str resource_name: The name of the resource. :param FlexibleServerConfigurationArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. """<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(FlexibleServerConfigurationArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> server_name:Optional[pulumi.Input[str]]=<none> value:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=FlexibleServerConfigurationArgs.__new__(FlexibleServerConfigurationArgs)<line_sep>__props__.__dict__["name"]=name<if_stmt>resource_group_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'resource_group_name'")<block_end>__props__.__dict__["resource_group_name"]=resource_group_name<if_stmt>server_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'server_name'")<block_end>__props__.__dict__["server_name"]=server_name<if_stmt>value<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'value'")<block_end>__props__.__dict__["value"]=value<block_end>super(FlexibleServerConfiguration __self__).__init__('azure:mysql/flexibleServerConfiguration:FlexibleServerConfiguration' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> 
name:Optional[pulumi.Input[str]]=<none> resource_group_name:Optional[pulumi.Input[str]]=<none> server_name:Optional[pulumi.Input[str]]=<none> value:Optional[pulumi.Input[str]]=<none><arrow>'FlexibleServerConfiguration'<block_start>""" Get an existing FlexibleServerConfiguration resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] name: Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created. :param pulumi.Input[str] resource_group_name: The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created. :param pulumi.Input[str] server_name: Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. :param pulumi.Input[str] value: Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values. """<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_FlexibleServerConfigurationState.__new__(_FlexibleServerConfigurationState)<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["resource_group_name"]=resource_group_name<line_sep>__props__.__dict__["server_name"]=server_name<line_sep>__props__.__dict__["value"]=value<line_sep><return>FlexibleServerConfiguration(resource_name opts=opts __props__=__props__)<block_end>@[email protected]<def_stmt>name self<arrow>pulumi.Output[str]<block_start>""" Specifies the name of the MySQL Flexible Server Configuration, which needs [to be a valid MySQL configuration name](https://dev.mysql.com/doc/refman/5.7/en/server-configuration.html). Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "name")<block_end>@[email protected](name="resourceGroupName")<def_stmt>resource_group_name self<arrow>pulumi.Output[str]<block_start>""" The name of the resource group in which the MySQL Flexible Server exists. Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "resource_group_name")<block_end>@[email protected](name="serverName")<def_stmt>server_name self<arrow>pulumi.Output[str]<block_start>""" Specifies the name of the MySQL Flexible Server. Changing this forces a new resource to be created. """<line_sep><return>pulumi.get(self "server_name")<block_end>@[email protected]<def_stmt>value self<arrow>pulumi.Output[str]<block_start>""" Specifies the value of the MySQL Flexible Server Configuration. See the MySQL documentation for valid values. """<line_sep><return>pulumi.get(self "value")<block_end><block_end>
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_stmt>datetime datetime<import_from_stmt>typing Dict<import_stmt>requests<import_from_stmt>pandas DataFrame<import_from_stmt>lib.concurrent thread_map<import_from_stmt>lib.data_source DataSource<import_from_stmt>lib.time date_range date_today<line_sep>_api_url_tpl="https://api-covid19.rnbo.gov.ua/data?to={date}"<def_stmt>_get_daily_records date:str<block_start>records=[]<line_sep>url=_api_url_tpl.format(date=date)<line_sep>daily_data=requests.get(url timeout=60).json().get("ukraine" [])<for_stmt>record daily_data<block_start>records.append({"date":date "country_code":"UA" "match_string":record.get("label" {}).get("en") "total_confirmed":record.get("confirmed") "total_deceased":record.get("deaths") "total_recovered":record.get("recovered") })<block_end><return>records<block_end><class_stmt>UkraineDataSource(DataSource)<block_start><def_stmt>parse self sources:Dict[str str] aux:Dict[str DataFrame] **parse_opts<arrow>DataFrame# Data can only be retrieved one day at a time, and it starts on 2020-01-22 <block_start>first="2020-01-22"<line_sep>map_iter=list(date_range(first date_today()))<line_sep>records=sum(thread_map(_get_daily_records map_iter) [])<line_sep><return>DataFrame.from_records(records)<block_end><block_end>
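A quick smoke test of the per-day helper above; it needs network access and assumes the RNBO endpoint still returns the same JSON schema, so treat it as an illustration rather than a guaranteed call.

# Illustration only: hits the live API for a single, arbitrarily chosen date.
records = _get_daily_records("2020-04-01")
print(len(records))
print(records[0]["match_string"], records[0]["total_confirmed"], records[0]["total_deceased"])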
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Tools for protein features. """<import_from_stmt>collections OrderedDict<import_from_stmt>enum Enum<class_stmt>ProteinTokenizer(object)<block_start>""" Protein Tokenizer. """<line_sep>padding_token='<pad>'<line_sep>mask_token='<mask>'<line_sep>start_token=class_token='<cls>'<line_sep>end_token=seperate_token='<sep>'<line_sep>unknown_token='<unk>'<line_sep>padding_token_id=0<line_sep>mask_token_id=1<line_sep>start_token_id=class_token_id=2<line_sep>end_token_id=seperate_token_id=3<line_sep>unknown_token_id=4<line_sep>special_token_ids=[padding_token_id mask_token_id start_token_id end_token_id unknown_token_id]<line_sep>vocab=OrderedDict([(padding_token 0) (mask_token 1) (class_token 2) (seperate_token 3) (unknown_token 4) ('A' 5) ('B' 6) ('C' 7) ('D' 8) ('E' 9) ('F' 10) ('G' 11) ('H' 12) ('I' 13) ('K' 14) ('L' 15) ('M' 16) ('N' 17) ('O' 18) ('P' 19) ('Q' 20) ('R' 21) ('S' 22) ('T' 23) ('U' 24) ('V' 25) ('W' 26) ('X' 27) ('Y' 28) ('Z' 29)])<def_stmt>tokenize self sequence<block_start>""" Split the sequence into token list. Args: sequence: The sequence to be tokenized. Returns: tokens: The token lists. """<line_sep><return>[x<for>x sequence]<block_end><def_stmt>convert_token_to_id self token<block_start>""" Converts a token to an id. Args: token: Token. Returns: id: The id of the input token. """<if_stmt>token<not><in>self.vocab<block_start><return>ProteinTokenizer.unknown_token_id<block_end><else_stmt><block_start><return>ProteinTokenizer.vocab[token]<block_end><block_end><def_stmt>convert_tokens_to_ids self tokens<block_start>""" Convert multiple tokens to ids. Args: tokens: The list of tokens. Returns: ids: The id list of the input tokens. """<line_sep><return>[self.convert_token_to_id(token)<for>token tokens]<block_end><def_stmt>gen_token_ids self sequence<block_start>""" Generate the list of token ids according the input sequence. Args: sequence: Sequence to be tokenized. Returns: token_ids: The list of token ids. """<line_sep>tokens=[]<line_sep>tokens.append(ProteinTokenizer.start_token)<line_sep>tokens.extend(self.tokenize(sequence))<line_sep>tokens.append(ProteinTokenizer.end_token)<line_sep>token_ids=self.convert_tokens_to_ids(tokens)<line_sep><return>token_ids<block_end><block_end>
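A small usage sketch of the tokenizer above; the amino-acid sequence is an arbitrary example and the expected ids follow directly from the vocab table.

# Example sequence chosen arbitrarily for illustration.
tokenizer = ProteinTokenizer()
seq = "ACDE"
print(tokenizer.tokenize(seq))             # ['A', 'C', 'D', 'E']
print(tokenizer.gen_token_ids(seq))        # [2, 5, 7, 8, 9, 3]  (<cls>, A, C, D, E, <sep>)
print(tokenizer.convert_token_to_id("J"))  # 4, i.e. the <unk> id (J is not in the vocab)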
<import_stmt>argparse<import_stmt>os<import_stmt>re<line_sep>parser=argparse.ArgumentParser('Visualizing Training sample, top200 pairs from randomly top 2000 pairs')<line_sep>parser.add_argument('--outHtml' type=str help='output html file')<line_sep>parser.add_argument('--imgDir' type=str help='image directory')<line_sep>args=parser.parse_args()<line_sep>### Writing the table format### f=open(args.outHtml 'w')<line_sep>f.write('<html>\n')<line_sep>f.write('<head>\n')<line_sep>f.write('\t<title></title>\n')<line_sep>f.write('\t<meta name=\"keywords\" content= \"Visual Result\" /> <meta charset=\"utf-8\" />\n')<line_sep>f.write('\t<meta name=\"robots\" content=\"index, follow\" />\n')<line_sep>f.write('\t<meta http-equiv=\"Content-Script-Type\" content=\"text/javascript\" />\n')<line_sep>f.write('\t<meta http-equiv=\"expires\" content=\"0\" />\n')<line_sep>f.write('\t<meta name=\"description\" content= \"Project page of style.css\" />\n')<line_sep>f.write('\t<link rel=\"stylesheet\" type=\"text/css\" href=\"style.css\" media=\"screen\" />\n')<line_sep>f.write('\t<link rel=\"shortcut icon\" href=\"favicon.ico\" />\n')<line_sep>f.write('</head>\n')<line_sep>f.write('<body>\n')<line_sep>f.write('<div id="website">\n')<line_sep>f.write('<center>\n')<line_sep>f.write('\t<div class=\"blank\"></div>\n')<line_sep>f.write('\t<h1>\n')<line_sep>f.write('\t\tVisualize Training Sample\n')<line_sep>f.write('\t</h1>\n')<line_sep>f.write('</center>\n')<line_sep>f.write('<div class=\"blank\"></div>\n')<line_sep>f.write('<center>\n')<line_sep>f.write('<div>\n')<line_sep>f.write('</div>\n')<line_sep>### ---HTML Table--- ### f.write('<table>\n')<line_sep>f.write('\t<tr>\n')<line_sep>f.write('\t\t<th># Rank</th>\n')<line_sep>f.write('\t\t<th>Img 1 </th>\n')<line_sep>f.write('\t\t<th>Img 2 </th>\n')<line_sep>f.write('\t</tr>\n')<line_sep>nbPair=len(os.listdir(args.imgDir))/2## Nb of row <for_stmt>j range(nbPair)<block_start>f.write('\t<tr >\n')<line_sep>msg='\t\t<th>{:d}</th>\n'.format(j+1)<line_sep>f.write(msg)## Rank img1=os.path.join(args.imgDir 'Rank{:d}_1.jpg'.format(j))<line_sep>msg='\t\t<td><a download=\"{}\" href=\"{}\" title="ImageName"> <img src=\"{}\" /></a> </td>\n'.format(img1 img1 img1)<line_sep>f.write(msg)## Img 1 img2=os.path.join(args.imgDir 'Rank{:d}_2.jpg'.format(j))<line_sep>msg='\t\t<td><a download=\"{}\" href=\"{}\" title="ImageName"> <img src=\"{}\" /></a> </td>\n'.format(img2 img2 img2)<line_sep>f.write(msg)## Img 2 f.write('\t</tr>\n')<block_end>f.write('</table>\n')<line_sep>f.write('</center>\n</div>\n </body>\n</html>\n')<line_sep>f.close()<line_sep>
# -*- coding: utf-8 -*- <import_from_future_stmt> division print_function absolute_import<import_stmt>sys<line_sep>sys.path.append('../../')<import_stmt>xml.etree.cElementTree<as>ET<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_stmt>glob<import_stmt>cv2<import_stmt>json<import_from_stmt>libs.label_name_dict.label_dict *<import_from_stmt>help_utils.tools *<line_sep>tf.app.flags.DEFINE_string('coco_dir' '/data/COCO/coco_trainvalmini.odgt' 'coco dir')<line_sep>tf.app.flags.DEFINE_string('save_name' 'train' 'save name')<line_sep>tf.app.flags.DEFINE_string('save_dir' '../tfrecord/' 'save name')<line_sep>tf.app.flags.DEFINE_string('dataset' 'coco' 'dataset')<line_sep>FLAGS=tf.app.flags.FLAGS<def_stmt>_int64_feature value<block_start><return>tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))<block_end><def_stmt>_bytes_feature value<block_start><return>tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))<block_end><def_stmt>convert_pascal_to_tfrecord coco_trainvalmini<block_start>save_path=FLAGS.save_dir+FLAGS.dataset+'_'+FLAGS.save_name+'.tfrecord'<line_sep>mkdir(FLAGS.save_dir)<line_sep># writer_options = tf.python_io.TFRecordOptions(tf.python_io.TFRecordCompressionType.ZLIB) # writer = tf.python_io.TFRecordWriter(path=save_path, options=writer_options) writer=tf.python_io.TFRecordWriter(path=save_path)<with_stmt>open(coco_trainvalmini)<as>f<block_start>files=f.readlines()<block_end>img_count=0<line_sep>gt_count=0<for_stmt>count,raw_line enumerate(files)<block_start>file=json.loads(raw_line)<line_sep>img_path=os.path.join('/data/COCO/train2017' file['fpath'].split('_')[-1])<line_sep>img_name=file['ID']<if_stmt><not>os.path.exists(img_path)# print('{} is not exist!'.format(img_path)) <block_start>img_count<augadd>1<line_sep><continue><block_end># img = np.array(Image.open(img_path)) img=cv2.imread(img_path)[: : ::-1]<if_stmt>img<is><none><block_start><continue><block_end>gtboxes=file['gtboxes']<line_sep>img_height=file['height']<line_sep>img_width=file['width']<if_stmt>len(gtboxes)<eq>0# print('{}: gt is not exist!'.format(img_path)) <block_start>gt_count<augadd>1<line_sep><continue><block_end>gtbox_label=[]<for_stmt>gt gtboxes<block_start>box=gt['box']<line_sep>label=gt['tag']<line_sep>gtbox_label.append([box[0] box[1] box[0]+box[2] box[1]+box[3] NAME_LABEL_MAP[label]])<block_end>gtbox_label=np.array(gtbox_label np.int32)<line_sep>feature=tf.train.Features(feature={# do not need encode() in linux 'img_name':_bytes_feature(img_name.encode()) # 'img_name': _bytes_feature(img_name), 'img_height':_int64_feature(img_height) 'img_width':_int64_feature(img_width) 'img':_bytes_feature(img.tostring()) 'gtboxes_and_label':_bytes_feature(gtbox_label.tostring()) 'num_objects':_int64_feature(gtbox_label.shape[0])})<line_sep>example=tf.train.Example(features=feature)<line_sep>writer.write(example.SerializeToString())<line_sep>view_bar('Conversion progress' count+1 len(files))<block_end>print('{} images not exist!'.format(img_count))<line_sep>print('{} gts not exist!'.format(gt_count))<line_sep>print('\nConversion is complete!')<block_end><if_stmt>__name__<eq>'__main__'<block_start>convert_pascal_to_tfrecord(FLAGS.coco_dir)<block_end>
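A read-back sanity check for the records written above, using the same TensorFlow 1.x API family as the writer; the tfrecord path mirrors the save_dir/dataset/save_name defaults and is an assumption.

import numpy as np
import tensorflow as tf

# Path assumed from the FLAGS defaults above (save_dir + dataset + '_' + save_name).
tfrecord_path = '../tfrecord/coco_train.tfrecord'
for record in tf.python_io.tf_record_iterator(tfrecord_path):
    example = tf.train.Example.FromString(record)
    feat = example.features.feature
    height = feat['img_height'].int64_list.value[0]
    width = feat['img_width'].int64_list.value[0]
    boxes = np.frombuffer(feat['gtboxes_and_label'].bytes_list.value[0],
                          dtype=np.int32).reshape(-1, 5)
    print(feat['img_name'].bytes_list.value[0], height, width, boxes.shape)
    break  # only inspect the first record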
""" Unit tests for ed_utils. -- <EMAIL> """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<line_sep># pylint: disable=relative-import # Local imports <import_from_stmt>.exd_utils random_sampling_cts random_sampling_kmeans_cts<import_from_stmt>..utils.base_test_class BaseTestClass execute_tests<class_stmt>EDUtilsTestCase(BaseTestClass)<block_start>""" Unit tests for generic functions ed_utils.py """<def_stmt>setUp self<block_start>""" Sets up unit tests. """<line_sep>self.lhs_data=[(1 10) (2 5) (4 10) (10 100)]<block_end>@classmethod<def_stmt>_check_sample_sizes cls data samples<block_start>""" Data is a tuple of the form (dim, num_samples) ans samples is an ndarray."""<assert_stmt>(data[1] data[0])<eq>samples.shape<block_end><def_stmt>test_random_sampling self<block_start>""" Tests random sampling. """<line_sep>self.report('Test random sampling.')<for_stmt>data self.lhs_data<block_start>self._check_sample_sizes(data random_sampling_cts(data[0] data[1]))<block_end><block_end><def_stmt>test_random_sampling_kmeans self<block_start>""" Tests random sampling with k-means. """<line_sep>self.report('Test random sampling with k-means.')<for_stmt>data self.lhs_data<block_start>self._check_sample_sizes(data random_sampling_kmeans_cts(data[0] data[1]))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>execute_tests()<block_end>
fields_masks={'background':"sheets/data/rowData/values/effectiveFormat/backgroundColor" 'value':"sheets/data/rowData/values/formattedValue" 'note':"sheets/data/rowData/values/note" 'font_color':"sheets/data/rowData/values/effectiveFormat/textFormat/foregroundColor"}<line_sep>
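One way these masks could be used, as a hedged sketch with the google-api-python-client Sheets v4 API; SPREADSHEET_ID, the range and the credentials object are placeholders, not values from the original snippet.

from googleapiclient.discovery import build

# Placeholders: supply real credentials and a real spreadsheet id.
service = build('sheets', 'v4', credentials=creds)
response = service.spreadsheets().get(
    spreadsheetId=SPREADSHEET_ID,
    ranges=['Sheet1!A1:C10'],
    includeGridData=True,
    fields=fields_masks['value'],   # only fetch formattedValue for each cell
).execute()
row_data = response['sheets'][0]['data'][0].get('rowData', [])
values = [[cell.get('formattedValue') for cell in row.get('values', [])]
          for row in row_data]
print(values)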
# Copyright (c) 2020 BlenderNPR and contributors. MIT license. <import_stmt>math<import_stmt>ctypes<import_stmt>pyrr<import_from_stmt>Malt.GL.GL *<import_from_stmt>Malt.GL.Shader UBO<import_from_stmt>Malt.GL.Texture TextureArray CubeMapArray<import_from_stmt>Malt.GL.RenderTarget ArrayLayerTarget RenderTarget<import_from_stmt>Malt Pipeline<line_sep>_LIGHTS_BUFFER=<none><def_stmt>get_lights_buffer <block_start><if_stmt>Pipeline.MAIN_CONTEXT<block_start><global>_LIGHTS_BUFFER<if_stmt>_LIGHTS_BUFFER<is><none><block_start>_LIGHTS_BUFFER=LightsBuffer()<block_end><return>_LIGHTS_BUFFER<block_end><else_stmt><block_start><return>LightsBuffer()<block_end><block_end>_SHADOWMAPS=<none><def_stmt>get_shadow_maps <block_start><if_stmt>Pipeline.MAIN_CONTEXT<block_start><global>_SHADOWMAPS<if_stmt>_SHADOWMAPS<is><none><block_start>_SHADOWMAPS=ShadowMaps()<block_end><return>_SHADOWMAPS<block_end><else_stmt><block_start><return>ShadowMaps()<block_end><block_end>LIGHT_SUN=1<line_sep>LIGHT_POINT=2<line_sep>LIGHT_SPOT=3<class_stmt>C_Light(ctypes.Structure)<block_start>_fields_=[('color' ctypes.c_float<times>3) ('type' ctypes.c_int32) ('position' ctypes.c_float<times>3) ('radius' ctypes.c_float) ('direction' ctypes.c_float<times>3) ('spot_angle' ctypes.c_float) ('spot_blend' ctypes.c_float) ('type_index' ctypes.c_int32) ('__padding' ctypes.c_int32<times>2) ]<block_end>MAX_SPOTS=64<line_sep>MAX_SUNS=64<line_sep>MAX_LIGHTS=128<class_stmt>C_LightsBuffer(ctypes.Structure)<block_start>_fields_=[('lights' C_Light<times>MAX_LIGHTS) ('lights_count' ctypes.c_int) ('cascades_count' ctypes.c_int) ('__padding' ctypes.c_int32<times>2) ('spot_matrices' ctypes.c_float<times>16<times>MAX_SPOTS) ('sun_matrices' ctypes.c_float<times>16<times>MAX_SUNS) ]<block_end><class_stmt>ShadowMaps(object)<block_start><def_stmt>__init__ self<block_start>self.max_spots=1<line_sep>self.spot_resolution=2048<line_sep>self.spot_depth_t=<none><line_sep>self.spot_fbos=[]<line_sep>self.max_suns=1<line_sep>self.sun_resolution=2048<line_sep>self.sun_depth_t=<none><line_sep>self.sun_fbos=[]<line_sep>self.max_points=1<line_sep>self.point_resolution=512<line_sep>self.point_depth_t=<none><line_sep>self.point_fbos=[]<line_sep>self.initialized=<false><block_end><def_stmt>load self scene spot_resolution sun_resolution point_resolution sun_cascades<block_start>needs_setup=self.initialized<is><false><line_sep>self.initialized=<true><line_sep>new_settings=(spot_resolution sun_resolution point_resolution)<line_sep>current_settings=(self.spot_resolution self.sun_resolution self.point_resolution)<if_stmt>new_settings<ne>current_settings<block_start>self.spot_resolution=spot_resolution<line_sep>self.sun_resolution=sun_resolution<line_sep>self.point_resolution=point_resolution<line_sep>needs_setup=<true><block_end>spot_count=len([l<for>l scene.lights<if>l.type<eq>LIGHT_SPOT])<if_stmt>spot_count<g>self.max_spots<block_start>self.max_spots=spot_count<line_sep>needs_setup=<true><block_end>sun_count=len([l<for>l scene.lights<if>l.type<eq>LIGHT_SUN])<line_sep>sun_count=sun_count<times>sun_cascades<if_stmt>sun_count<g>self.max_suns<block_start>self.max_suns=sun_count<line_sep>needs_setup=<true><block_end>point_count=len([l<for>l scene.lights<if>l.type<eq>LIGHT_POINT])<if_stmt>point_count<g>self.max_points<block_start>self.max_points=point_count<line_sep>needs_setup=<true><block_end><if_stmt>needs_setup<block_start>self.setup()<block_end>self.clear(spot_count sun_count point_count)<block_end><def_stmt>setup self 
create_fbos=<true><block_start>self.spot_depth_t=TextureArray((self.spot_resolution self.spot_resolution) self.max_spots GL_DEPTH_COMPONENT32F)<line_sep>self.sun_depth_t=TextureArray((self.sun_resolution self.sun_resolution) self.max_suns GL_DEPTH_COMPONENT32F)<line_sep>self.point_depth_t=CubeMapArray((self.point_resolution self.point_resolution) self.max_points GL_DEPTH_COMPONENT32F)<if_stmt>create_fbos<block_start>self.spot_fbos=[]<for_stmt>i range(self.spot_depth_t.length)<block_start>self.spot_fbos.append(RenderTarget([] ArrayLayerTarget(self.spot_depth_t i)))<block_end>self.sun_fbos=[]<for_stmt>i range(self.sun_depth_t.length)<block_start>self.sun_fbos.append(RenderTarget([] ArrayLayerTarget(self.sun_depth_t i)))<block_end>self.point_fbos=[]<for_stmt>i range(self.point_depth_t.length<times>6)<block_start>self.point_fbos.append(RenderTarget([] ArrayLayerTarget(self.point_depth_t i)))<block_end><block_end><block_end><def_stmt>clear self spot_count sun_count point_count<block_start><for_stmt>i range(spot_count)<block_start>self.spot_fbos[i].clear(depth=1)<block_end><for_stmt>i range(sun_count)<block_start>self.sun_fbos[i].clear(depth=1)<block_end><for_stmt>i range(point_count<times>6)<block_start>self.point_fbos[i].clear(depth=1)<block_end><block_end><def_stmt>shader_callback self shader<block_start>shader.textures['SHADOWMAPS_DEPTH_SPOT']=self.spot_depth_t<line_sep>shader.textures['SHADOWMAPS_DEPTH_SUN']=self.sun_depth_t<line_sep>shader.textures['SHADOWMAPS_DEPTH_POINT']=self.point_depth_t<block_end><block_end><class_stmt>LightsBuffer(object)<block_start><def_stmt>__init__ self<block_start>self.data=C_LightsBuffer()<line_sep>self.UBO=UBO()<line_sep>self.spots=<none><line_sep>self.suns=<none><line_sep>self.points=<none><block_end><def_stmt>load self scene cascades_count cascades_distribution_scalar cascades_max_distance=1.0#TODO: Automatic distribution exponent basedd on FOV <block_start>spot_count=0<line_sep>sun_count=0<line_sep>point_count=0<import_from_stmt>collections OrderedDict<line_sep>self.spots=OrderedDict()<line_sep>self.suns=OrderedDict()<line_sep>self.points=OrderedDict()<for_stmt>i,light enumerate(scene.lights)<block_start>self.data.lights[i].color=light.color<line_sep>self.data.lights[i].type=light.type<line_sep>self.data.lights[i].position=light.position<line_sep>self.data.lights[i].radius=light.radius<line_sep>self.data.lights[i].direction=light.direction<line_sep>self.data.lights[i].spot_angle=light.spot_angle<line_sep>self.data.lights[i].spot_blend=light.spot_blend<if_stmt>light.type<eq>LIGHT_SPOT<block_start>self.data.lights[i].type_index=spot_count<line_sep>projection_matrix=make_projection_matrix(light.spot_angle 1 0.01 light.radius)<line_sep>spot_matrix=projection_matrix<times>pyrr.Matrix44(light.matrix)<line_sep>self.data.spot_matrices[spot_count]=flatten_matrix(spot_matrix)<line_sep>self.spots[light]=[(light.matrix flatten_matrix(projection_matrix))]<line_sep>spot_count<augadd>1<block_end><if_stmt>light.type<eq>LIGHT_SUN<block_start>self.data.lights[i].type_index=sun_count<line_sep>sun_matrix=pyrr.Matrix44(light.matrix)<line_sep>projection_matrix=pyrr.Matrix44(scene.camera.projection_matrix)<line_sep>view_matrix=projection_matrix<times>pyrr.Matrix44(scene.camera.camera_matrix)<line_sep>cascades_matrices=get_sun_cascades(sun_matrix projection_matrix view_matrix cascades_count cascades_distribution_scalar cascades_max_distance)<line_sep>self.suns[light]=[]<for_stmt>i,cascade 
enumerate(cascades_matrices)<block_start>cascade=flatten_matrix(cascade)<line_sep>self.data.sun_matrices[sun_count<times>cascades_count+i]=cascade<line_sep>self.suns[light].append((cascade flatten_matrix(pyrr.Matrix44.identity())))<block_end>sun_count<augadd>1<block_end><if_stmt>light.type<eq>LIGHT_POINT<block_start>self.data.lights[i].type_index=point_count<line_sep>cube_map_axes=[((1 0 0) (0 -1 0)) ((-1 0 0) (0 -1 0)) ((0 1 0) (0 0 1)) ((0 -1 0) (0 0 -1)) ((0 0 1) (0 -1 0)) ((0 0 -1) (0 -1 0))]<line_sep>matrices=[]<for_stmt>axes cube_map_axes<block_start>position=pyrr.Vector3(light.position)<line_sep>front=pyrr.Vector3(axes[0])<line_sep>up=pyrr.Vector3(axes[1])<line_sep>matrices.append(pyrr.Matrix44.look_at(position position+front up))<block_end>projection_matrix=make_projection_matrix(math.pi/2.0 1.0 0.01 light.radius)<line_sep>self.points[light]=[]<for_stmt>i range(6)<block_start>self.points[light].append((flatten_matrix(matrices[i]) flatten_matrix(projection_matrix)))<block_end>point_count<augadd>1<block_end><block_end>self.data.lights_count=len(scene.lights)<line_sep>self.data.cascades_count=cascades_count<line_sep>self.UBO.load_data(self.data)<block_end><def_stmt>bind self location<block_start>self.UBO.bind(location)<block_end><block_end><def_stmt>flatten_matrix matrix<block_start><return>(ctypes.c_float<times>16)(*[e<for>v matrix<for>e v])<block_end>#TODO: Hard-coded for Blender conventions for now <def_stmt>make_projection_matrix fov aspect_ratio near far<block_start>x_scale=1.0/math.tan(fov/2.0)<line_sep>y_scale=x_scale<times>aspect_ratio<line_sep><return>pyrr.Matrix44([x_scale 0 0 0 0 y_scale 0 0 0 0 (-(far+near))/(far-near) -1 0 0 (-2.0<times>far<times>near)/(far-near) 0])<block_end><def_stmt>get_sun_cascades sun_from_world_matrix projection_matrix view_from_world_matrix cascades_count cascades_distribution_scalar cascades_max_distance<block_start>cascades=[]<line_sep>splits=[]<line_sep>n,f=0 0<if_stmt>projection_matrix[3][3]<eq>1.0# ortho <block_start>n=cascades_max_distance/2<line_sep>f=-cascades_max_distance/2<block_end><else_stmt># perspective <block_start>clip_start=projection_matrix.inverse<times>pyrr.Vector4([0 0 -1 1])<line_sep>clip_start<augdiv>clip_start.w<line_sep>n=clip_start.z<line_sep>f=-cascades_max_distance<block_end><def_stmt>lerp a b f<block_start>f=max(0 min(f 1))<line_sep><return>a<times>(1.0-f)+b<times>f<block_end><for_stmt>i range(cascades_count+1)<block_start>split_log=n<times>pow(f/n i/cascades_count)<line_sep>split_uniform=n+(f-n)<times>(i/cascades_count)<line_sep>split=lerp(split_uniform split_log cascades_distribution_scalar)<line_sep>projected=projection_matrix<times>pyrr.Vector4([0 0 split 1])<line_sep>projected=(projected/projected.w)<times>(1.0<if>projected.w<ge>0<else>-1.0)<line_sep>splits.append(projected.z)<block_end><for_stmt>i range(1 len(splits))<block_start>near=splits[i-1]<line_sep>far=splits[i]<line_sep>cascades.append(sun_shadowmap_matrix(sun_from_world_matrix view_from_world_matrix near far))<block_end><return>cascades<block_end><def_stmt>frustum_corners view_from_world_matrix near far<block_start>m=view_from_world_matrix.inverse<line_sep>corners=[]<for_stmt>x (-1 1)<block_start><for_stmt>y (-1 1)<block_start><for_stmt>z (near far)<block_start>v=pyrr.Vector4([x y z 1])<line_sep>v=m<times>v<line_sep>v<augdiv>v.w<line_sep>corners.append(v)<block_end><block_end><block_end><return>corners<block_end><def_stmt>sun_shadowmap_matrix sun_from_world_matrix view_from_world_matrix near 
far<block_start>INFINITY=float('inf')<line_sep>aabb={'min':pyrr.Vector3([INFINITY INFINITY INFINITY]) 'max':pyrr.Vector3([-INFINITY -INFINITY -INFINITY])}<for_stmt>corner frustum_corners(view_from_world_matrix near far)<block_start>corner=sun_from_world_matrix<times>corner<line_sep>aabb['min'].x=min(aabb['min'].x corner.x)<line_sep>aabb['min'].y=min(aabb['min'].y corner.y)<line_sep>aabb['min'].z=min(aabb['min'].z corner.z)<line_sep>aabb['max'].x=max(aabb['max'].x corner.x)<line_sep>aabb['max'].y=max(aabb['max'].y corner.y)<line_sep>aabb['max'].z=max(aabb['max'].z corner.z)<block_end>world_from_light_space=sun_from_world_matrix.inverse<line_sep>size=aabb['max']-aabb['min']<line_sep>aabb['min']=world_from_light_space<times>pyrr.Vector4([*aabb['min'].tolist() 1.0])<line_sep>aabb['max']=world_from_light_space<times>pyrr.Vector4([*aabb['max'].tolist() 1.0])<line_sep>center=(aabb['min']+aabb['max'])/2.0<line_sep>center=pyrr.Vector3(center.tolist()[:3])<line_sep>scale=pyrr.Matrix44.from_scale(size)<line_sep>translate=pyrr.Matrix44.from_translation(center)<line_sep>matrix=translate<times>world_from_light_space<times>scale<line_sep>screen=pyrr.Matrix44([1 0 0 0 0 1 0 0 0 0 -1 0 0 0 0 1])<line_sep><return>screen<times>matrix.inverse<block_end>
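The cascade split scheme used in get_sun_cascades above, pulled out as a standalone illustration: each split interpolates between uniform and logarithmic spacing. The near/far/count numbers are example values, and the positive-distance convention is a simplification of the view-space signs used above.

def cascade_splits(near, far, count, distribution_scalar):
    # Blend of logarithmic and uniform spacing, same lerp as in get_sun_cascades.
    def lerp(a, b, f):
        f = max(0.0, min(f, 1.0))
        return a * (1.0 - f) + b * f
    splits = []
    for i in range(count + 1):
        split_log = near * pow(far / near, i / count)
        split_uniform = near + (far - near) * (i / count)
        splits.append(lerp(split_uniform, split_log, distribution_scalar))
    return splits

# Example values only: 4 cascades between 0.1 and 100 units, biased towards log spacing.
print(cascade_splits(0.1, 100.0, 4, 0.8))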
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** <import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['GetClustersResult' 'AwaitableGetClustersResult' 'get_clusters' ]<line_sep>@pulumi.output_type<class_stmt>GetClustersResult<block_start>""" A collection of values returned by getClusters. """<def_stmt>__init__ __self__ id=<none> names=<none><block_start><if_stmt>id<and><not>isinstance(id str)<block_start><raise>TypeError("Expected argument 'id' to be a str")<block_end>pulumi.set(__self__ "id" id)<if_stmt>names<and><not>isinstance(names list)<block_start><raise>TypeError("Expected argument 'names' to be a list")<block_end>pulumi.set(__self__ "names" names)<block_end>@[email protected]<def_stmt>id self<arrow>str<block_start>""" The provider-assigned unique ID for this managed resource. """<line_sep><return>pulumi.get(self "id")<block_end>@[email protected]<def_stmt>names self<arrow>Sequence[str]<block_start>""" Set of EKS clusters names """<line_sep><return>pulumi.get(self "names")<block_end><block_end><class_stmt>AwaitableGetClustersResult(GetClustersResult)# pylint: disable=using-constant-test <block_start><def_stmt>__await__ self<block_start><if_stmt><false><block_start><yield>self<block_end><return>GetClustersResult(id=self.id names=self.names)<block_end><block_end><def_stmt>get_clusters opts:Optional[pulumi.InvokeOptions]=<none><arrow>AwaitableGetClustersResult<block_start>""" Retrieve EKS Clusters list """<line_sep>__args__=dict()<if_stmt>opts<is><none><block_start>opts=pulumi.InvokeOptions()<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end>__ret__=pulumi.runtime.invoke('aws:eks/getClusters:getClusters' __args__ opts=opts typ=GetClustersResult).value<line_sep><return>AwaitableGetClustersResult(id=__ret__.id names=__ret__.names)<block_end>
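A short sketch of calling the data source above from a Pulumi program; it assumes an AWS provider already configured for the stack and simply exports the discovered names.

import pulumi
import pulumi_aws as aws

# Requires AWS credentials/region configured for the Pulumi stack.
clusters = aws.eks.get_clusters()
pulumi.export("eks_cluster_names", clusters.names)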
"""REST decorators"""<import_stmt>logging<import_from_stmt>decorator decorator<import_from_stmt>pylons.controllers.util abort<import_from_stmt>pylons.decorators.util get_pylons<line_sep>__all__=['dispatch_on' 'restrict']<line_sep>log=logging.getLogger(__name__)<def_stmt>restrict *methods<block_start>"""Restricts access to the function depending on HTTP method Example: .. code-block:: python from pylons.decorators import rest class SomeController(BaseController): @rest.restrict('GET') def comment(self, id): """<def_stmt>check_methods func *args **kwargs<block_start>"""Wrapper for restrict"""<if_stmt>get_pylons(args).request.method<not><in>methods<block_start>log.debug("Method not allowed by restrict")<line_sep>abort(405 headers=[('Allow' ','.join(methods))])<block_end><return>func(*args **kwargs)<block_end><return>decorator(check_methods)<block_end><def_stmt>dispatch_on **method_map<block_start>"""Dispatches to alternate controller methods based on HTTP method Multiple keyword arguments should be passed, with the keyword corresponding to the HTTP method to dispatch on (DELETE, POST, GET, etc.) and the value being the function to call. The value should be a string indicating the name of the function to dispatch to. Example: .. code-block:: python from pylons.decorators import rest class SomeController(BaseController): @rest.dispatch_on(POST='create_comment') def comment(self): # Do something with the comment def create_comment(self, id): # Do something if its a post to comment """<def_stmt>dispatcher func self *args **kwargs<block_start>"""Wrapper for dispatch_on"""<line_sep>alt_method=method_map.get(get_pylons(args).request.method)<if_stmt>alt_method<block_start>alt_method=getattr(self alt_method)<line_sep>log.debug("Dispatching to %s instead" alt_method)<line_sep><return>self._inspect_call(alt_method **kwargs)<block_end><return>func(self *args **kwargs)<block_end><return>decorator(dispatcher)<block_end>
# Copyright 2016 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>io<import_stmt>inspect<import_stmt>os<import_stmt>sys<import_stmt>time<import_stmt>traceback<import_stmt>app.buffer_file<line_sep>screenLog=[u"--- screen log ---"]<line_sep>fullLog=[u"--- begin log ---"]<line_sep>enabledChannels={u"meta":<true> #'mouse': True, u"startup":<true> }<line_sep>shouldWritePrintLog=<false><line_sep>startTime=time.time()<def_stmt>get_lines <block_start><return>screenLog<block_end><def_stmt>parse_lines frame logChannel *args<block_start><if_stmt><not>len(args)<block_start>args=[u""]<block_end>msg=str(args[0])<if_stmt>1<block_start>msg=u"%s %s %s %s: %s"%(logChannel os.path.split(frame[1])[1] frame[2] frame[3] msg )<block_end>prior=msg<for_stmt>i args[1:]<block_start><if_stmt><not>len(prior)<or>prior[-1]<ne>u"\n"<block_start>msg<augadd>u" "<block_end>prior=repr(i)# unicode(i) msg<augadd>prior<block_end><return>msg.split(u"\n")<block_end><def_stmt>channel_enable logChannel isEnabled<block_start><global>fullLog shouldWritePrintLog<line_sep>fullLog<augadd>[u"%10s %10s: %s %r"%(u"logging" u"channel_enable" logChannel isEnabled)]<if_stmt>isEnabled<block_start>enabledChannels[logChannel]=isEnabled<line_sep>shouldWritePrintLog=<true><block_end><else_stmt><block_start>enabledChannels.pop(channel <none>)<block_end><block_end><def_stmt>channel logChannel *args<block_start><global>fullLog screenLog<if_stmt>logChannel<in>enabledChannels<block_start>lines=parse_lines(inspect.stack()[2] logChannel *args)<line_sep>screenLog<augadd>lines<line_sep>fullLog<augadd>lines<block_end><block_end><def_stmt>caller *args<block_start><global>fullLog screenLog<line_sep>priorCaller=inspect.stack()[2]<line_sep>msg=(u"%s %s %s"%(os.path.split(priorCaller[1])[1] priorCaller[2] priorCaller[3]) )+args<line_sep>lines=parse_lines(inspect.stack()[1] u"caller" *msg)<line_sep>screenLog<augadd>lines<line_sep>fullLog<augadd>lines<block_end><def_stmt>exception e *args<block_start><global>fullLog<line_sep>lines=parse_lines(inspect.stack()[1] u"except" *args)<line_sep>fullLog<augadd>lines<line_sep>errorType,value,tracebackInfo=sys.exc_info()<line_sep>out=traceback.format_exception(errorType value tracebackInfo)<for_stmt>i out<block_start>error(i[:-1])<block_end><block_end><def_stmt>check_failed prefix a op b<block_start>stack(u"failed %s %r %s %r"%(prefix a op b))<line_sep><raise>Exception("fatal error")<block_end><def_stmt>check_ge a b<block_start><if_stmt>a<ge>b<block_start><return><block_end>check_failed(u"check_ge" a u">=" b)<block_end><def_stmt>check_gt a b<block_start><if_stmt>a<g>b<block_start><return><block_end>check_failed(u"check_lt" a u"<" b)<block_end><def_stmt>check_le a b<block_start><if_stmt>a<le>b<block_start><return><block_end>check_failed(u"check_le" a u"<=" b)<block_end><def_stmt>check_lt a b<block_start><if_stmt>a<l>b<block_start><return><block_end>check_failed(u"check_lt" a u"<" 
b)<block_end><def_stmt>stack *args<block_start><global>fullLog screenLog<line_sep>callStack=inspect.stack()[1:]<line_sep>callStack.reverse()<for_stmt>i,frame enumerate(callStack)<block_start>line=[u"stack %2d %14s %4s %s"%(i os.path.split(frame[1])[1] frame[2] frame[3])]<line_sep>screenLog<augadd>line<line_sep>fullLog<augadd>line<block_end><if_stmt>len(args)<block_start>screenLog.append(u"stack "+repr(args[0]))<line_sep>fullLog.append(u"stack "+repr(args[0]))<block_end><block_end><def_stmt>info *args<block_start>channel(u"info" *args)<block_end><def_stmt>meta *args<block_start>"""Log information related to logging."""<line_sep>channel(u"meta" *args)<block_end><def_stmt>mouse *args<block_start>channel(u"mouse" *args)<block_end><def_stmt>parser *args<block_start>channel(u"parser" *args)<block_end><def_stmt>startup *args<block_start>channel(u"startup" *args)<block_end><def_stmt>quick *args<block_start><global>fullLog screenLog<line_sep>msg=str(args[0])<line_sep>prior=msg<for_stmt>i args[1:]<block_start><if_stmt><not>len(prior)<or>prior[-1]<ne>u"\n"<block_start>msg<augadd>u" "<block_end>prior=i# unicode(i) msg<augadd>prior<block_end>lines=msg.split(u"\n")<line_sep>screenLog<augadd>lines<line_sep>fullLog<augadd>lines<block_end><def_stmt>debug *args<block_start><global>fullLog screenLog<if_stmt>u"debug"<in>enabledChannels<block_start>lines=parse_lines(inspect.stack()[1] u"debug_@@@" *args)<line_sep>screenLog<augadd>lines<line_sep>fullLog<augadd>lines<block_end><block_end><def_stmt>detail *args<block_start><global>fullLog<if_stmt>u"detail"<in>enabledChannels<block_start>lines=parse_lines(inspect.stack()[1] u"detail" *args)<line_sep>fullLog<augadd>lines<block_end><block_end><def_stmt>error *args<block_start><global>fullLog<line_sep>lines=parse_lines(inspect.stack()[1] u"error" *args)<line_sep>fullLog<augadd>lines<block_end><def_stmt>when *args<block_start>args=(time.time()-startTime )+args<line_sep>channel(u"info" *args)<block_end><def_stmt>wrapper function shouldWrite=<true><block_start><global>shouldWritePrintLog<line_sep>shouldWritePrintLog=shouldWrite<line_sep>r=-1<try_stmt><block_start><try_stmt><block_start>r=function()<block_end><except_stmt>BaseException<block_start>shouldWritePrintLog=<true><line_sep>errorType,value,tracebackInfo=sys.exc_info()<line_sep>out=traceback.format_exception(errorType value tracebackInfo)<for_stmt>i out<block_start>error(i[:-1])<block_end><block_end><block_end><finally_stmt><block_start>flush()<block_end><return>r<block_end><def_stmt>write_to_file path<block_start>fullPath=app.buffer_file.expand_full_path(path)<with_stmt>io.open(fullPath "w+" encoding=u"UTF-8")<as>out<block_start>out.write(u"\n".join(fullLog)+u"\n")<block_end><block_end><def_stmt>flush <block_start><if_stmt>shouldWritePrintLog<block_start>sys.stdout.write(u"\n".join(fullLog)+u"\n")<block_end><block_end>
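# --- Hedged usage sketch (not part of the original module) -------------------
# Demonstrates the intended flow of the logging helpers above: enable a
# channel, log through it, and let wrapper() run the program, capture any
# traceback, and flush the accumulated log on exit. The "app.log" import path
# is an assumption based on the app.buffer_file import in the module.
from app import log


def demo():
    log.channel_enable(u"info", True)      # info() lines now reach the logs
    log.startup(u"demo starting")          # "startup" is enabled by default
    log.info(u"answer is", 42)
    log.check_le(1, 2)                     # passes silently
    try:
        1 // 0
    except ZeroDivisionError as exc:
        log.exception(exc, u"arithmetic went wrong")
    return 0


if __name__ == "__main__":
    # wrapper() runs demo(), records any exception, and flush() prints the
    # full log to stdout because shouldWrite is True.
    log.wrapper(demo, shouldWrite=True)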
# -*- Python -*- # Copyright 2021 The Verible Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ Rule to generate json-serializable simple structs """<def_stmt>jcxxgen name src out namespace=""<block_start>"""Generate C/C++ language source from a jcxxgen schema file. Args: name: Name of the rule, producing a cc-library with the same name. src: The schema yaml input file. out: Name of the generated header file. namespace: Optional name of the C++ namespace for generated structs. """<line_sep>tool="//common/tools:jcxxgen"<line_sep>json_header='"nlohmann/json.hpp"'<line_sep>native.genrule(name=name+"_gen" srcs=[src] outs=[out] cmd=("$(location //common/tools:jcxxgen) --json_header='"+json_header+"' --class_namespace "+namespace+" --output $@ $<") tools=[tool] )<line_sep>native.cc_library(name=name hdrs=[out] deps=["@jsonhpp"] )<block_end>
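# --- Hedged usage sketch (not part of the original .bzl file) -----------------
# How a BUILD file would typically invoke the jcxxgen() macro above (Starlark
# shares Python syntax). The load path, schema name and namespace are
# placeholders, not real targets.
load("//common/tools:jcxxgen.bzl", "jcxxgen")

jcxxgen(
    name = "lsp_protocol",
    src = "lsp-protocol.yaml",
    out = "lsp-protocol.h",
    namespace = "verible::lsp",
)

cc_library(
    name = "uses_protocol",
    srcs = ["server.cc"],
    # The macro also defines a cc_library with the given name, so downstream
    # targets simply depend on it.
    deps = [":lsp_protocol"],
)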
# ----------------------------------------------------------------------------- # Copyright (c) 2009-2016 <NAME>. All rights reserved. # Distributed under the (new) BSD License. # ----------------------------------------------------------------------------- """ """<import_stmt>os<import_stmt>re<import_stmt>sys<import_stmt>logging<import_stmt>importlib<import_stmt>numpy<as>np<import_from_stmt>glumpy gl<import_from_stmt>glumpy.log log<import_from_stmt>glumpy.ext.inputhook inputhook_manager stdin_ready<import_from_stmt>glumpy.app.window backends<import_from_stmt>. parser<import_from_stmt>. configuration<import_from_stmt>. clock<as>_clock<import_from_stmt>.clock Clock<import_from_stmt>.console Console<import_from_stmt>.viewport Viewport<import_from_stmt>.window EventDispatcher<line_sep># Default clock __clock__=<none><line_sep># Active windows __windows__=[]<line_sep># Current backend __backend__=<none><line_sep>__running__=<false><line_sep># --------------------------------------------------------------------- fps --- <def_stmt>fps <block_start>""" Get FPS from the default clock. """<line_sep><return>__clock__.get_fps()<block_end># --------------------------------------------------------------------- use --- <def_stmt>use backend api=<none> major=<none> minor=<none> profile=<none><block_start>""" Select a specific backend Parameters ---------- backend : ['osxglut', 'freeglut', 'pyglet', 'glfw', 'sdl', 'sdl2', 'pyside'] Graphical toolkit to use api : ['GL'|'ES'] OpenGL API to use major : int OpenGL major version to use minor : int OpenGL minor version to use profile : ['compatibility'|'core'] OpenGL profile to use Note ---- A shortened version is available with the following syntax: use("backend (api major.minor profile)") For example, `use("glfw (GL 3.3 core)")` """<line_sep><global>__backend__<line_sep>config=configuration.get_default()<line_sep># Parse options (in backend name, see note above) exp="""(?P<backend>\w+)? (.*\( (.*(?P<api>GL|ES))? (.*(?P<major>[1234])\.(?P<minor>[012345]))? 
(.*(?P<profile>compatibility|core))?.*\))?"""<line_sep>r=re.search(exp backend re.IGNORECASE|re.VERBOSE)<line_sep>_backend=r.group('backend')<or>"glfw"<line_sep>_api=r.group('api')<or>"GL"<line_sep>_major=int(r.group('major')<or>str(config.major_version))<line_sep>_minor=int(r.group('minor')<or>str(config.minor_version))<line_sep>_profile=r.group('profile')<or>""<line_sep># Arguments take precedence over shortened options backend=_backend<line_sep>api=api<or>_api<line_sep>major=major<or>_major<line_sep>minor=minor<or>_minor<line_sep>profile=profile<or>_profile<line_sep>config.api=api<line_sep>config.major_version=major<line_sep>config.minor_version=minor<line_sep>config.profile=profile<if_stmt>backend<not><in>backends.__backends__<block_start>log.critical("Unknown backend (%s)"%backend)<line_sep>log.critical("Available backends are: %s" str(backends.__backends__))<line_sep>sys.exit(0)<block_end># BUG: For some reason, the import module changes the working directory # We save it beforehand and restore it just after workdir=os.getcwd()<line_sep>name="glumpy.app.window.backends.backend_"+backend<line_sep>importlib.import_module(name)<line_sep>backend=sys.modules[name]<line_sep>os.chdir(workdir)<line_sep># Check availability <if_stmt>backend.available()<block_start>__backend__=backend<line_sep><return>backend<block_end><else_stmt><block_start>log.warning("Backend (%s) not available"%backend)<line_sep><return><none><block_end><block_end># ----------------------------------------------------------------- Window --- <class_stmt>Window(object)<block_start>""" Abstract Window This class is responsible for finding a suitable backend and parsing arguments. """<def_stmt>__new__ cls *args **kwargs<block_start><global>__backend__<line_sep>all=list(backends.__backends__)<line_sep>options=parser.get_options()<line_sep># No backend was specified # Check for command line argument then pick a default one if possible <if_stmt>__backend__<is><none><block_start><if_stmt>options.backend<ne>all[0]<block_start>all=[options.backend ]+all<block_end><for_stmt>name all<block_start>backend=use(name)<if_stmt>backend<and>backend.available()<block_start>__backend__=backend<line_sep><break><block_end><block_end># No backend available, there's nothing we can do <if_stmt>__backend__<is><none><block_start>log.critical("No suitable backend found")<line_sep><raise>NotImplementedError<block_end><block_end>config=configuration.get_default()<if_stmt>"config"<not><in>kwargs.keys()<block_start>kwargs['config']=config<block_end><if_stmt>'vsync'<not><in>kwargs.keys()<block_start>kwargs['vsync']=options.vsync<block_end># Get command line size # if options.size: # size = options.size.split(",") # kwargs['width'] = int(size[0]) # kwargs['height'] = int(size[1]) # else: # kwargs['width'] = kwargs.get('width', 512) # kwargs['height'] = kwargs.get('height', 512) # Get command line position # if options.position: # position = options.position.split(",") # #kwargs['x'] = kwargs.get('x', int(position[0])) # #kwargs['y'] = kwargs.get('y', int(position[1])) # else: # pass # #kwargs['x'] = kwargs.get('x', 0) # #kwargs['y'] = kwargs.get('y', 0) # Create the backend window window=__backend__.Window(*args **kwargs)<line_sep>window._backend=__backend__<line_sep>window._config=config<line_sep>log.info("Using %s (%s %d.%d)"%(__backend__.name() config.api config.major_version config.minor_version))<if_stmt>config.samples<g>0<block_start>log.info("Using multisampling with %d samples"%(config.samples))<block_end># Display fps options 
<if_stmt>options.display_fps<block_start>@window.timer(1.0)<def_stmt>timer elapsed<block_start>print("Estimated FPS: %f"%fps())<block_end><block_end><return>window<block_end><block_end># --------------------------------------------------------------- __init__ --- <def_stmt>__init__ clock=<none> framerate=<none> backend=<none><block_start>""" Initialize the main loop Parameters ---------- clock : Clock clock to use to run the app (gives the elementary tick) framerate : int frames per second backend : python module Backend module """<line_sep><global>__clock__<line_sep>options=parser.get_options()<if_stmt>options.debug<block_start>log.setLevel(logging.DEBUG)<block_end><if_stmt>framerate<is><none><block_start>framerate=options.framerate<block_end><if_stmt>framerate<g>0<block_start>log.info("Running at %d frames/second"%framerate)<block_end><else_stmt><block_start>log.info("Running at full speed")<block_end><if_stmt>clock<is><none><block_start>__clock__=_clock.get_default()<block_end><else_stmt><block_start>__clock__=clock<block_end>__clock__.set_fps_limit(framerate)<line_sep># OpenGL Initialization <for_stmt>window backend.windows()<block_start>window.activate()<line_sep>gl.glPixelStorei(gl.GL_UNPACK_ALIGNMENT 1)<line_sep>gl.glPixelStorei(gl.GL_PACK_ALIGNMENT 1)<line_sep>gl.glEnable(gl.GL_VERTEX_PROGRAM_POINT_SIZE)<try_stmt># This has been removed in 3.2 (it's now on by default) <block_start>gl.glEnable(gl.GL_POINT_SPRITE)<block_end><except_stmt><block_start><pass><block_end>gl.glEnable(gl.GL_BLEND)<line_sep>gl.glBlendFunc(gl.GL_SRC_ALPHA gl.GL_ONE_MINUS_SRC_ALPHA)<block_end># Initialize timers for all windows <for_stmt>window backend.windows()<block_start>window._clock=__clock__<line_sep># Start timers <for_stmt>i range(len(window._timer_stack))<block_start>handler,interval=window._timer_stack[i]<line_sep>__clock__.schedule_interval(handler interval)<block_end># Activate window window.activate()<line_sep># Dispatch init event window.dispatch_event('on_init')<line_sep># Dispatch an initial resize event window.dispatch_event('on_resize' window._width window._height)<block_end><return>__clock__<block_end># -------------------------------------------------------------------- quit --- <def_stmt>quit <block_start><global>__running__<line_sep>__running__=<false><line_sep># count = len(__backend__.windows()) # while count: # dt = clock.tick() # window = __backend__.windows()[-1] # window.close() # count = __backend__.process(dt) <block_end># --------------------------------------------------------------------- run --- <def_stmt>run clock=<none> framerate=<none> interactive=<none> duration=sys.maxsize framecount=sys.maxsize<block_start>""" Run the main loop Parameters ---------- clock : Clock clock to use to run the app (gives the elementary tick) framerate : int frames per second duration : float Duration after which the app will be stopped framecount : int Number of frame to display before stopping. 
"""<line_sep><global>__running__<line_sep>clock=__init__(clock=clock framerate=framerate backend=__backend__)<line_sep>options=parser.get_options()<if_stmt>interactive<is><none><block_start>interactive=options.interactive<block_end><if_stmt>interactive# Set interactive python session <block_start>os.environ['PYTHONINSPECT']='1'<import_stmt>readline<line_sep>readline.parse_and_bind("tab: complete")<def_stmt>run <block_start><while_stmt><not>stdin_ready()<block_start>__backend__.process(clock.tick())<block_end><return>0<block_end>inputhook_manager.set_inputhook(run)<block_end><else_stmt><block_start>__running__=<true><def_stmt>run duration framecount<block_start>count=len(__backend__.windows())<while_stmt>count<and>duration<g>0<and>framecount<g>0<and>__running__<block_start>dt=clock.tick()<line_sep>duration<augsub>dt<line_sep>framecount<augsub>1<line_sep>count=__backend__.process(dt)<block_end><block_end><if_stmt>options.record<block_start><import_from_stmt>.movie record<try_stmt># Check if output file name given <block_start>name=sys.argv[2]<block_end><except_stmt># Obtain the name of the script that is being run <block_start>name=os.path.basename(sys.argv[0])<block_end># Replace .py extension with .mp4 filename=re.sub('.py$' '.mp4' name)<line_sep>log.info("Recording movie in '%s'"%filename)<with_stmt>record(window=__backend__.windows()[0] filename=filename fps=60)<block_start>run(duration framecount)<block_end><block_end><else_stmt><block_start>run(duration framecount)<block_end><block_end><block_end>
<import_stmt>asyncio<import_stmt>pytest<import_from_stmt>asynctest TestCase mock<as>async_mock<import_from_stmt>....admin.server AdminResponder<import_from_stmt>....core.in_memory InMemoryProfile<import_from_stmt>....messaging.responder BaseResponder<import_from_stmt>....multitenant.base BaseMultitenantManager<import_from_stmt>....multitenant.manager MultitenantManager<import_from_stmt>...error WireFormatError<import_from_stmt>...outbound.message OutboundMessage<import_from_stmt>..message InboundMessage<import_from_stmt>..receipt MessageReceipt<import_from_stmt>..session InboundSession<class_stmt>TestInboundSession(TestCase)<block_start><def_stmt>setUp self<block_start>self.profile=InMemoryProfile.test_profile()<block_end><def_stmt>test_init self<block_start>test_inbound=async_mock.MagicMock()<line_sep>test_session_id="session-id"<line_sep>test_wire_format=async_mock.MagicMock()<line_sep>test_client_info={"client":"info"}<line_sep>test_close=async_mock.MagicMock()<line_sep>test_reply_mode=MessageReceipt.REPLY_MODE_ALL<line_sep>test_reply_thread_ids={"1" "2"}<line_sep>test_reply_verkeys={"3" "4"}<line_sep>test_transport_type="transport-type"<line_sep>sess=InboundSession(profile=self.profile inbound_handler=test_inbound session_id=test_session_id wire_format=test_wire_format client_info=test_client_info close_handler=test_close reply_mode=test_reply_mode reply_thread_ids=test_reply_thread_ids reply_verkeys=test_reply_verkeys transport_type=test_transport_type )<assert_stmt>sess.profile<is>self.profile<assert_stmt>sess.session_id<eq>test_session_id<assert_stmt>sess.wire_format<is>test_wire_format<assert_stmt>sess.client_info<eq>test_client_info<assert_stmt>sess.reply_mode<eq>test_reply_mode<assert_stmt>sess.transport_type<eq>test_transport_type<assert_stmt>"1"<in>sess.reply_thread_ids<assert_stmt>"3"<in>sess.reply_verkeys<line_sep>test_msg=async_mock.MagicMock()<with_stmt>async_mock.patch.object(sess "process_inbound")<as>process<block_start>sess.receive_inbound(test_msg)<line_sep>process.assert_called_once_with(test_msg)<line_sep>test_inbound.assert_called_once_with(sess.profile test_msg can_respond=<false>)<block_end>sess.close()<line_sep>test_close.assert_called_once()<assert_stmt>sess.closed<block_end><def_stmt>test_setters self<block_start>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=<none> wire_format=<none> )<line_sep>sess.reply_mode=MessageReceipt.REPLY_MODE_ALL<assert_stmt>sess.reply_mode<eq>MessageReceipt.REPLY_MODE_ALL<line_sep>sess.add_reply_thread_ids("1")<assert_stmt>"1"<in>sess.reply_thread_ids<line_sep>sess.add_reply_verkeys("2")<assert_stmt>"2"<in>sess.reply_verkeys<line_sep>sess.reply_mode="invalid"<assert_stmt><not>sess.reply_mode<assert_stmt><not>sess.reply_thread_ids<block_end># reset by setter method <async_keyword><def_stmt>test_parse_inbound self<block_start>test_session_id="session-id"<line_sep>test_transport_type="transport-type"<line_sep>test_wire_format=async_mock.MagicMock()<line_sep>test_wire_format.parse_message=async_mock.CoroutineMock()<line_sep>test_parsed="parsed-payload"<line_sep>test_receipt=async_mock.MagicMock()<line_sep>test_wire_format.parse_message.return_value=(test_parsed test_receipt)<line_sep>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=test_session_id wire_format=test_wire_format transport_type=test_transport_type )<line_sep>session=self.profile.session()<line_sep>setattr(self.profile "session" 
async_mock.MagicMock(return_value=session))<line_sep>test_payload="{}"<line_sep>result=<await>sess.parse_inbound(test_payload)<line_sep>test_wire_format.parse_message.assert_awaited_once_with(session test_payload)<assert_stmt>result.payload<eq>test_parsed<assert_stmt>result.receipt<is>test_receipt<assert_stmt>result.session_id<eq>test_session_id<assert_stmt>result.transport_type<eq>test_transport_type<block_end><async_keyword><def_stmt>test_receive self<block_start>self.multitenant_mgr=async_mock.MagicMock(MultitenantManager autospec=<true>)<line_sep>self.multitenant_mgr.get_wallets_by_message=async_mock.CoroutineMock(return_value=[async_mock.MagicMock(is_managed=<true>)])<line_sep>self.multitenant_mgr.get_wallet_profile=async_mock.CoroutineMock(return_value=self.profile)<line_sep>self.profile.context.injector.bind_instance(BaseMultitenantManager self.multitenant_mgr)<line_sep>self.profile.context.update_settings({"multitenant.enabled":<true>})<line_sep>self.base_responder=async_mock.MagicMock(AdminResponder autospec=<true>)<line_sep>self.profile.context.injector.bind_instance(BaseResponder self.base_responder)<line_sep>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=<none> wire_format=<none> )<line_sep>test_msg=async_mock.MagicMock()<with_stmt>async_mock.patch.object(sess "parse_inbound" async_mock.CoroutineMock())<as>encode async_mock.patch.object(sess "receive_inbound" async_mock.MagicMock())<as>receive<block_start>result=<await>sess.receive(test_msg)<line_sep>encode.assert_awaited_once_with(test_msg)<line_sep>receive.assert_called_once_with(encode.return_value)<assert_stmt>result<is>encode.return_value<block_end><block_end><async_keyword><def_stmt>test_receive_no_wallet_found self<block_start>self.multitenant_mgr=async_mock.MagicMock(MultitenantManager autospec=<true>)<line_sep>self.multitenant_mgr.get_wallets_by_message=async_mock.CoroutineMock(side_effect=ValueError("no such wallet"))<line_sep>self.multitenant_mgr.get_wallet_profile=async_mock.CoroutineMock(return_value=self.profile)<line_sep>self.profile.context.injector.bind_instance(BaseMultitenantManager self.multitenant_mgr)<line_sep>self.profile.context.update_settings({"multitenant.enabled":<true>})<line_sep>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=<none> wire_format=<none> )<line_sep>test_msg=async_mock.MagicMock()<with_stmt>async_mock.patch.object(sess "parse_inbound" async_mock.CoroutineMock())<as>encode async_mock.patch.object(sess "receive_inbound" async_mock.MagicMock())<as>receive<block_start>result=<await>sess.receive(test_msg)<line_sep>encode.assert_awaited_once_with(test_msg)<line_sep>receive.assert_called_once_with(encode.return_value)<assert_stmt>result<is>encode.return_value<block_end><block_end><def_stmt>test_process_inbound self<block_start>test_session_id="session-id"<line_sep>test_thread_id="thread-id"<line_sep>test_verkey="verkey"<line_sep>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=test_session_id wire_format=<none> )<line_sep>receipt=MessageReceipt(direct_response_mode=MessageReceipt.REPLY_MODE_THREAD thread_id=test_thread_id sender_verkey=test_verkey 
)<line_sep>receipt.recipient_did="dummy"<assert_stmt>receipt.recipient_did<eq>"dummy"<line_sep>receipt.recipient_did_public=<true><assert_stmt>receipt.recipient_did_public<line_sep>receipt.recipient_did=<none><line_sep>receipt.recipient_did_public=<none><assert_stmt>receipt.recipient_did<is><none><assert_stmt>receipt.recipient_did_public<is><none><line_sep>receipt.sender_did="dummy"<assert_stmt>receipt.sender_did<eq>"dummy"<line_sep>receipt.sender_did=<none><assert_stmt>receipt.sender_did<is><none><assert_stmt>"direct_response_mode"<in>str(receipt)<line_sep>message=InboundMessage(payload=<none> receipt=receipt)<line_sep>sess.process_inbound(message)<assert_stmt>sess.reply_mode<eq>receipt.direct_response_mode<assert_stmt>test_verkey<in>sess.reply_verkeys<assert_stmt>test_thread_id<in>sess.reply_thread_ids<assert_stmt>receipt.in_time<is><none><line_sep>receipt.connection_id="dummy"<assert_stmt>receipt.connection_id<eq>"dummy"<block_end><def_stmt>test_select_outbound self<block_start>test_session_id="session-id"<line_sep>test_thread_id="thread-id"<line_sep>test_verkey="verkey"<line_sep>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=test_session_id wire_format=<none> )<line_sep>sess.reply_mode=MessageReceipt.REPLY_MODE_ALL<line_sep>test_msg=OutboundMessage(payload=<none>)<assert_stmt><not>sess.select_outbound(test_msg)# no key test_msg.reply_session_id=test_session_id<assert_stmt><not>sess.select_outbound(test_msg)# no difference sess.can_respond=<true><assert_stmt><not>sess.select_outbound(test_msg)# no difference test_msg.reply_to_verkey=test_verkey<line_sep>sess.add_reply_verkeys(test_verkey)<assert_stmt>sess.select_outbound(test_msg)<line_sep>sess.reply_mode=MessageReceipt.REPLY_MODE_THREAD<line_sep>sess.reply_verkeys=<none><line_sep>sess.reply_thread_ids=<none><line_sep>test_msg=OutboundMessage(payload=<none>)<assert_stmt><not>sess.select_outbound(test_msg)<line_sep>sess.add_reply_thread_ids(test_thread_id)<line_sep>test_msg.reply_thread_id=test_thread_id<assert_stmt><not>sess.select_outbound(test_msg)<line_sep>sess.add_reply_verkeys(test_verkey)<line_sep>test_msg.reply_to_verkey=test_verkey<assert_stmt>sess.select_outbound(test_msg)<line_sep>sess.close()<assert_stmt><not>sess.select_outbound(test_msg)<block_end><async_keyword><def_stmt>test_wait_response self<block_start>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=<none> wire_format=<none> )<line_sep>test_msg=OutboundMessage(payload=<none>)<line_sep>sess.set_response(test_msg)<assert_stmt>sess.response_event.is_set()<assert_stmt>sess.response_buffered<with_stmt>async_mock.patch.object(sess "encode_outbound" async_mock.CoroutineMock())<as>encode<block_start>result=<await>asyncio.wait_for(sess.wait_response() 0.1)<assert_stmt>encode.awaited_once_with(test_msg)<assert_stmt>result<is>encode.return_value<block_end>sess.clear_response()<assert_stmt><not>sess.response_buffer<line_sep>sess.close()<assert_stmt><await>asyncio.wait_for(sess.wait_response() 0.1)<is><none><block_end><async_keyword><def_stmt>test_wait_response_x self<block_start>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=<none> wire_format=<none> )<line_sep>test_msg=OutboundMessage(payload=<none>)<line_sep>sess.set_response(test_msg)<assert_stmt>sess.response_event.is_set()<assert_stmt>sess.response_buffered<with_stmt>async_mock.patch.object(sess "encode_outbound" 
async_mock.CoroutineMock())<as>encode<block_start>encode.side_effect=WireFormatError()<with_stmt>pytest.raises(asyncio.TimeoutError)<block_start><await>asyncio.wait_for(sess.wait_response() 0.1)<block_end><block_end><assert_stmt><not>sess.response_buffer<line_sep>sess.close()<assert_stmt><await>asyncio.wait_for(sess.wait_response() 0.1)<is><none><block_end><async_keyword><def_stmt>test_encode_response self<block_start>test_wire_format=async_mock.MagicMock()<line_sep>test_wire_format.encode_message=async_mock.CoroutineMock()<line_sep>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=<none> wire_format=test_wire_format )<line_sep>test_msg=OutboundMessage(payload=<none>)<line_sep>test_from_verkey="from-verkey"<line_sep>test_to_verkey="to-verkey"<line_sep>session=self.profile.session()<line_sep>setattr(self.profile "session" async_mock.MagicMock(return_value=session))<with_stmt>self.assertRaises(WireFormatError)<block_start><await>sess.encode_outbound(test_msg)<block_end>test_msg.payload="{}"<with_stmt>self.assertRaises(WireFormatError)<block_start><await>sess.encode_outbound(test_msg)<block_end>test_msg.reply_from_verkey=test_from_verkey<line_sep>test_msg.reply_to_verkey=test_to_verkey<line_sep>result=<await>sess.encode_outbound(test_msg)<assert_stmt>result<is>test_wire_format.encode_message.return_value<line_sep>test_wire_format.encode_message.assert_awaited_once_with(session test_msg.payload [test_to_verkey] <none> test_from_verkey )<block_end><async_keyword><def_stmt>test_accept_response self<block_start>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=<none> wire_format=<none> )<line_sep>test_msg=OutboundMessage(payload=<none>)<with_stmt>async_mock.patch.object(sess "select_outbound")<as>selector<block_start>selector.return_value=<false><line_sep>accepted=sess.accept_response(test_msg)<assert_stmt><not>accepted<and><not>accepted.retry<line_sep>sess.set_response(OutboundMessage(payload=<none>))<line_sep>selector.return_value=<true><line_sep>accepted=sess.accept_response(test_msg)<assert_stmt><not>accepted<and>accepted.retry<line_sep>sess.clear_response()<line_sep>accepted=sess.accept_response(test_msg)<assert_stmt>accepted<block_end><block_end><async_keyword><def_stmt>test_context_mgr self<block_start>sess=InboundSession(profile=self.profile inbound_handler=<none> session_id=<none> wire_format=<none> )<assert_stmt><not>sess.closed<async_keyword><with_stmt>sess<block_start><pass><block_end><assert_stmt>sess.closed<block_end><block_end>
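# --- Hedged usage sketch (not part of the original test module) ---------------
# Exercises InboundSession directly with the same arguments the fixtures above
# use. The absolute "aries_cloudagent" package prefix is an assumption made
# from the relative imports; adjust it to the actual package root.
from aries_cloudagent.core.in_memory import InMemoryProfile
from aries_cloudagent.transport.inbound.receipt import MessageReceipt
from aries_cloudagent.transport.inbound.session import InboundSession

profile = InMemoryProfile.test_profile()
sess = InboundSession(
    profile=profile,
    inbound_handler=None,
    session_id="demo-session",
    wire_format=None,
    reply_mode=MessageReceipt.REPLY_MODE_ALL,
)
sess.add_reply_verkeys("demo-verkey")
print(sess.reply_mode, sess.reply_verkeys, sess.closed)
sess.close()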
<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>.loss_blocks SSIM smooth_grad_1st smooth_grad_2nd TernaryLoss<import_from_stmt>utils.warp_utils flow_warp<import_from_stmt>utils.warp_utils get_occu_mask_bidirection get_occu_mask_backward<class_stmt>unFlowLoss(nn.modules.Module)<block_start><def_stmt>__init__ self cfg<block_start>super(unFlowLoss self).__init__()<line_sep>self.cfg=cfg<block_end><def_stmt>loss_photomatric self im1_scaled im1_recons occu_mask1<block_start>loss=[]<if_stmt>self.cfg.w_l1<g>0<block_start>loss<augadd>[self.cfg.w_l1<times>(im1_scaled-im1_recons).abs()<times>occu_mask1]<block_end><if_stmt>self.cfg.w_ssim<g>0<block_start>loss<augadd>[self.cfg.w_ssim<times>SSIM(im1_recons<times>occu_mask1 im1_scaled<times>occu_mask1)]<block_end><if_stmt>self.cfg.w_ternary<g>0<block_start>loss<augadd>[self.cfg.w_ternary<times>TernaryLoss(im1_recons<times>occu_mask1 im1_scaled<times>occu_mask1)]<block_end><return>sum([l.mean()<for>l loss])/occu_mask1.mean()<block_end><def_stmt>loss_smooth self flow im1_scaled<block_start><if_stmt>'smooth_2nd'<in>self.cfg<and>self.cfg.smooth_2nd<block_start>func_smooth=smooth_grad_2nd<block_end><else_stmt><block_start>func_smooth=smooth_grad_1st<block_end>loss=[]<line_sep>loss<augadd>[func_smooth(flow im1_scaled self.cfg.alpha)]<line_sep><return>sum([l.mean()<for>l loss])<block_end><def_stmt>forward self output target<block_start>""" :param output: Multi-scale forward/backward flows n * [B x 4 x h x w] :param target: image pairs Nx6xHxW :return: """<line_sep>pyramid_flows=output<line_sep>im1_origin=target[: :3]<line_sep>im2_origin=target[: 3:]<line_sep>pyramid_smooth_losses=[]<line_sep>pyramid_warp_losses=[]<line_sep>self.pyramid_occu_mask1=[]<line_sep>self.pyramid_occu_mask2=[]<line_sep>s=1.<for_stmt>i,flow enumerate(pyramid_flows)<block_start><if_stmt>self.cfg.w_scales[i]<eq>0<block_start>pyramid_warp_losses.append(0)<line_sep>pyramid_smooth_losses.append(0)<line_sep><continue><block_end>b,_,h,w=flow.size()<line_sep># resize images to match the size of layer im1_scaled=F.interpolate(im1_origin (h w) mode='area')<line_sep>im2_scaled=F.interpolate(im2_origin (h w) mode='area')<line_sep>im1_recons=flow_warp(im2_scaled flow[: :2] pad=self.cfg.warp_pad)<line_sep>im2_recons=flow_warp(im1_scaled flow[: 2:] pad=self.cfg.warp_pad)<if_stmt>i<eq>0<block_start><if_stmt>self.cfg.occ_from_back<block_start>occu_mask1=1-get_occu_mask_backward(flow[: 2:] th=0.2)<line_sep>occu_mask2=1-get_occu_mask_backward(flow[: :2] th=0.2)<block_end><else_stmt><block_start>occu_mask1=1-get_occu_mask_bidirection(flow[: :2] flow[: 2:])<line_sep>occu_mask2=1-get_occu_mask_bidirection(flow[: 2:] flow[: :2])<block_end><block_end><else_stmt><block_start>occu_mask1=F.interpolate(self.pyramid_occu_mask1[0] (h w) mode='nearest')<line_sep>occu_mask2=F.interpolate(self.pyramid_occu_mask2[0] (h w) mode='nearest')<block_end>self.pyramid_occu_mask1.append(occu_mask1)<line_sep>self.pyramid_occu_mask2.append(occu_mask2)<line_sep>loss_warp=self.loss_photomatric(im1_scaled im1_recons occu_mask1)<if_stmt>i<eq>0<block_start>s=min(h w)<block_end>loss_smooth=self.loss_smooth(flow[: :2]/s im1_scaled)<if_stmt>self.cfg.with_bk<block_start>loss_warp<augadd>self.loss_photomatric(im2_scaled im2_recons occu_mask2)<line_sep>loss_smooth<augadd>self.loss_smooth(flow[: 2:]/s 
im2_scaled)<line_sep>loss_warp<augdiv>2.<line_sep>loss_smooth<augdiv>2.<block_end>pyramid_warp_losses.append(loss_warp)<line_sep>pyramid_smooth_losses.append(loss_smooth)<block_end>pyramid_warp_losses=[l<times>w<for>l,w zip(pyramid_warp_losses self.cfg.w_scales)]<line_sep>pyramid_smooth_losses=[l<times>w<for>l,w zip(pyramid_smooth_losses self.cfg.w_sm_scales)]<line_sep>warp_loss=sum(pyramid_warp_losses)<line_sep>smooth_loss=self.cfg.w_smooth<times>sum(pyramid_smooth_losses)<line_sep>total_loss=warp_loss+smooth_loss<line_sep><return>total_loss warp_loss smooth_loss pyramid_flows[0].abs().mean()<block_end><block_end>
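# --- Hedged usage sketch (not part of the original module) -------------------
# Builds unFlowLoss with an EasyDict-style config carrying exactly the fields
# read above (w_l1, w_ssim, w_ternary, w_scales, ...) and feeds it dummy
# pyramid flows plus an image pair. The module path and the numeric weights
# are illustrative assumptions.
import torch
from easydict import EasyDict
from losses.flow_loss import unFlowLoss  # assumed module path

cfg = EasyDict({
    "w_l1": 0.15, "w_ssim": 0.85, "w_ternary": 0.0,
    "w_smooth": 75.0, "alpha": 10, "smooth_2nd": False,
    "w_scales": [1.0, 1.0, 1.0, 1.0, 0.0],
    "w_sm_scales": [1.0, 0.0, 0.0, 0.0, 0.0],
    "warp_pad": "border", "occ_from_back": True, "with_bk": True,
})
loss_fn = unFlowLoss(cfg)

B, H, W = 2, 64, 96
target = torch.rand(B, 6, H, W)                     # concatenated im1 / im2
pyramid_flows = [torch.zeros(B, 4, H // s, W // s)  # fwd/bwd flow per level
                 for s in (1, 2, 4, 8, 16)]
total, warp, smooth, flow_mean = loss_fn(pyramid_flows, target)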
# coding=utf-8 # Copyright 2018 The Google AI Language Team Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """BERT next sentence prediction / binary coherence finetuning runner."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>collections<import_stmt>math<import_from_stmt>absl app<import_from_stmt>absl flags<import_from_stmt>bert modeling<import_from_stmt>bert optimization<import_from_stmt>bert tokenization<import_from_stmt>language.conpono.cpc model_builder<import_from_stmt>language.conpono.reconstruct preprocess<as>ip<import_stmt>tensorflow.compat.v1<as>tf<import_from_stmt>tensorflow.contrib cluster_resolver<as>contrib_cluster_resolver<import_from_stmt>tensorflow.contrib data<as>contrib_data<import_from_stmt>tensorflow.contrib lookup<as>contrib_lookup<import_from_stmt>tensorflow.contrib tpu<as>contrib_tpu<import_from_stmt>tensorflow.contrib training<as>contrib_training<line_sep>FLAGS=flags.FLAGS<line_sep>## Required parameters flags.DEFINE_string("eval_file" <none> "The input data. Should be in tfrecord format ready to input to BERT.")<line_sep>flags.DEFINE_string("train_file" <none> "The input data. Should be in tfrecord format ready to input to BERT.")<line_sep>flags.DEFINE_string("bert_config_file" <none> "The config json file corresponding to the pre-trained BERT model. "<concat>"This specifies the model architecture.")<line_sep>flags.DEFINE_string("vocab_file" <none> "The vocabulary file that the BERT model was trained on.")<line_sep>flags.DEFINE_string("output_dir" <none> "The output directory where the model checkpoints will be written.")<line_sep>## Other parameters flags.DEFINE_bool("include_mlm" <true> "Whether to include MLM loss/objective")<line_sep>flags.DEFINE_integer("num_choices" 32 "Number of negative samples + 1")<line_sep>flags.DEFINE_integer("data_window_size" 5 "Number of documents to draw"<concat>"negative samples from.")<line_sep>flags.DEFINE_integer("data_window_shift" 2 "Shift windows by this many for"<concat>"negative samples.")<line_sep>flags.DEFINE_integer("max_sent_length" 70 "Number of tokens per sentence.")<line_sep>flags.DEFINE_integer("max_para_length" 30 "Number of sentences per paragraph")<line_sep>flags.DEFINE_integer("context_size" 4 "Number of sentences in the context")<line_sep>flags.DEFINE_integer("margin" 1 "Eta value for margin.")<line_sep>flags.DEFINE_float("mask_rate" 0.1 "Rate of masking for mlm.")<line_sep>flags.DEFINE_bool("add_lv2loss" <false> "Whether to use the level 2 loss.")<line_sep>flags.DEFINE_string("init_checkpoint" <none> "Initial checkpoint (usually from a pre-trained BERT model).")<line_sep>flags.DEFINE_bool("do_lower_case" <true> "Whether to lower case the input text. Should be True for uncased "<concat>"models and False for cased models.")<line_sep>flags.DEFINE_integer("max_seq_length" 128 "The maximum total input sequence length after WordPiece tokenization. 
"<concat>"Sequences longer than this will be truncated, and sequences shorter "<concat>"than this will be padded.")<line_sep>flags.DEFINE_float("dataset_one_weight" 0.5 "Weight of first dataset."<concat>"Weight of second dataset will be 1-x")<line_sep>flags.DEFINE_bool("do_train" <false> "Whether to run training.")<line_sep>flags.DEFINE_bool("do_eval" <false> "Whether to run eval on the dev set.")<line_sep>flags.DEFINE_integer("train_batch_size" 32 "Total batch size for training.")<line_sep>flags.DEFINE_integer("eval_batch_size" 32 "Total batch size for eval.")<line_sep>flags.DEFINE_integer("train_data_size" 10000 "The number of examples in the"<concat>"training data")<line_sep>flags.DEFINE_integer("eval_data_size" -1 "The number of examples in the"<concat>"validation data")<line_sep>flags.DEFINE_integer("predict_batch_size" 8 "Total batch size for predict.")<line_sep>flags.DEFINE_float("learning_rate" 5e-5 "The initial learning rate for Adam.")<line_sep>flags.DEFINE_float("warmup_proportion" 0.1 "Proportion of training to perform linear learning rate warmup for. "<concat>"E.g., 0.1 = 10% of training.")<line_sep>flags.DEFINE_integer("save_checkpoints_steps" 10000 "How often to save the model checkpoint.")<line_sep>flags.DEFINE_integer("iterations_per_loop" 1000 "How many steps to make in each estimator call.")<line_sep>flags.DEFINE_bool("use_tpu" <false> "Whether to use TPU or GPU/CPU.")<line_sep>flags.DEFINE_string("tpu_name" <none> "The Cloud TPU to use for training. This should be either the name "<concat>"used when creating the Cloud TPU, or a grpc://ip.address.of.tpu:8470 "<concat>"url.")<line_sep>flags.DEFINE_string("tpu_zone" <none> "[Optional] GCE zone where the Cloud TPU is located in. If not "<concat>"specified, we will attempt to automatically detect the GCE project from "<concat>"metadata.")<line_sep>flags.DEFINE_string("gcp_project" <none> "[Optional] Project name for the Cloud TPU-enabled project. If not "<concat>"specified, we will attempt to automatically detect the GCE project from "<concat>"metadata.")<line_sep>flags.DEFINE_string("master" <none> "[Optional] TensorFlow master URL.")<line_sep>flags.DEFINE_integer("num_tpu_cores" 8 "Only used if `use_tpu` is True. 
Total number of TPU cores to use.")<line_sep>_SEP_TOKEN="[SEP]"<class_stmt>InputFeatures(object)<block_start>"""A single set of features of data."""<def_stmt>__init__ self input_ids input_mask segment_ids label_id is_real_example=<true><block_start>self.input_ids=input_ids<line_sep>self.input_mask=input_mask<line_sep>self.segment_ids=segment_ids<line_sep>self.label_id=label_id<line_sep>self.is_real_example=is_real_example<block_end><block_end># pylint: disable=invalid-name Outputs_And_Context=collections.namedtuple("Outputs_And_Context" ["input_ids" "input_mask" "segment_ids" "label_types" "context"])<line_sep># pylint: enable=invalid-name <def_stmt>pad_and_cut tensor max_len_scalar<block_start>end_padding=tf.constant([[0 max_len_scalar]])<line_sep><return>tf.pad(tensor end_padding)[:max_len_scalar]<block_end><def_stmt>build_distractors distractor_examples context<block_start>"""Create inputs with distractors."""<line_sep>CLS_ID=tf.constant([101] dtype=tf.int64)# pylint: disable=invalid-name SEP_ID=tf.constant([102] dtype=tf.int64)# pylint: disable=invalid-name bert_inputs=[]<line_sep>input_masks=[]<line_sep>segment_ids=[]<line_sep># for each distractor sample_size=int((FLAGS.num_choices-4)/(FLAGS.data_window_size-1))<for_stmt>example distractor_examples# randomly sample 7 <block_start>intermediate_examples_tensor=tf.reduce_sum(tf.abs(example) 1)<line_sep>examples_zero_vector=tf.zeros(shape=(1 1) dtype=tf.int64)<line_sep>examples_bool_mask=tf.squeeze(tf.not_equal(intermediate_examples_tensor examples_zero_vector))<line_sep>paragraph_len=tf.reduce_sum(tf.cast(examples_bool_mask tf.int32))<line_sep>indices=tf.range(0 limit=paragraph_len dtype=tf.int32)<line_sep>shuffled_indices=tf.random.shuffle(indices)[:sample_size]<line_sep># extend examples / targets distractor_cand=example<line_sep>distractor_cand_plus_one=distractor_cand[1:]<line_sep>distractor_cand_plus_two=distractor_cand[2:]<line_sep># pad extensions paddings_one=tf.constant([[0 1] [0 0]])<line_sep>distractor_cand_plus_one=tf.pad(distractor_cand_plus_one paddings_one)<line_sep>paddings_two=tf.constant([[0 2] [0 0]])<line_sep>distractor_cand_plus_two=tf.pad(distractor_cand_plus_two paddings_two)<line_sep>distractor_cand_ext=tf.concat([distractor_cand distractor_cand_plus_one distractor_cand_plus_two] axis=1)<line_sep>distractors=tf.gather(distractor_cand_ext shuffled_indices)<for_stmt>i range(sample_size)<block_start>distractors_non_zero=tf.where(tf.not_equal(distractors[i] tf.zeros_like(distractors[i])))<line_sep>distractors_stripped=tf.gather_nd(distractors[i] distractors_non_zero)<line_sep>segment_id=tf.concat([tf.zeros_like(CLS_ID dtype=tf.int64) tf.zeros_like(context) tf.zeros_like(SEP_ID dtype=tf.int64) tf.ones_like(distractors_stripped) tf.ones_like(SEP_ID dtype=tf.int64)] axis=0)<line_sep>segment_id=pad_and_cut(segment_id FLAGS.max_seq_length)<line_sep>segment_ids.append(segment_id)<line_sep>new_input=tf.concat([CLS_ID context SEP_ID distractors_stripped SEP_ID] axis=0)<line_sep>input_mask=tf.ones_like(new_input)<line_sep>input_mask=pad_and_cut(input_mask FLAGS.max_seq_length)<line_sep>input_masks.append(input_mask)<line_sep>padded_new_input=pad_and_cut(new_input FLAGS.max_seq_length)<line_sep>bert_inputs.append(padded_new_input)<block_end><block_end>bert_inputs=tf.stack(bert_inputs axis=0)<line_sep>input_masks=tf.stack(input_masks axis=0)<line_sep>segment_ids=tf.stack(segment_ids axis=0)<line_sep>out=Outputs_And_Context(bert_inputs input_masks segment_ids <none> 
<none>)<line_sep><return>out<block_end><def_stmt>build_bert_inputs example<block_start>"""Convert example <Tensor [30, 70]> into bert inputs."""<line_sep>CLS_ID=tf.constant([101] dtype=tf.int64)# pylint: disable=invalid-name SEP_ID=tf.constant([102] dtype=tf.int64)# pylint: disable=invalid-name max_len=tf.constant([FLAGS.max_para_length])<line_sep>context_size=tf.constant([FLAGS.context_size])<line_sep>intermediate_examples_tensor=tf.reduce_sum(tf.abs(example) 1)<line_sep>examples_zero_vector=tf.zeros(shape=(1 1) dtype=tf.int64)<line_sep>examples_bool_mask=tf.squeeze(tf.not_equal(intermediate_examples_tensor examples_zero_vector))<line_sep>paragraph_len=tf.reduce_sum(tf.cast(examples_bool_mask tf.int32))<line_sep>start=tf.random.uniform([1] 0 tf.reshape(paragraph_len [])-tf.reshape(context_size [])+1 dtype=tf.int32)<line_sep># Slice the document into the before, after and context. # Discard the zero padding. sizes=tf.squeeze(tf.concat([[start context_size paragraph_len-context_size-start max_len-paragraph_len]] 0))<line_sep>before,context,after,_=tf.split(example sizes axis=0)<line_sep># Gather the context removing zero padding at end of sentences. non_zeros=tf.where(tf.not_equal(context tf.zeros_like(context)))<line_sep>context_gathered=tf.gather_nd(context non_zeros)<line_sep># Flip before so we select the 4 sentences closest to target before=tf.reverse(before axis=[0])<line_sep># pad both to longer than needed paddings=tf.constant([[0 8] [0 0]])<line_sep>before=tf.pad(before paddings)<line_sep>after=tf.pad(after paddings)<line_sep># Extend targets to 3 sentences # pad both before_minus_one=before[1:][:4]<line_sep>before_minus_two=before[2:][:4]<line_sep>after_plus_one=after[1:][:4]<line_sep>after_plus_two=after[2:][:4]<line_sep>before=before[:4]<line_sep>after=after[:4]<line_sep>before=tf.concat([before_minus_two before_minus_one before] axis=1)<line_sep>after=tf.concat([after after_plus_one after_plus_two] axis=1)<line_sep>############################################################################ # before = before[:4] # after = after[:4] # These 8 sentences are the 8 surrounding targets. Some are padding. targets=tf.concat([before after] axis=0)<line_sep># Remove the padding from the sourrounding sentences # Eg. 
if context starts at beginning of paragraph, before is all padding intermediate_tensor=tf.reduce_sum(tf.abs(targets) 1)<line_sep>zero_vector=tf.zeros(shape=(1 1) dtype=tf.int64)<line_sep>bool_mask=tf.squeeze(tf.not_equal(intermediate_tensor zero_vector))<line_sep>bool_mask.set_shape([<none>])<line_sep>targets=tf.boolean_mask(targets bool_mask)<line_sep># Randomly select 4 targets # We will also select the label_types for each selected target indices=tf.range(0 limit=tf.shape(targets)[0] dtype=tf.int32)<line_sep>shuffled_indices=tf.random.shuffle(indices)[:4]<line_sep>targets=tf.gather(targets shuffled_indices)<line_sep>full_labels=tf.concat([tf.range(3 -1 -1) tf.range(4 8)] axis=0)<line_sep>label_types=tf.boolean_mask(full_labels bool_mask)<line_sep>label_types=tf.gather(label_types shuffled_indices)<line_sep># create inputs bert_inputs=[]<line_sep>input_masks=[]<line_sep>segment_ids=[]<for_stmt>i range(4)<block_start>target_non_zero=tf.where(tf.not_equal(targets[i] tf.zeros_like(targets[i])))<line_sep>targets_stripped=tf.gather_nd(targets[i] target_non_zero)<line_sep>segment_id=tf.concat([tf.zeros_like(CLS_ID dtype=tf.int64) tf.zeros_like(context_gathered) tf.zeros_like(SEP_ID dtype=tf.int64) tf.ones_like(targets_stripped) tf.ones_like(SEP_ID dtype=tf.int64)] axis=0)<line_sep>segment_id=pad_and_cut(segment_id FLAGS.max_seq_length)<line_sep>segment_ids.append(segment_id)<line_sep>new_input=tf.concat([CLS_ID context_gathered SEP_ID targets_stripped SEP_ID] axis=0)<line_sep>input_mask=tf.ones_like(new_input)<line_sep>input_mask=pad_and_cut(input_mask FLAGS.max_seq_length)<line_sep>input_masks.append(input_mask)<line_sep>padded_new_input=pad_and_cut(new_input FLAGS.max_seq_length)<line_sep>bert_inputs.append(padded_new_input)<block_end>bert_inputs=tf.stack(bert_inputs axis=0)<line_sep>input_masks=tf.stack(input_masks axis=0)<line_sep>segment_ids=tf.stack(segment_ids axis=0)<line_sep>out=Outputs_And_Context(bert_inputs input_masks segment_ids label_types context_gathered)<line_sep><return>out<block_end><def_stmt>file_based_input_fn_builder input_file is_training drop_remainder add_masking<block_start>"""Creates an `input_fn` closure to be passed to TPUEstimator."""<line_sep>input_file=input_file.split(",")<line_sep>expanded_files=[]<for_stmt>infile input_file<block_start><try_stmt><block_start>sharded_files=tf.io.gfile.glob(infile)<line_sep>expanded_files.append(sharded_files)<block_end><except_stmt>tf.errors.OpError<block_start>expanded_files.append(infile)<block_end><block_end>name_to_features={"sents":tf.FixedLenFeature([FLAGS.max_para_length<times>FLAGS.max_sent_length] tf.int64)}<def_stmt>_decode_record record name_to_features vocab_table<block_start>"""Decodes a record to a TensorFlow example."""<line_sep>target_example=tf.parse_single_example(record[0] name_to_features)<line_sep>target_example=tf.reshape(target_example["sents"] [FLAGS.max_para_length FLAGS.max_sent_length])<line_sep># distractor_examples = [] # for rec in record[1:]: # distractor_examples.append( # tf.reshape( # tf.parse_single_example(rec, name_to_features)["sents"], # [FLAGS.max_para_length, FLAGS.max_sent_length])) # This is an unfortunate hack but is necessary to get around a TF error. 
dist0=tf.reshape(tf.parse_single_example(record[1] name_to_features)["sents"] [FLAGS.max_para_length FLAGS.max_sent_length])<line_sep>dist1=tf.reshape(tf.parse_single_example(record[2] name_to_features)["sents"] [FLAGS.max_para_length FLAGS.max_sent_length])<line_sep>dist2=tf.reshape(tf.parse_single_example(record[3] name_to_features)["sents"] [FLAGS.max_para_length FLAGS.max_sent_length])<line_sep>dist3=tf.reshape(tf.parse_single_example(record[4] name_to_features)["sents"] [FLAGS.max_para_length FLAGS.max_sent_length])<line_sep>inputs_obj=build_bert_inputs(target_example)<line_sep>distractor_obj=build_distractors([dist0 dist1 dist2 dist3] inputs_obj.context)<line_sep>example={}<line_sep>example["input_ids"]=tf.concat([inputs_obj.input_ids distractor_obj.input_ids] axis=0)<line_sep>example["input_mask"]=tf.concat([inputs_obj.input_mask distractor_obj.input_mask] axis=0)<line_sep>example["segment_ids"]=tf.concat([inputs_obj.segment_ids distractor_obj.segment_ids] axis=0)<line_sep>example["label_types"]=inputs_obj.label_types<line_sep># Add masking: <if_stmt>add_masking<block_start>mask_rate=FLAGS.mask_rate<line_sep>max_predictions_per_seq=int(math.ceil(FLAGS.max_seq_length<times>mask_rate))<line_sep>cls_token="[CLS]"<line_sep>sep_token="[SEP]"<line_sep>mask_token="[MASK]"<line_sep># pad_token = "[PAD]" mask_blacklist=tf.constant([cls_token sep_token])# , pad_token]) mask_blacklist_ids=tf.to_int32(vocab_table.lookup(mask_blacklist))<line_sep>mask_token_id=tf.to_int32(vocab_table.lookup(tf.constant(mask_token)))<line_sep>input_ids=tf.to_int32(example["input_ids"])<def_stmt>call_sample_mask_indices x<block_start><return>ip.sample_mask_indices(x mask_rate mask_blacklist_ids max_predictions_per_seq)<block_end>mask_indices=tf.map_fn(call_sample_mask_indices input_ids dtype=tf.int32)<def_stmt>call_get_target_tokens x<block_start>input_len=tf.shape(input_ids)[-1]<line_sep>x_input_id=x[:input_len]<line_sep>x_mask_indices=x[input_len:]<line_sep><return>ip.get_target_tokens_for_apply(x_input_id x_mask_indices)<block_end>map_input=tf.concat([input_ids mask_indices] -1)<line_sep>target_token_ids=tf.map_fn(call_get_target_tokens map_input)<def_stmt>call_apply_masking x<block_start>input_len=tf.shape(input_ids)[-1]<line_sep>mask_idx_len=tf.shape(mask_indices)[-1]<line_sep>x_input_id=x[:input_len]<line_sep>x_mask_indices=x[input_len:input_len+mask_idx_len]<line_sep>x_target_token_ids=x[input_len+mask_idx_len:]<line_sep><return>ip.apply_masking(x_input_id x_target_token_ids x_mask_indices mask_token_id 1000)<block_end>map_input2=tf.concat([input_ids mask_indices target_token_ids] -1)<line_sep>token_ids_masked=tf.map_fn(call_apply_masking tf.to_int64(map_input2))<line_sep>target_token_weights=tf.ones_like(target_token_ids dtype=tf.float32)<line_sep>pad_targets=tf.where(tf.equal(target_token_ids 0) tf.ones_like(target_token_ids dtype=tf.float32) tf.zeros_like(target_token_ids dtype=tf.float32))<line_sep>target_token_weights=target_token_weights-pad_targets<line_sep>example["target_token_weights"]=target_token_weights<line_sep>example["target_token_ids"]=target_token_ids<line_sep>example["input_ids"]=token_ids_masked<line_sep>example["mask_indices"]=mask_indices<line_sep># Set shape explicitly for TPU example["target_token_weights"].set_shape([FLAGS.num_choices max_predictions_per_seq])<line_sep>example["target_token_ids"].set_shape([FLAGS.num_choices max_predictions_per_seq])<line_sep>example["mask_indices"].set_shape([FLAGS.num_choices max_predictions_per_seq])<block_end># Set shape explicitly for TPU 
example["input_ids"].set_shape([FLAGS.num_choices FLAGS.max_seq_length])<line_sep>example["input_mask"].set_shape([FLAGS.num_choices FLAGS.max_seq_length])<line_sep>example["segment_ids"].set_shape([FLAGS.num_choices FLAGS.max_seq_length])<line_sep>example["label_types"].set_shape([4])<line_sep>example["label_ids"]=tf.scatter_nd(tf.reshape(example["label_types"] [4 1]) tf.range(4) [8])<line_sep># tf.Example only supports tf.int64, but the TPU only supports tf.int32. # So cast all int64 to int32. <for_stmt>name list(example.keys())# pylint: disable=g-builtin-op <block_start>t=example[name]<if_stmt>t.dtype<eq>tf.int64<block_start>t=tf.to_int32(t)<block_end>example[name]=t<block_end><return>example<block_end><def_stmt>input_fn params<block_start>"""The actual input function."""<line_sep>batch_size=params["batch_size"]<line_sep>vocab_table=contrib_lookup.index_table_from_file(FLAGS.vocab_file)<if_stmt>len(expanded_files)<eq>1<block_start>d=tf.data.TFRecordDataset(expanded_files[0])<if_stmt>is_training<block_start>d=d.repeat()<line_sep>d=d.shuffle(buffer_size=256)<block_end><block_end><else_stmt><block_start>dataset_list=[tf.data.TFRecordDataset(expanded_files[i])<for>i range(len(expanded_files))]<if_stmt>is_training<block_start>dataset_list=[d.repeat()<for>d dataset_list]<block_end>dset_weights=[FLAGS.dataset_one_weight 1-FLAGS.dataset_one_weight]<line_sep>d=tf.data.experimental.sample_from_datasets(dataset_list dset_weights)<line_sep># Note that sample_from_datasets() inserts randomness into the training # An alternative would be to use choose_from_datasets() but then the # order must be stated explicitly which is less intitive for unbalanced # datasets. Example below: # # choice_dataset = tf.data.Dataset.range(len(dataset_list)).repeat() # d = tf.data.experimental.choose_from_datasets(dataset_list, # choice_dataset) <if_stmt>is_training<block_start>d=d.shuffle(buffer_size=256)<block_end><block_end># The window size will be for selecting negative samples # It equals the number of documents to sample from -1 d=d.apply(contrib_data.sliding_window_batch(window_size=FLAGS.data_window_size window_shift=FLAGS.data_window_shift))<line_sep>d=d.apply(tf.data.experimental.map_and_batch(<lambda>record:_decode_record(record name_to_features vocab_table) batch_size=batch_size drop_remainder=drop_remainder))<line_sep><return>d<block_end><return>input_fn<block_end><def_stmt>model_fn_builder bert_config init_checkpoint learning_rate num_train_steps num_warmup_steps use_tpu use_one_hot_embeddings num_choices add_masking<block_start>"""Returns `model_fn` closure for TPUEstimator."""<def_stmt>model_fn features labels mode params# pylint: disable=unused-argument <block_start>"""The `model_fn` for TPUEstimator."""<line_sep>tf.logging.info("*** Features ***")<for_stmt>name sorted(features.keys())<block_start>tf.logging.info(" name = %s, shape = %s"%(name features[name].shape))<block_end>input_ids=tf.reshape(features["input_ids"] [-1 FLAGS.max_seq_length])<line_sep>input_mask=tf.reshape(features["input_mask"] [-1 FLAGS.max_seq_length])<line_sep>segment_ids=tf.reshape(features["segment_ids"] [-1 FLAGS.max_seq_length])<line_sep>label_types=features["label_types"]<line_sep>label_ids=features["label_ids"]<line_sep>is_training=(mode<eq>tf.estimator.ModeKeys.TRAIN)<line_sep>is_real_example=tf.reduce_sum(tf.one_hot(label_types 8) axis=1)<line_sep>model=modeling.BertModel(config=bert_config is_training=is_training input_ids=input_ids input_mask=input_mask token_type_ids=segment_ids 
use_one_hot_embeddings=use_one_hot_embeddings)<line_sep>(cpc_loss _ logits probabilities)=model_builder.create_model(model label_ids label_types FLAGS.train_batch_size<if>is_training<else>FLAGS.eval_batch_size num_choices use_tpu FLAGS.add_lv2loss margin=float(FLAGS.margin))<if_stmt>add_masking<block_start>mask_rate=FLAGS.mask_rate# search alternatives? max_predictions_per_seq=int(math.ceil(FLAGS.max_seq_length<times>mask_rate))<line_sep>masked_lm_positions=tf.reshape(features["mask_indices"] [-1 max_predictions_per_seq])<line_sep>masked_lm_ids=tf.reshape(features["target_token_ids"] [-1 max_predictions_per_seq])<line_sep>masked_lm_weights=tf.reshape(features["target_token_weights"] [-1 max_predictions_per_seq])<line_sep>(masked_lm_loss _ _)=model_builder.get_masked_lm_output(bert_config model.get_sequence_output() model.get_embedding_table() masked_lm_positions masked_lm_ids masked_lm_weights)<line_sep>total_loss=cpc_loss+masked_lm_loss<block_end>tvars=tf.trainable_variables()<line_sep>initialized_variable_names={}<line_sep>scaffold_fn=<none><if_stmt>init_checkpoint<block_start>(assignment_map initialized_variable_names)=modeling.get_assignment_map_from_checkpoint(tvars init_checkpoint)<if_stmt>use_tpu<block_start><def_stmt>tpu_scaffold <block_start>tf.train.init_from_checkpoint(init_checkpoint assignment_map)<line_sep><return>tf.train.Scaffold()<block_end>scaffold_fn=tpu_scaffold<block_end><else_stmt><block_start>tf.train.init_from_checkpoint(init_checkpoint assignment_map)<block_end><block_end>tf.logging.info("**** Trainable Variables ****")<for_stmt>var tvars<block_start>init_string=""<if_stmt>var.name<in>initialized_variable_names<block_start>init_string=", *INIT_FROM_CKPT*"<block_end>tf.logging.info(" name = %s, shape = %s%s" var.name var.shape init_string)<block_end>output_spec=<none><if_stmt>mode<eq>tf.estimator.ModeKeys.TRAIN<block_start>train_op=optimization.create_optimizer(total_loss learning_rate num_train_steps num_warmup_steps use_tpu)<line_sep>output_spec=contrib_tpu.TPUEstimatorSpec(mode=mode loss=total_loss train_op=train_op scaffold_fn=scaffold_fn)<block_end><elif_stmt>mode<eq>tf.estimator.ModeKeys.EVAL<block_start><def_stmt>metric_fn cpc_loss mlm_loss label_ids logits is_real_example<block_start>"""Collect metrics for function."""<line_sep>predictions=tf.argmax(logits axis=-1 output_type=tf.int32)<line_sep>accuracy=tf.metrics.accuracy(labels=label_ids predictions=predictions weights=is_real_example)<line_sep>cpc_loss_metric=tf.metrics.mean(values=cpc_loss)<line_sep>mlm_loss_metric=tf.metrics.mean(values=mlm_loss)<line_sep>metric_dict={"eval_accuracy":accuracy "eval_cpc_loss":cpc_loss_metric "eval_mlm_loss":mlm_loss_metric}<for_stmt>i range(8)<block_start>metric_dict["acc"+str(i)]=tf.metrics.accuracy(labels=label_ids[: i] predictions=predictions[: i] weights=is_real_example[: i])<block_end><return>metric_dict<block_end>eval_metrics=(metric_fn [cpc_loss masked_lm_loss label_ids logits is_real_example])<line_sep>output_spec=contrib_tpu.TPUEstimatorSpec(mode=mode loss=total_loss eval_metrics=eval_metrics scaffold_fn=scaffold_fn)<block_end><else_stmt><block_start>output_spec=contrib_tpu.TPUEstimatorSpec(mode=mode predictions={"probabilities":probabilities} scaffold_fn=scaffold_fn)<block_end><return>output_spec<block_end><return>model_fn<block_end><def_stmt>main _<block_start>tf.logging.set_verbosity(tf.logging.INFO)<line_sep>tokenization.validate_case_matches_checkpoint(FLAGS.do_lower_case 
FLAGS.init_checkpoint)<if_stmt><not>FLAGS.do_train<and><not>FLAGS.do_eval<block_start><raise>ValueError("At least one of `do_train`, `do_eval` must be True.")<block_end>bert_config=modeling.BertConfig.from_json_file(FLAGS.bert_config_file)<if_stmt>FLAGS.max_seq_length<g>bert_config.max_position_embeddings<block_start><raise>ValueError("Cannot use sequence length %d because the BERT model "<concat>"was only trained up to sequence length %d"%(FLAGS.max_seq_length bert_config.max_position_embeddings))<block_end>tf.gfile.MakeDirs(FLAGS.output_dir)<line_sep>tpu_cluster_resolver=<none><if_stmt>FLAGS.use_tpu<and>FLAGS.tpu_name<block_start>tpu_cluster_resolver=contrib_cluster_resolver.TPUClusterResolver(FLAGS.tpu_name zone=FLAGS.tpu_zone project=FLAGS.gcp_project)<block_end>is_per_host=contrib_tpu.InputPipelineConfig.PER_HOST_V2<line_sep>run_config=contrib_tpu.RunConfig(cluster=tpu_cluster_resolver master=FLAGS.master model_dir=FLAGS.output_dir save_checkpoints_steps=FLAGS.save_checkpoints_steps tpu_config=contrib_tpu.TPUConfig(iterations_per_loop=FLAGS.iterations_per_loop num_shards=FLAGS.num_tpu_cores per_host_input_for_training=is_per_host))<line_sep>num_train_steps=<none><line_sep>num_warmup_steps=<none><if_stmt>FLAGS.do_train<block_start>num_train_steps=int(FLAGS.train_data_size/FLAGS.train_batch_size)<line_sep>num_warmup_steps=int(num_train_steps<times>FLAGS.warmup_proportion)<block_end>model_fn=model_fn_builder(bert_config=bert_config init_checkpoint=FLAGS.init_checkpoint learning_rate=FLAGS.learning_rate num_train_steps=num_train_steps num_warmup_steps=num_warmup_steps use_tpu=FLAGS.use_tpu use_one_hot_embeddings=FLAGS.use_tpu num_choices=FLAGS.num_choices add_masking=FLAGS.include_mlm)<line_sep># If TPU is not available, this will fall back to normal Estimator on CPU # or GPU. estimator=contrib_tpu.TPUEstimator(use_tpu=FLAGS.use_tpu model_fn=model_fn config=run_config train_batch_size=FLAGS.train_batch_size eval_batch_size=FLAGS.eval_batch_size predict_batch_size=FLAGS.predict_batch_size)<if_stmt>FLAGS.do_train<block_start>tf.logging.info("***** Running training *****")<line_sep>tf.logging.info(" Batch size = %d" FLAGS.train_batch_size)<line_sep>tf.logging.info(" Num steps = %d" num_train_steps)<line_sep>train_input_fn=file_based_input_fn_builder(input_file=FLAGS.train_file is_training=<true> drop_remainder=<true> add_masking=FLAGS.include_mlm)<line_sep>estimator.train(input_fn=train_input_fn steps=num_train_steps)<block_end><if_stmt>FLAGS.do_eval# This tells the estimator to run through the entire set. <block_start><if_stmt>FLAGS.eval_data_size<l>0<block_start>eval_steps=<none><block_end><else_stmt><block_start>eval_steps=int(FLAGS.eval_data_size/FLAGS.eval_batch_size)<block_end>eval_drop_remainder=<true><if>FLAGS.use_tpu<else><false><line_sep># Note that we are masking inputs for eval as well as training and this will # decrease eval performance eval_input_fn=file_based_input_fn_builder(input_file=FLAGS.eval_file is_training=<false> drop_remainder=eval_drop_remainder add_masking=FLAGS.include_mlm)<line_sep># checkpoints_iterator blocks until a new checkpoint appears. 
<for_stmt>ckpt contrib_training.checkpoints_iterator(estimator.model_dir)<block_start><try_stmt><block_start>result=estimator.evaluate(input_fn=eval_input_fn steps=eval_steps)<line_sep>tf.logging.info("********** Eval results:*******\n")<for_stmt>key sorted(result.keys())<block_start>tf.logging.info("%s = %s"%(key str(result[key])))<block_end><block_end><except_stmt>tf.errors.NotFoundError<block_start>tf.logging.error("Checkpoint path '%s' no longer exists." ckpt)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>flags.mark_flag_as_required("eval_file")<line_sep>flags.mark_flag_as_required("vocab_file")<line_sep>flags.mark_flag_as_required("bert_config_file")<line_sep>flags.mark_flag_as_required("output_dir")<line_sep>app.run(main)<block_end>
<import_stmt>unittest<import_from_stmt>typing Dict Optional<import_stmt>sqlalchemy.engine<import_from_stmt>magma.db_service.config TestConfig<import_from_stmt>magma.db_service.models Base<import_from_stmt>magma.db_service.session_manager Session<import_from_stmt>sqlalchemy MetaData create_engine<class_stmt>DBTestCaseBlueprint(unittest.TestCase)<block_start>metadata:MetaData<line_sep>engine:sqlalchemy.engine.Engine<line_sep>session:Session<line_sep>@classmethod<def_stmt>drop_all cls<block_start>cls.metadata.drop_all()<block_end>@classmethod<def_stmt>create_all cls<block_start>cls.metadata.create_all()<block_end>@classmethod<def_stmt>setMetadata cls metadata:MetaData=Base.metadata<block_start>cls.metadata=metadata<block_end>@classmethod<def_stmt>setUpClass cls<arrow><none><block_start>cls.setMetadata(metadata=Base.metadata)<block_end>@classmethod<def_stmt>set_up_db_test_case cls **kwargs:Optional[Dict]<block_start>cls.engine=cls.get_test_db_engine(**kwargs)<line_sep>cls.session=Session(bind=cls.engine)<line_sep>cls.bind_engine()<block_end>@staticmethod<def_stmt>get_test_db_engine **kwargs<arrow>sqlalchemy.engine.Engine<block_start>config=TestConfig()<line_sep><return>create_engine(url=kwargs.get("SQLALCHEMY_DB_URI" config.SQLALCHEMY_DB_URI) encoding=kwargs.get("SQLALCHEMY_DB_ENCODING" config.SQLALCHEMY_DB_ENCODING) echo=<false> future=kwargs.get("SQLALCHEMY_FUTURE" config.SQLALCHEMY_FUTURE) )<block_end>@classmethod<def_stmt>bind_engine cls<block_start>cls.metadata.bind=cls.engine<block_end>@classmethod<def_stmt>close_session cls<block_start>cls.session.rollback()<line_sep>cls.session.close()<block_end><block_end><class_stmt>BaseDBTestCase(DBTestCaseBlueprint)<block_start><def_stmt>setUp self<block_start>self.set_up_db_test_case()<line_sep>self.create_all()<block_end><def_stmt>tearDown self<block_start>self.close_session()<line_sep>self.drop_all()<block_end><block_end>
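# --- Hypothetical usage sketch, not part of the original module: a concrete test case would
# subclass BaseDBTestCase so that setUp()/tearDown() give it a fresh schema and session per
# test; the test body below is illustrative only.
<class_stmt>ExampleDBTestCase(BaseDBTestCase)<block_start><def_stmt>test_schema_starts_empty self<block_start># setUp() has already created the schema and bound a session to a fresh test engine <for_stmt>table Base.metadata.sorted_tables<block_start>rows=self.session.execute(table.select()).fetchall()<line_sep>self.assertEqual(rows [])<block_end><block_end><block_end>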
# Copyright 2018 The TensorFlow Authors All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Wrapper for providing semantic segmentation video data."""<import_stmt>tensorflow<as>tf<import_from_stmt>feelvos input_preprocess<import_from_stmt>feelvos model<import_from_stmt>feelvos.utils mask_damaging<import_from_stmt>feelvos.utils train_utils<line_sep>slim=tf.contrib.slim<line_sep>dataset_data_provider=slim.dataset_data_provider<line_sep>MIN_LABEL_COUNT=10<def_stmt>decode_image_sequence tensor image_format='jpeg' shape=<none> channels=3 raw_dtype=tf.uint8<block_start>"""Decodes a sequence of images. Args: tensor: the tensor of strings to decode, shape: [num_images] image_format: a string (possibly tensor) with the format of the image. Options include 'jpeg', 'png', and 'raw'. shape: a list or tensor of the decoded image shape for a single image. channels: if 'shape' is None, the third dimension of the image is set to this value. raw_dtype: if the image is encoded as raw bytes, this is the method of decoding the bytes into values. Returns: The decoded images with shape [time, height, width, channels]. """<line_sep>handler=slim.tfexample_decoder.Image(shape=shape channels=channels dtype=raw_dtype repeated=<true>)<line_sep><return>handler.tensors_to_item({'image/encoded':tensor 'image/format':image_format})<block_end><def_stmt>_get_data data_provider dataset_split video_frames_are_decoded<block_start>"""Gets data from data provider. Args: data_provider: An object of slim.data_provider. dataset_split: Dataset split. video_frames_are_decoded: Boolean, whether the video frames are already decoded Returns: image: Image Tensor. label: Label Tensor storing segmentation annotations. object_label: An integer refers to object_label according to labelmap. If the example has more than one object_label, take the first one. image_name: Image name. height: Image height. width: Image width. video_id: String tensor representing the name of the video. Raises: ValueError: Failed to find label. """<if_stmt>video_frames_are_decoded<block_start>image,=data_provider.get(['image'])<block_end><else_stmt><block_start>image,=data_provider.get(['image/encoded'])<block_end># Some datasets do not contain image_name. 
<if_stmt>'image_name'<in>data_provider.list_items()<block_start>image_name,=data_provider.get(['image_name'])<block_end><else_stmt><block_start>image_name=tf.constant('')<block_end>height,width=data_provider.get(['height' 'width'])<line_sep>label=<none><if_stmt>dataset_split<ne>'test'<block_start><if_stmt>video_frames_are_decoded<block_start><if_stmt>'labels_class'<not><in>data_provider.list_items()<block_start><raise>ValueError('Failed to find labels.')<block_end>label,=data_provider.get(['labels_class'])<block_end><else_stmt><block_start>key='segmentation/object/encoded'<if_stmt>key<not><in>data_provider.list_items()<block_start><raise>ValueError('Failed to find labels.')<block_end>label,=data_provider.get([key])<block_end><block_end>object_label=<none><line_sep>video_id,=data_provider.get(['video_id'])<line_sep><return>image label object_label image_name height width video_id<block_end><def_stmt>_has_foreground_and_background_in_first_frame label subsampling_factor<block_start>"""Checks if the labels have foreground and background in the first frame. Args: label: Label tensor of shape [num_frames, height, width, 1]. subsampling_factor: Integer, the subsampling factor. Returns: Boolean, whether the labels have foreground and background in the first frame. """<line_sep>h,w=train_utils.resolve_shape(label)[1:3]<line_sep>label_downscaled=tf.squeeze(tf.image.resize_nearest_neighbor(label[0 tf.newaxis] [h<floordiv>subsampling_factor w<floordiv>subsampling_factor] align_corners=<true>) axis=0)<line_sep>is_bg=tf.equal(label_downscaled 0)<line_sep>is_fg=tf.logical_not(is_bg)<line_sep># Just using reduce_any was not robust enough, so lets make sure the count # is above MIN_LABEL_COUNT. fg_count=tf.reduce_sum(tf.cast(is_fg tf.int32))<line_sep>bg_count=tf.reduce_sum(tf.cast(is_bg tf.int32))<line_sep>has_bg=tf.greater_equal(fg_count MIN_LABEL_COUNT)<line_sep>has_fg=tf.greater_equal(bg_count MIN_LABEL_COUNT)<line_sep><return>tf.logical_and(has_bg has_fg)<block_end><def_stmt>_has_foreground_and_background_in_first_frame_2 label decoder_output_stride<block_start>"""Checks if the labels have foreground and background in the first frame. Second attempt, this time we use the actual output dimension for resizing. Args: label: Label tensor of shape [num_frames, height, width, 1]. decoder_output_stride: Integer, the stride of the decoder output. Returns: Boolean, whether the labels have foreground and background in the first frame. """<line_sep>h,w=train_utils.resolve_shape(label)[1:3]<line_sep>h_sub=model.scale_dimension(h 1.0/decoder_output_stride)<line_sep>w_sub=model.scale_dimension(w 1.0/decoder_output_stride)<line_sep>label_downscaled=tf.squeeze(tf.image.resize_nearest_neighbor(label[0 tf.newaxis] [h_sub w_sub] align_corners=<true>) axis=0)<line_sep>is_bg=tf.equal(label_downscaled 0)<line_sep>is_fg=tf.logical_not(is_bg)<line_sep># Just using reduce_any was not robust enough, so lets make sure the count # is above MIN_LABEL_COUNT. fg_count=tf.reduce_sum(tf.cast(is_fg tf.int32))<line_sep>bg_count=tf.reduce_sum(tf.cast(is_bg tf.int32))<line_sep>has_bg=tf.greater_equal(fg_count MIN_LABEL_COUNT)<line_sep>has_fg=tf.greater_equal(bg_count MIN_LABEL_COUNT)<line_sep><return>tf.logical_and(has_bg has_fg)<block_end><def_stmt>_has_enough_pixels_of_each_object_in_first_frame label decoder_output_stride<block_start>"""Checks if for each object (incl. background) enough pixels are visible. During test time, we will usually not see a reference frame in which only very few pixels of one object are visible. 
These cases can be problematic during training, especially if more than the 1-nearest neighbor is used. That's why this function can be used to detect and filter these cases. Args: label: Label tensor of shape [num_frames, height, width, 1]. decoder_output_stride: Integer, the stride of the decoder output. Returns: Boolean, whether the labels have enough pixels of each object in the first frame. """<line_sep>h,w=train_utils.resolve_shape(label)[1:3]<line_sep>h_sub=model.scale_dimension(h 1.0/decoder_output_stride)<line_sep>w_sub=model.scale_dimension(w 1.0/decoder_output_stride)<line_sep>label_downscaled=tf.squeeze(tf.image.resize_nearest_neighbor(label[0 tf.newaxis] [h_sub w_sub] align_corners=<true>) axis=0)<line_sep>_,_,counts=tf.unique_with_counts(tf.reshape(label_downscaled [-1]))<line_sep>has_enough_pixels_per_object=tf.reduce_all(tf.greater_equal(counts MIN_LABEL_COUNT))<line_sep><return>has_enough_pixels_per_object<block_end><def_stmt>get dataset num_frames_per_video crop_size batch_size min_resize_value=<none> max_resize_value=<none> resize_factor=<none> min_scale_factor=1. max_scale_factor=1. scale_factor_step_size=0 preprocess_image_and_label=<true> num_readers=1 num_threads=1 dataset_split=<none> is_training=<true> model_variant=<none> batch_capacity_factor=32 video_frames_are_decoded=<false> decoder_output_stride=<none> first_frame_finetuning=<false> sample_only_first_frame_for_finetuning=<false> sample_adjacent_and_consistent_query_frames=<false> remap_labels_to_reference_frame=<true> generate_prev_frame_mask_by_mask_damaging=<false> three_frame_dataset=<false> add_prev_frame_label=<true><block_start>"""Gets the dataset split for semantic segmentation. This functions gets the dataset split for semantic segmentation. In particular, it is a wrapper of (1) dataset_data_provider which returns the raw dataset split, (2) input_preprcess which preprocess the raw data, and (3) the Tensorflow operation of batching the preprocessed data. Then, the output could be directly used by training, evaluation or visualization. Args: dataset: An instance of slim Dataset. num_frames_per_video: The number of frames used per video crop_size: Image crop size [height, width]. batch_size: Batch size. min_resize_value: Desired size of the smaller image side. max_resize_value: Maximum allowed size of the larger image side. resize_factor: Resized dimensions are multiple of factor plus one. min_scale_factor: Minimum scale factor value. max_scale_factor: Maximum scale factor value. scale_factor_step_size: The step size from min scale factor to max scale factor. The input is randomly scaled based on the value of (min_scale_factor, max_scale_factor, scale_factor_step_size). preprocess_image_and_label: Boolean variable specifies if preprocessing of image and label will be performed or not. num_readers: Number of readers for data provider. num_threads: Number of threads for batching data. dataset_split: Dataset split. is_training: Is training or not. model_variant: Model variant (string) for choosing how to mean-subtract the images. See feature_extractor.network_map for supported model variants. batch_capacity_factor: Batch capacity factor affecting the training queue batch capacity. video_frames_are_decoded: Boolean, whether the video frames are already decoded decoder_output_stride: Integer, the stride of the decoder output. first_frame_finetuning: Boolean, whether to only sample the first frame for fine-tuning. 
sample_only_first_frame_for_finetuning: Boolean, whether to only sample the first frame during fine-tuning. This should be False when using lucid or wonderland data, but true when fine-tuning on the first frame only. Only has an effect if first_frame_finetuning is True. sample_adjacent_and_consistent_query_frames: Boolean, if true, the query frames (all but the first frame which is the reference frame) will be sampled such that they are adjacent video frames and have the same crop coordinates and flip augmentation. remap_labels_to_reference_frame: Boolean, whether to remap the labels of the query frames to match the labels of the (downscaled) reference frame. If a query frame contains a label which is not present in the reference, it will be mapped to background. generate_prev_frame_mask_by_mask_damaging: Boolean, whether to generate the masks used as guidance from the previous frame by damaging the ground truth mask. three_frame_dataset: Boolean, whether the dataset has exactly three frames per video of which the first is to be used as reference and the two others are consecutive frames to be used as query frames. add_prev_frame_label: Boolean, whether to sample one more frame before the first query frame to obtain a previous frame label. Only has an effect, if sample_adjacent_and_consistent_query_frames is True and generate_prev_frame_mask_by_mask_damaging is False. Returns: A dictionary of batched Tensors for semantic segmentation. Raises: ValueError: dataset_split is None, or Failed to find labels. """<if_stmt>dataset_split<is><none><block_start><raise>ValueError('Unknown dataset split.')<block_end><if_stmt>model_variant<is><none><block_start>tf.logging.warning('Please specify a model_variant. See '<concat>'feature_extractor.network_map for supported model '<concat>'variants.')<block_end>data_provider=dataset_data_provider.DatasetDataProvider(dataset num_readers=num_readers num_epochs=<none><if>is_training<else>1 shuffle=is_training)<line_sep>image,label,object_label,image_name,height,width,video_id=_get_data(data_provider dataset_split video_frames_are_decoded)<line_sep>sampling_is_valid=tf.constant(<true>)<if_stmt>num_frames_per_video<is><not><none><block_start>total_num_frames=tf.shape(image)[0]<if_stmt>first_frame_finetuning<or>three_frame_dataset<block_start><if_stmt>sample_only_first_frame_for_finetuning<block_start><assert_stmt><not>sample_adjacent_and_consistent_query_frames ('this option does not make sense for sampling only first frame.')<line_sep># Sample the first frame num_frames_per_video times. sel_indices=tf.tile(tf.constant(0 dtype=tf.int32)[tf.newaxis] multiples=[num_frames_per_video])<block_end><else_stmt><block_start><if_stmt>sample_adjacent_and_consistent_query_frames<block_start><if_stmt>add_prev_frame_label<block_start>num_frames_per_video<augadd>1<block_end># Since this is first frame fine-tuning, we'll for now assume that # each sequence has exactly 3 images: the ref frame and 2 adjacent # query frames. <assert_stmt>num_frames_per_video<eq>3<with_stmt>tf.control_dependencies([tf.assert_equal(total_num_frames 3)])<block_start>sel_indices=tf.constant([1 2] dtype=tf.int32)<block_end><block_end><else_stmt># Sample num_frames_per_video - 1 query frames which are not the # first frame. <block_start>sel_indices=tf.random_shuffle(tf.range(1 total_num_frames))[:(num_frames_per_video-1)]<block_end># Concat first frame as reference frame to the front. 
sel_indices=tf.concat([tf.constant(0 dtype=tf.int32)[tf.newaxis] sel_indices] axis=0)<block_end><block_end><else_stmt><block_start><if_stmt>sample_adjacent_and_consistent_query_frames<block_start><if_stmt>add_prev_frame_label# Sample one more frame which we can use to provide initial softmax # feedback. <block_start>num_frames_per_video<augadd>1<block_end>ref_idx=tf.random_shuffle(tf.range(total_num_frames))[0]<line_sep>sampling_is_valid=tf.greater_equal(total_num_frames num_frames_per_video)<def_stmt>sample_query_start_idx <block_start><return>tf.random_shuffle(tf.range(total_num_frames-num_frames_per_video+1))[0]<block_end>query_start_idx=tf.cond(sampling_is_valid sample_query_start_idx <lambda>:tf.constant(0 dtype=tf.int32))<def_stmt>sample_sel_indices <block_start><return>tf.concat([ref_idx[tf.newaxis] tf.range(query_start_idx query_start_idx+(num_frames_per_video-1))] axis=0)<block_end>sel_indices=tf.cond(sampling_is_valid sample_sel_indices <lambda>:tf.zeros((num_frames_per_video ) dtype=tf.int32))<block_end><else_stmt># Randomly sample some frames from the video. <block_start>sel_indices=tf.random_shuffle(tf.range(total_num_frames))[:num_frames_per_video]<block_end><block_end>image=tf.gather(image sel_indices axis=0)<block_end><if_stmt><not>video_frames_are_decoded<block_start>image=decode_image_sequence(image)<block_end><if_stmt>label<is><not><none><block_start><if_stmt>num_frames_per_video<is><not><none><block_start>label=tf.gather(label sel_indices axis=0)<block_end><if_stmt><not>video_frames_are_decoded<block_start>label=decode_image_sequence(label image_format='png' channels=1)<block_end># Sometimes, label is saved as [num_frames_per_video, height, width] or # [num_frames_per_video, height, width, 1]. We change it to be # [num_frames_per_video, height, width, 1]. <if_stmt>label.shape.ndims<eq>3<block_start>label=tf.expand_dims(label 3)<block_end><elif_stmt>label.shape.ndims<eq>4<and>label.shape.dims[3]<eq>1<block_start><pass><block_end><else_stmt><block_start><raise>ValueError('Input label shape must be '<concat>'[num_frames_per_video, height, width],'<concat>' or [num_frames, height, width, 1]. '<concat>'Got {}'.format(label.shape.ndims))<block_end>label.set_shape([<none> <none> <none> 1])<block_end># Add size of first dimension since tf can't figure it out automatically. 
image.set_shape((num_frames_per_video <none> <none> <none>))<if_stmt>label<is><not><none><block_start>label.set_shape((num_frames_per_video <none> <none> <none>))<block_end>preceding_frame_label=<none><if_stmt>preprocess_image_and_label<block_start><if_stmt>num_frames_per_video<is><none><block_start><raise>ValueError('num_frame_per_video must be specified for preproc.')<block_end>original_images=[]<line_sep>images=[]<line_sep>labels=[]<if_stmt>sample_adjacent_and_consistent_query_frames<block_start>num_frames_individual_preproc=1<block_end><else_stmt><block_start>num_frames_individual_preproc=num_frames_per_video<block_end><for_stmt>frame_idx range(num_frames_individual_preproc)<block_start>original_image_t,image_t,label_t=(input_preprocess.preprocess_image_and_label(image[frame_idx] label[frame_idx] crop_height=crop_size[0]<if>crop_size<is><not><none><else><none> crop_width=crop_size[1]<if>crop_size<is><not><none><else><none> min_resize_value=min_resize_value max_resize_value=max_resize_value resize_factor=resize_factor min_scale_factor=min_scale_factor max_scale_factor=max_scale_factor scale_factor_step_size=scale_factor_step_size ignore_label=dataset.ignore_label is_training=is_training model_variant=model_variant))<line_sep>original_images.append(original_image_t)<line_sep>images.append(image_t)<line_sep>labels.append(label_t)<block_end><if_stmt>sample_adjacent_and_consistent_query_frames<block_start>imgs_for_preproc=[image[frame_idx]<for>frame_idx range(1 num_frames_per_video)]<line_sep>labels_for_preproc=[label[frame_idx]<for>frame_idx range(1 num_frames_per_video)]<line_sep>original_image_rest,image_rest,label_rest=(input_preprocess.preprocess_images_and_labels_consistently(imgs_for_preproc labels_for_preproc crop_height=crop_size[0]<if>crop_size<is><not><none><else><none> crop_width=crop_size[1]<if>crop_size<is><not><none><else><none> min_resize_value=min_resize_value max_resize_value=max_resize_value resize_factor=resize_factor min_scale_factor=min_scale_factor max_scale_factor=max_scale_factor scale_factor_step_size=scale_factor_step_size ignore_label=dataset.ignore_label is_training=is_training model_variant=model_variant))<line_sep>original_images.extend(original_image_rest)<line_sep>images.extend(image_rest)<line_sep>labels.extend(label_rest)<block_end><assert_stmt>len(original_images)<eq>num_frames_per_video<assert_stmt>len(images)<eq>num_frames_per_video<assert_stmt>len(labels)<eq>num_frames_per_video<if_stmt>remap_labels_to_reference_frame# Remap labels to indices into the labels of the (downscaled) reference # frame, or 0, i.e. background, for labels which are not present # in the reference. <block_start>reference_labels=labels[0][tf.newaxis]<line_sep>h,w=train_utils.resolve_shape(reference_labels)[1:3]<line_sep>embedding_height=model.scale_dimension(h 1.0/decoder_output_stride)<line_sep>embedding_width=model.scale_dimension(w 1.0/decoder_output_stride)<line_sep>reference_labels_embedding_size=tf.squeeze(tf.image.resize_nearest_neighbor(reference_labels tf.stack([embedding_height embedding_width]) align_corners=<true>) axis=0)<line_sep># Get sorted unique labels in the reference frame. 
labels_in_ref_frame,_=tf.unique(tf.reshape(reference_labels_embedding_size [-1]))<line_sep>labels_in_ref_frame=tf.contrib.framework.sort(labels_in_ref_frame)<for_stmt>idx range(1 len(labels))<block_start>ref_label_mask=tf.equal(labels[idx] labels_in_ref_frame[tf.newaxis tf.newaxis :])<line_sep>remapped=tf.argmax(tf.cast(ref_label_mask tf.uint8) axis=-1 output_type=tf.int32)<line_sep># Set to 0 if label is not present is_in_ref=tf.reduce_any(ref_label_mask axis=-1)<line_sep>remapped<augmul>tf.cast(is_in_ref tf.int32)<line_sep>labels[idx]=remapped[<ellipsis> tf.newaxis]<block_end><block_end><if_stmt>sample_adjacent_and_consistent_query_frames<block_start><if_stmt>first_frame_finetuning<and>generate_prev_frame_mask_by_mask_damaging<block_start>preceding_frame_label=mask_damaging.damage_masks(labels[1])<block_end><elif_stmt>add_prev_frame_label# Discard the image of the additional frame and take the label as # initialization for softmax feedback. <block_start>original_images=[original_images[0]]+original_images[2:]<line_sep>preceding_frame_label=labels[1]<line_sep>images=[images[0]]+images[2:]<line_sep>labels=[labels[0]]+labels[2:]<line_sep>num_frames_per_video<augsub>1<block_end><block_end>original_image=tf.stack(original_images axis=0)<line_sep>image=tf.stack(images axis=0)<line_sep>label=tf.stack(labels axis=0)<block_end><else_stmt><block_start><if_stmt>label<is><not><none># Need to set label shape due to batching. <block_start>label.set_shape([num_frames_per_video <none><if>crop_size<is><none><else>crop_size[0] <none><if>crop_size<is><none><else>crop_size[1] 1])<block_end>original_image=tf.to_float(tf.zeros_like(label))<if_stmt>crop_size<is><none><block_start>height=tf.shape(image)[1]<line_sep>width=tf.shape(image)[2]<block_end><else_stmt><block_start>height=crop_size[0]<line_sep>width=crop_size[1]<block_end><block_end>sample={'image':image 'image_name':image_name 'height':height 'width':width 'video_id':video_id}<if_stmt>label<is><not><none><block_start>sample['label']=label<block_end><if_stmt>object_label<is><not><none><block_start>sample['object_label']=object_label<block_end><if_stmt>preceding_frame_label<is><not><none><block_start>sample['preceding_frame_label']=preceding_frame_label<block_end><if_stmt><not>is_training# Original image is only used during visualization. <block_start>sample['original_image']=original_image<block_end><if_stmt>is_training<block_start><if_stmt>first_frame_finetuning<block_start>keep_input=tf.constant(<true>)<block_end><else_stmt><block_start>keep_input=tf.logical_and(sampling_is_valid tf.logical_and(_has_enough_pixels_of_each_object_in_first_frame(label decoder_output_stride) _has_foreground_and_background_in_first_frame_2(label decoder_output_stride)))<block_end>batched=tf.train.maybe_batch(sample keep_input=keep_input batch_size=batch_size num_threads=num_threads capacity=batch_capacity_factor<times>batch_size dynamic_pad=<true>)<block_end><else_stmt><block_start>batched=tf.train.batch(sample batch_size=batch_size num_threads=num_threads capacity=batch_capacity_factor<times>batch_size dynamic_pad=<true>)<block_end># Flatten from [batch, num_frames_per_video, ...] to # batch * num_frames_per_video, ...]. 
cropped_height=train_utils.resolve_shape(batched['image'])[2]<line_sep>cropped_width=train_utils.resolve_shape(batched['image'])[3]<if_stmt>num_frames_per_video<is><none><block_start>first_dim=-1<block_end><else_stmt><block_start>first_dim=batch_size<times>num_frames_per_video<block_end>batched['image']=tf.reshape(batched['image'] [first_dim cropped_height cropped_width 3])<if_stmt>label<is><not><none><block_start>batched['label']=tf.reshape(batched['label'] [first_dim cropped_height cropped_width 1])<block_end><return>batched<block_end>
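# --- Hypothetical usage sketch, not part of the original file: one way the wrapper above could
# be wired into a training input pipeline; `video_dataset` stands in for a slim Dataset instance
# and every argument value here is illustrative, not a recommended setting.
samples=get(video_dataset num_frames_per_video=3 crop_size=[465 465] batch_size=1 min_scale_factor=0.5 max_scale_factor=2.0 scale_factor_step_size=0.25 dataset_split='train' is_training=<true> model_variant='xception_65' decoder_output_stride=4 sample_adjacent_and_consistent_query_frames=<true>)<line_sep>images,labels=samples['image'] samples['label']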
<import_stmt>tensorflow<as>tf<import_from_stmt>gdmix.util.io_utils read_json_file namedtuple_with_defaults<class_stmt>DatasetMetadata<block_start>"""Abstract Metadata class from which all dataset metadata classes derive"""<line_sep># define mapping of dtype in meta data to dtype in TensorFlow TO_TF_DTYPE={'int':tf.int32 'long':tf.int64 'float':tf.float32 'double':tf.float64 'bytes':tf.string 'string':tf.string}<line_sep>TF_INT_DTYPES={tf.int8 tf.uint8 tf.uint16 tf.uint32 tf.uint64 tf.int16 tf.int32 tf.int64}<line_sep>FEATURES="features"<line_sep>LABELS="labels"<line_sep>INDICES="indices"<line_sep>VALUES="values"<line_sep>NUMBER_OF_TRAINING_SAMPLES="numberOfTrainingSamples"<line_sep>SUPPORTED_TYPES=frozenset(['int' 'long' 'float' 'double' 'bytes' 'string'])<line_sep>METADATA_FIELDS=frozenset(["name" "dtype" "shape" "isSparse"])<line_sep>METADATA_FIELD_DEFAULT_VALUES=(<none> <none> <none> <false>)<line_sep>MetadataInfo=namedtuple_with_defaults("MetadataInfo" METADATA_FIELDS defaults=METADATA_FIELD_DEFAULT_VALUES)<def_stmt>__init__ self path_or_metadata<block_start>""" Take a metadata str or dict to build up the tensor metadata infos :param path_or_metadata: Path to the metadata file or a JSON dict corresponding to the metadata """<line_sep># ensure m is dict <if_stmt>isinstance(path_or_metadata str)<block_start><try_stmt><block_start>path_or_metadata=read_json_file(path_or_metadata)<block_end><except_stmt>Exception<as>err<block_start><raise>ValueError("Input of type str must be a valid JSON file. {}".format(err))<block_end><block_end># ensure features and labels are list <if_stmt><not>isinstance(path_or_metadata.get(self.FEATURES []) list)<block_start><raise>TypeError("Features must be a list. Type {} detected.".format(type(path_or_metadata[self.FEATURES])))<block_end><if_stmt><not>isinstance(path_or_metadata.get(self.LABELS []) list)<block_start><raise>TypeError("Labels must be a list. Type {} detected.".format(type(path_or_metadata[self.LABELS])))<block_end><def_stmt>parseMetadata key<block_start>tensors={}<for_stmt>entity path_or_metadata.get(key [])<block_start>name=entity["name"]<line_sep># Check if there are duplicated names in the metadata <if_stmt>name<in>tensors<block_start><raise>ValueError("Tensor name in your metadata appears more than once:{}".format(name))<block_end>tensors[name]=self._build_metadata_info(entity.copy())<block_end><return>tensors<block_end><try_stmt><block_start>feature_tensors=parseMetadata(self.FEATURES)<line_sep>label_tensors=parseMetadata(self.LABELS)<block_end><except_stmt>(TypeError ValueError)<as>err<block_start><raise>ValueError("Invalid field: {}".format(err))<block_end>self._tensors={**feature_tensors **label_tensors}<line_sep>self._features=list(feature_tensors.values())<line_sep>self._labels=list(label_tensors.values())<line_sep>self._feature_names=list(feature_tensors.keys())<line_sep>self._label_names=list(label_tensors.keys())<line_sep>self._number_of_training_samples=path_or_metadata.get("numberOfTrainingSamples" -1)<block_end>@classmethod<def_stmt>_build_metadata_info cls metadata_dict<block_start>""" Create namedtuple from metadata dict :param metadata_dict: the metadata in dict form :return: metadata namedtuple """<if_stmt><not>cls.METADATA_FIELDS.issubset(metadata_dict.keys())<block_start><raise>ValueError("Required metadata fields are {0}. 
"<concat>"Provided fields are {1}".format(",".join(cls.METADATA_FIELDS) ",".join(metadata_dict.keys())))<block_end>metadata_obj=cls.MetadataInfo(**metadata_dict)<if_stmt>metadata_obj.name<is><none><or><not>isinstance(metadata_obj.name str)<block_start><raise>ValueError("Feature name cannot be None and must be str")<block_end><if_stmt>metadata_obj.dtype<not><in>cls.SUPPORTED_TYPES<block_start><raise>ValueError("User provided dtype '{}' is not supported. "<concat>"Supported types are '{}'.".format(metadata_obj.dtype list(cls.SUPPORTED_TYPES)))<block_end>metadata_obj=metadata_obj._replace(dtype=cls.TO_TF_DTYPE[metadata_obj.dtype])<if_stmt>metadata_obj.shape<is><none><or><not>isinstance(metadata_obj.shape list)<block_start><raise>ValueError("Feature shape cannot be None and must be a list")<block_end><return>metadata_obj<block_end><def_stmt>get_features self<block_start><return>self._features.copy()<block_end><def_stmt>get_labels self<block_start><return>self._labels.copy()<block_end><def_stmt>get_label_names self<block_start><return>self._label_names.copy()<block_end><def_stmt>get_feature_names self<block_start><return>self._feature_names.copy()<block_end><def_stmt>get_feature_shape self feature_name<block_start><return>next(filter(<lambda>x:x.name<eq>feature_name self.get_features())).shape<block_end><def_stmt>get_tensors self<block_start><return>self._tensors.copy()<block_end><def_stmt>get_number_of_training_samples self<block_start><return>self._number_of_training_samples<block_end>@staticmethod<def_stmt>map_int in_dtype<block_start>""" TFRecord features only support three data types: 1. tf.float32 2. tf.int64 3. tf.string This function maps int32 and int16 to int64 and leaves other types intact. :param in_dtype: Input TF data type :return: Mapped TF data type """<if_stmt>in_dtype<in>DatasetMetadata.TF_INT_DTYPES<block_start><return>tf.int64<block_end><else_stmt><block_start><return>in_dtype<block_end><block_end><block_end>
<import_from_stmt>datetime datetime<import_stmt>pandas<as>pd<import_stmt>numpy<as>np<line_sep># add the project root (the directory containing execute) to the module search path <import_from_stmt>utils.GlobalVar add_path_to_sys<line_sep>rootdir=add_path_to_sys()<line_sep># import the constants used for attendance-state decisions <import_from_stmt>utils.GlobalVar COURSE_TIME LATE_SPAN<line_sep>filenames=['Auxiliary_Info.xlsx' 'Classroom_Course_Schedule.xlsx' 'Classroom_Info.xlsx' 'College_Class_Info.xlsx' 'Attendance_Logs.xlsx']<line_sep>au_info=pd.read_excel(rootdir+'/development/'+filenames[0])<def_stmt>calculate_current_teach_week semester_first_week_date='2021-3-08 08:00:00'<block_start>""" Calculate the teaching week the current date falls in. Idea: week-of-year of today minus week-of-year of the semester's first week. ---- param: semester_first_week_date: date of the first week of the semester, e.g. '2021-3-08 08:00:00' return: the current teaching week """<line_sep># week-of-year of the given date, returned as a string semester_first_week=datetime.strptime(semester_first_week_date '%Y-%m-%d %H:%M:%S').strftime('%W')<line_sep># week-of-year of today, returned as a string current_year_week=datetime.now().strftime('%W')<line_sep># teaching week the current date belongs to # the -1 inside the parentheses removes the weeks before the first teaching week # the final +1 is needed because week numbering starts at index 00 current_teach_week=int(current_year_week)-(int(semester_first_week)-1)+1<line_sep><return>current_teach_week<block_end><def_stmt>holiday_judgment judg_time=datetime.now() holidays=au_info['Holiday Date']<block_start>""" Decide whether the date being checked falls on a holiday. ---- param: judg_time: the time to be checked param: holidays: list of holiday dates for the current semester return: True if the date falls on a holiday, otherwise False """<line_sep># the sheet contains NaT values which would break the loop below, so filter them out first # boolean index of the rows that are not NaT indexes_without_nat=[(type(holiday)<ne>type(pd.NaT))<for>holiday au_info['Holiday Date']]<line_sep># holiday list with the NaT values removed holidays_pure=list(holidays[indexes_without_nat])<line_sep># full current datetime now=datetime.now()<line_sep># date part only judg_time_ymd=now.date()<line_sep># flag marking whether today is a holiday is_now_holiday=<false><line_sep># iterate over the holiday list <for_stmt>holiday holidays_pure# keep only the year/month/day of this holiday <block_start>holiday_month_day=datetime(holiday.year holiday.month holiday.day).date()<if_stmt>judg_time_ymd<eq>holiday_month_day<block_start>is_now_holiday=<true><block_end><block_end><if_stmt>is_now_holiday<block_start>print(f'[INFO] {judg_time_ymd} is Holiday!')<block_end><else_stmt><block_start>print(f'[INFO] {judg_time_ymd} is not Holiday!')<block_end><return>is_now_holiday<block_end><def_stmt>attendance_check set_time='08:00:00'<block_start>""" Note: comparisons across midnight are problematic (e.g. attendance configured for 23:00 with a check-in at 00:30), but that case does not occur in practice. Determine the attendance state relative to a manually configured attendance time, for example: - 1) 正常 (normal): checked in within one hour before the configured attendance time - 2) 迟到 (late): within 45 minutes after class starts - 3) 其他 (other): more than 45 minutes after class starts - 4) 旷课 (absent): no check-in until after the class - 5) 请假 (on leave): read automatically from the leave system, or entered manually by the teacher ---- param set_time: e.g. '19:00:00' """<line_sep>####################### configurable parameters ##################### # normal: check-in within one hour (3600 s) before the configured attendance time normal_span=60<times>60# seconds # duration of one class block (a double period) course_time=COURSE_TIME# minutes # how long after class starts a check-in still counts as late late_span=LATE_SPAN<line_sep>######################################################## # full current datetime now=datetime.now()<line_sep># current year, month and day as ints judg_time=now<line_sep>now_y=judg_time.year<line_sep>now_m=judg_time.month<line_sep>now_d=judg_time.day<line_sep># attendance state marker; the state strings are kept in the original Chinese: 正常=normal, 迟到=late, 其他=other, 旷课=absent att_state='正常'<line_sep># build the configured attendance datetime att_time=datetime.strptime(f'{now_y}-{now_m}-{now_d} {set_time}' '%Y-%m-%d %H:%M:%S')<line_sep># difference between now and the configured time time_diff=now-att_time<line_sep># print(time_diff) time_diff_days,time_diff_seconds=time_diff.days time_diff.seconds<line_sep># print(time_diff_days, time_diff_seconds) # a negative time_diff_days means the attendance time has not arrived yet; compute the time left until it does <if_stmt>time_diff_days<l>0# seconds remaining until the attendance time <block_start>time_span_att=60<times>60<times>23-time_diff_seconds<if_stmt>time_span_att<l>normal_span<block_start>att_state='正常'<block_end><else_stmt><block_start>print(f'[INFO] 
Invalid! Please check in within one hour before the configured attendance time! {round((time_span_att-60<times>60)/60 2)} minutes left until the attendance window opens!')<block_end><block_end># a non-negative time_diff_days means the attendance time has passed; decide between late and absent <else_stmt># within the first 45 minutes of class counts as late <block_start><if_stmt>time_diff_seconds-late_span<times>60<le>0<block_start>att_state='迟到'<block_end><elif_stmt>(time_diff_seconds<g>late_span<times>60)<and>(time_diff_seconds<le>course_time<times>60)<block_start>att_state='其他'<line_sep>print('[INFO] Past the late check-in window, please contact the teacher!')<block_end><else_stmt><block_start>att_state='旷课'<block_end><block_end>print(f'[INFO] configured time: {att_time}, check-in time: {now}, attendance state: {att_state}')<line_sep><return>att_state<block_end>
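# --- Hypothetical usage sketch, not part of the original module: the helpers above are plain
# functions, so they can be exercised directly; the dates and times below are illustrative only.
current_week=calculate_current_teach_week(semester_first_week_date='2021-3-08 08:00:00')<line_sep>print(f'[INFO] current teaching week: {current_week}')<line_sep><if_stmt><not>holiday_judgment()<block_start>state=attendance_check(set_time='08:00:00')<line_sep>print(f'[INFO] attendance state: {state}')<block_end>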
"""This file is only retained for backwards compatibility. It will be removed in the future. sre was moved to re in version 2.5. """<import_stmt>warnings<line_sep>warnings.warn("The sre module is deprecated, please import re." DeprecationWarning 2)<import_from_stmt>re *<import_from_stmt>re __all__<line_sep># old pickles expect the _compile() reconstructor in this module <import_from_stmt>re _compile<line_sep>
# Copyright 2014-2017 F5 Networks Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>copy<import_stmt>pytest<import_from_stmt>distutils.version LooseVersion<import_from_stmt>f5.bigip.tm.gtm.listener Listener<import_from_stmt>f5.sdk_exception MissingRequiredCreationParameter<import_from_stmt>f5.sdk_exception MissingRequiredReadParameter<import_from_stmt>f5.sdk_exception UnsupportedOperation<import_from_stmt>pytest symbols<import_from_stmt>requests.exceptions HTTPError<import_from_stmt>six iteritems<line_sep>pytestmark=pytest.mark.skipif(symbols<and>hasattr(symbols 'modules')<and><not>symbols.modules['gtm'] reason='The modules symbol for GTM is set to False.')<def_stmt>delete_listener mgmt_root name partition<block_start><try_stmt><block_start>foo=mgmt_root.tm.gtm.listeners.listener.load(name=name partition=partition)<block_end><except_stmt>HTTPError<as>err<block_start><if_stmt>err.response.status_code<ne>404<block_start><raise><block_end><return><block_end>foo.delete()<block_end><def_stmt>setup_create_test request mgmt_root name partition<block_start><def_stmt>teardown <block_start>delete_listener(mgmt_root name partition)<block_end>request.addfinalizer(teardown)<block_end><def_stmt>setup_basic_test request mgmt_root name address partition<block_start><def_stmt>teardown <block_start>delete_listener(mgmt_root name partition)<block_end>reg1=mgmt_root.tm.gtm.listeners.listener.create(name=name address=address partition=partition)<line_sep>request.addfinalizer(teardown)<line_sep><return>reg1<block_end><class_stmt>TestCreate(object)<block_start><def_stmt>test_create_no_args self mgmt_root<block_start><with_stmt>pytest.raises(MissingRequiredCreationParameter)<block_start>mgmt_root.tm.gtm.listeners.listener.create()<block_end><block_end><def_stmt>test_create self request mgmt_root<block_start>setup_create_test(request mgmt_root 'fake_listener' 'Common')<line_sep>reg1=mgmt_root.tm.gtm.listeners.listener.create(name='fake_listener' partition='Common' address='10.10.10.10')<assert_stmt>reg1.name<eq>'fake_listener'<assert_stmt>reg1.partition<eq>'Common'<assert_stmt>reg1.address<eq>'10.10.10.10'<assert_stmt>reg1.generation<and>isinstance(reg1.generation int)<assert_stmt>reg1.kind<eq>'tm:gtm:listener:listenerstate'<assert_stmt>reg1.selfLink.startswith('https://localhost/mgmt/tm/gtm/listener/~Common~fake_listener')<block_end><def_stmt>test_create_optional_args self request mgmt_root<block_start>setup_create_test(request mgmt_root 'fake_listener' 'Common')<line_sep>reg1=mgmt_root.tm.gtm.listeners.listener.create(name='fake_listener' partition='Common' address='10.10.10.10' description='NewListener')<assert_stmt>hasattr(reg1 'description')<assert_stmt>reg1.description<eq>'NewListener'<block_end><def_stmt>test_create_duplicate self request mgmt_root<block_start>setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<with_stmt>pytest.raises(HTTPError)<as>err<block_start>mgmt_root.tm.gtm.listeners.listener.create(name='fake_listener' partition='Common' 
address='10.10.10.10')<block_end><assert_stmt>err.value.response.status_code<eq>409<block_end><block_end><class_stmt>TestRefresh(object)<block_start><def_stmt>test_refresh self request mgmt_root<block_start>setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<line_sep>r1=mgmt_root.tm.gtm.listeners.listener.load(name='fake_listener' partition='Common')<line_sep>r2=mgmt_root.tm.gtm.listeners.listener.load(name='fake_listener' partition='Common')<assert_stmt>r1.name<eq>'fake_listener'<assert_stmt>r2.name<eq>'fake_listener'<line_sep>r2.update(description='NewListener')<assert_stmt>hasattr(r2 'description')<assert_stmt><not>hasattr(r1 'description')<assert_stmt>r2.description<eq>'NewListener'<line_sep>r1.refresh()<assert_stmt>hasattr(r1 'description')<assert_stmt>r1.description<eq>'NewListener'<block_end><block_end><class_stmt>TestLoad(object)<block_start><def_stmt>test_load_no_object self mgmt_root<block_start><with_stmt>pytest.raises(HTTPError)<as>err<block_start>mgmt_root.tm.gtm.listeners.listener.load(name='fake_listener' partition='Common')<block_end><if_stmt>LooseVersion(pytest.config.getoption('--release'))<ge>LooseVersion('12.0.0')<block_start><assert_stmt>err.value.response.status_code<eq>400<block_end><else_stmt><block_start><assert_stmt>err.value.response.status_code<eq>500<block_end><block_end><def_stmt>test_load self request mgmt_root<block_start>setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<line_sep>r1=mgmt_root.tm.gtm.listeners.listener.load(name='fake_listener' partition='Common')<assert_stmt>r1.name<eq>'fake_listener'<assert_stmt><not>hasattr(r1 'description')<line_sep>r1.update(description='NewListener')<assert_stmt>hasattr(r1 'description')<assert_stmt>r1.description<eq>'NewListener'<line_sep>r2=mgmt_root.tm.gtm.listeners.listener.load(name='fake_listener' partition='Common')<assert_stmt>hasattr(r2 'description')<assert_stmt>r2.description<eq>'NewListener'<block_end><block_end><class_stmt>TestExists(object)<block_start><def_stmt>test_not_exists self request mgmt_root<block_start>result=mgmt_root.tm.gtm.listeners.listener.exists(name='my_listener' partition='Common')<assert_stmt>result<is><false><block_end><def_stmt>test_exists self request mgmt_root<block_start>r1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<line_sep>result=mgmt_root.tm.gtm.listeners.listener.exists(name='fake_listener' partition='Common')<assert_stmt>r1.name<eq>'fake_listener'<assert_stmt>result<is><true><block_end><block_end><class_stmt>TestUpdate(object)<block_start><def_stmt>test_update self request mgmt_root<block_start>r1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<assert_stmt>r1.name<eq>'fake_listener'<assert_stmt><not>hasattr(r1 'description')<line_sep>r1.update(description='NewListener')<assert_stmt>hasattr(r1 'description')<assert_stmt>r1.description<eq>'NewListener'<block_end><block_end><class_stmt>TestModify(object)<block_start><def_stmt>test_modify self request mgmt_root<block_start>r1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<line_sep>original_dict=copy.copy(r1.__dict__)<line_sep>value='description'<line_sep>r1.modify(description='NewListener')<for_stmt>k,v iteritems(original_dict)<block_start><if_stmt>k<ne>value<block_start>original_dict[k]=r1.__dict__[k]<block_end><elif_stmt>k<eq>value<block_start><assert_stmt>r1.__dict__[k]<eq>'NewListener'<block_end><block_end><block_end><block_end><class_stmt>TestDelete(object)<block_start><def_stmt>test_delete 
self request mgmt_root<block_start>r1=mgmt_root.tm.gtm.listeners.listener.create(name='fake_listener' address='10.10.10.10')<line_sep>r1.delete()<with_stmt>pytest.raises(HTTPError)<as>err<block_start>mgmt_root.tm.gtm.listeners.listener.load(name='fake_region' partition='Common')<block_end><if_stmt>LooseVersion(pytest.config.getoption('--release'))<ge>LooseVersion('12.0.0')<block_start><assert_stmt>err.value.response.status_code<eq>400<block_end><else_stmt><block_start><assert_stmt>err.value.response.status_code<eq>500<block_end><block_end><block_end><class_stmt>TestListenerCollection(object)<block_start><def_stmt>test_listener_collection self request mgmt_root<block_start>reg1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<assert_stmt>reg1.name<eq>'fake_listener'<assert_stmt>reg1.partition<eq>'Common'<assert_stmt>reg1.address<eq>'10.10.10.10'<assert_stmt>reg1.generation<and>isinstance(reg1.generation int)<assert_stmt>reg1.kind<eq>'tm:gtm:listener:listenerstate'<assert_stmt>reg1.selfLink.startswith('https://localhost/mgmt/tm/gtm/listener/~Common~fake_listener')<line_sep>rc=mgmt_root.tm.gtm.listeners.get_collection()<assert_stmt>isinstance(rc list)<assert_stmt>len(rc)<assert_stmt>isinstance(rc[0] Listener)<block_end><block_end><class_stmt>TestProfile(object)<block_start><def_stmt>test_load_missing_args self request mgmt_root<block_start>reg1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<line_sep>profcol=reg1.profiles_s.get_collection()<line_sep>prname=str(profcol[0].name)<with_stmt>pytest.raises(MissingRequiredReadParameter)<block_start>reg1.profiles_s.profile.load(name=prname)<block_end><block_end><def_stmt>test_load self request mgmt_root<block_start>reg1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<line_sep>profcol=reg1.profiles_s.get_collection()<line_sep>prname=str(profcol[0].name)<line_sep>prpart=str(profcol[0].partition)<line_sep>pr1=reg1.profiles_s.profile.load(name=prname partition=prpart)<assert_stmt>pr1.kind<eq>'tm:gtm:listener:profiles:profilesstate'<assert_stmt>pr1.selfLink.startswith('https://localhost/mgmt/tm/gtm/listener'<concat>'/~Common~fake_listener/profiles/'<concat>'~Common~dns')<block_end><def_stmt>test_refresh self request mgmt_root<block_start>reg1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<line_sep>profcol=reg1.profiles_s.get_collection()<line_sep>prname=str(profcol[0].name)<line_sep>prpart=str(profcol[0].partition)<line_sep>pr1=reg1.profiles_s.profile.load(name=prname partition=prpart)<assert_stmt>pr1.kind<eq>'tm:gtm:listener:profiles:profilesstate'<assert_stmt>pr1.selfLink.startswith('https://localhost/mgmt/tm/gtm/listener'<concat>'/~Common~fake_listener/profiles/'<concat>'~Common~dns')<line_sep>pr2=reg1.profiles_s.profile.load(name=prname partition=prpart)<line_sep>pr1.refresh()<assert_stmt>pr1.kind<eq>pr2.kind<assert_stmt>pr1.selfLink<eq>pr2.selfLink<line_sep>pr2.refresh()<assert_stmt>pr2.kind<eq>pr1.kind<assert_stmt>pr2.selfLink<eq>pr1.selfLink<block_end><def_stmt>test_create_raises self request mgmt_root<block_start>reg1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<with_stmt>pytest.raises(UnsupportedOperation)<block_start>reg1.profiles_s.profile.create()<block_end><block_end><def_stmt>test_modify_raises self request mgmt_root<block_start>reg1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 
'Common')<with_stmt>pytest.raises(UnsupportedOperation)<block_start>reg1.profiles_s.profile.modify()<block_end><block_end><def_stmt>test_update_raises self request mgmt_root<block_start>reg1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<with_stmt>pytest.raises(UnsupportedOperation)<block_start>reg1.profiles_s.profile.update()<block_end><block_end><def_stmt>test_delete_raises self request mgmt_root<block_start>reg1=setup_basic_test(request mgmt_root 'fake_listener' '10.10.10.10' 'Common')<with_stmt>pytest.raises(UnsupportedOperation)<block_start>reg1.profiles_s.profile.delete()<block_end><block_end><block_end>
<import_stmt>concurrent.futures<import_stmt>datetime<import_stmt>os<import_from_stmt>concurrent.futures ProcessPoolExecutor ThreadPoolExecutor<import_stmt>pandas<as>pd<import_stmt>pymongo<import_stmt>QUANTAXIS<as>QA<import_from_stmt>QUANTAXIS.QAFetch.QATdx QA_fetch_get_stock_list<import_from_stmt>QUANTAXIS.QAUtil DATABASE QA_util_date_stamp QA_util_get_real_date QA_util_log_info QA_util_time_stamp QA_util_to_json_from_pandas trade_date_sse <line_sep>TRADE_HOUR_END=17<def_stmt>now_time <block_start>""" 1. If today is a trading day and the current time is before 17:00, quotes are fetched up to the previous trading day's close by default. 2. If today is a trading day and the current time is after 17:00, quotes are fetched up to the current trading day's close. """<line_sep><return>(str(QA_util_get_real_date(str(datetime.date.today()-datetime.timedelta(days=1)) trade_date_sse -1 ))+" 17:00:00"<if>datetime.datetime.now().hour<l>TRADE_HOUR_END<else>str(QA_util_get_real_date(str(datetime.date.today()) trade_date_sse -1))+" 17:00:00")<block_end><def_stmt>QA_SU_save_stock_min client=DATABASE ui_log=<none> ui_progress=<none><block_start>""" JQData (jqdatasdk) implementation: save current day's stock_min data """<line_sep># import the jqdatasdk module and log in <try_stmt><block_start><import_stmt>jqdatasdk<line_sep># replace JQUSERNAME and JQUSERPASSWD with your own account and password jqdatasdk.auth("JQUSERNAME" "JQUSERPASSWD")<block_end><except_stmt><block_start><raise>ModuleNotFoundError<block_end># normalize stock codes to the JQData suffix format code_list=list(map(<lambda>x:x+".XSHG"<if>x[0]<eq>"6"<else>x+".XSHE" QA_fetch_get_stock_list().code.unique().tolist() ))<line_sep>coll=client.stock_min<line_sep>coll.create_index([("code" pymongo.ASCENDING) ("time_stamp" pymongo.ASCENDING) ("date_stamp" pymongo.ASCENDING) ])<line_sep>err=[]<def_stmt>__transform_jq_to_qa df code type_<block_start>""" Convert jqdata minute bars to the QA format so they can be stored in the database. 1. jqdatasdk format: open close high low volume money 2018-12-03 09:31:00 10.59 10.61 10.61 10.59 8339100.0 88377836.0 2. matched against the data returned by QUANTAXIS.QAFetch.QATdx.QA_fetch_get_stock_min; see that source for the details open close high low vol amount ... datetime 2018-12-03 09:31:00 10.99 10.90 10.99 10.90 2.211700e+06 2.425626e+07 ... 
"""<if_stmt>df<is><none><or>len(df)<eq>0<block_start><raise>ValueError("no data returned from jqdatasdk")<block_end>df=df.reset_index().rename(columns={"index":"datetime" "volume":"vol" "money":"amount"})<line_sep>df["code"]=code<line_sep>df["date"]=df.datetime.map(str).str.slice(0 10)<line_sep>df=df.set_index("datetime" drop=<false>)<line_sep>df["date_stamp"]=df["date"].apply(<lambda>x:QA_util_date_stamp(x))<line_sep>df["time_stamp"]=(df["datetime"].map(str).apply(<lambda>x:QA_util_time_stamp(x)))<line_sep>df["type"]=type_<line_sep><return>df[["open" "close" "high" "low" "vol" "amount" "datetime" "code" "date" "date_stamp" "time_stamp" "type" ]]<block_end><def_stmt>__saving_work code coll<block_start>QA_util_log_info("##JOB03 Now Saving STOCK_MIN ==== {}".format(code) ui_log=ui_log)<try_stmt><block_start><for_stmt>type_ ["1min" "5min" "15min" "30min" "60min"]<block_start>col_filter={"code":str(code)[0:6] "type":type_}<line_sep>ref_=coll.find(col_filter)<line_sep>end_time=str(now_time())[0:19]<if_stmt>coll.count_documents(col_filter)<g>0<block_start>start_time=ref_[coll.count_documents(col_filter)-1]["datetime"]<line_sep>QA_util_log_info("##JOB03.{} Now Saving {} from {} to {} == {}".format(["1min" "5min" "15min" "30min" "60min"].index(type_) str(code)[0:6] start_time end_time type_ ) ui_log=ui_log )<if_stmt>start_time<ne>end_time<block_start>df=jqdatasdk.get_price(security=code start_date=start_time end_date=end_time frequency=type_.split("min")[0]+"m" )<line_sep>__data=__transform_jq_to_qa(df code=code[:6] type_=type_)<if_stmt>len(__data)<g>1<block_start>coll.insert_many(QA_util_to_json_from_pandas(__data)[1::])<block_end><block_end><block_end><else_stmt><block_start>start_time="2015-01-01 09:30:00"<line_sep>QA_util_log_info("##JOB03.{} Now Saving {} from {} to {} == {}".format(["1min" "5min" "15min" "30min" "60min"].index(type_) str(code)[0:6] start_time end_time type_ ) ui_log=ui_log )<if_stmt>start_time<ne>end_time<block_start>__data=__transform_jq_to_qa(jqdatasdk.get_price(security=code start_date=start_time end_date=end_time frequency=type_.split("min")[0]+"m" ) code=code[:6] type_=type_)<if_stmt>len(__data)<g>1<block_start>coll.insert_many(QA_util_to_json_from_pandas(__data)[1::])<block_end><block_end><block_end><block_end><block_end><except_stmt>Exception<as>e<block_start>QA_util_log_info(e ui_log=ui_log)<line_sep>err.append(code)<line_sep>QA_util_log_info(err ui_log=ui_log)<block_end><block_end># JQData allows at most three concurrent connections executor=ThreadPoolExecutor(max_workers=2)<line_sep>res={executor.submit(__saving_work code_list[i_] coll)<for>i_ range(len(code_list))}<line_sep>count=0<for_stmt>i_ concurrent.futures.as_completed(res)<block_start>QA_util_log_info('The {} of Total {}'.format(count len(code_list)) ui_log=ui_log)<line_sep>strProgress="DOWNLOAD PROGRESS {} ".format(str(float(count/len(code_list)<times>100))[0:4]+"%")<line_sep>intProgress=int(count/len(code_list)<times>10000.0)<line_sep>QA_util_log_info(strProgress ui_log ui_progress=ui_progress ui_progress_int_value=intProgress)<line_sep>count=count+1<block_end><if_stmt>len(err)<l>1<block_start>QA_util_log_info("SUCCESS" ui_log=ui_log)<block_end><else_stmt><block_start>QA_util_log_info(" ERROR CODE \n " ui_log=ui_log)<line_sep>QA_util_log_info(err ui_log=ui_log)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>QA_SU_save_stock_min()<block_end>
<import_from_stmt>typing Iterable<import_from_stmt>eth2spec.test.helpers.constants PHASE0 ALTAIR BELLATRIX MINIMAL MAINNET<import_from_stmt>eth2spec.test.helpers.typing SpecForkName PresetBaseName<import_from_stmt>eth2spec.test.altair.fork test_altair_fork_basic test_altair_fork_random<import_from_stmt>eth2spec.test.bellatrix.fork test_bellatrix_fork_basic test_bellatrix_fork_random<import_from_stmt>eth2spec.gen_helpers.gen_base gen_runner gen_typing<import_from_stmt>eth2spec.gen_helpers.gen_from_tests.gen generate_from_tests<def_stmt>create_provider tests_src preset_name:PresetBaseName phase:SpecForkName fork_name:SpecForkName<arrow>gen_typing.TestProvider<block_start><def_stmt>prepare_fn <arrow><none><block_start><return><block_end><def_stmt>cases_fn <arrow>Iterable[gen_typing.TestCase]<block_start><return>generate_from_tests(runner_name='fork' handler_name='fork' src=tests_src fork_name=fork_name preset_name=preset_name phase=phase )<block_end><return>gen_typing.TestProvider(prepare=prepare_fn make_cases=cases_fn)<block_end><def_stmt>_get_fork_tests_providers <block_start><for_stmt>preset [MINIMAL MAINNET]<block_start><yield>create_provider(test_altair_fork_basic preset PHASE0 ALTAIR)<line_sep><yield>create_provider(test_altair_fork_random preset PHASE0 ALTAIR)<line_sep><yield>create_provider(test_bellatrix_fork_basic preset ALTAIR BELLATRIX)<line_sep><yield>create_provider(test_bellatrix_fork_random preset ALTAIR BELLATRIX)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>gen_runner.run_generator("forks" list(_get_fork_tests_providers()))<block_end>
<import_stmt>math<import_stmt>bpy<import_stmt>mathutils<line_sep>ALPHA_MODE_ITEMS=[('OPAQUE' 'Opaque' 'The alpha value is ignored and the rendered output is fully opaque') ('MASK' 'Mask' ('The rendered output is either fully opaque or fully transparent depending on the '<concat>'alpha value and the specified alpha cutoff value')) ('BLEND' 'Blend' 'The alpha value is used to composite the source and destination areas') ]<def_stmt>get_base_color_factor self<block_start>material=self.id_data<line_sep>diffuse=mathutils.Vector(material.diffuse_color)<line_sep>diffuse<augmul>material.diffuse_intensity<line_sep><return>[*diffuse material.alpha]<block_end><def_stmt>set_base_color_factor self value<block_start>material=self.id_data<line_sep>material.diffuse_color=value[:3]<line_sep>material.diffuse_intensity=1.0<line_sep>alpha=value[3]<line_sep>material.alpha=alpha<if_stmt>alpha<l>1.0<block_start>material.use_transparency=<true><line_sep>material.transparency_method='Z_TRANSPARENCY'<block_end><else_stmt><block_start>material.use_transparency=<false><block_end><block_end><def_stmt>get_emissive_factor self<block_start>material=self.id_data<line_sep><return>[min(material.emit 2.0)<times>0.5]<times>3<block_end><def_stmt>set_emissive_factor self value<block_start>material=self.id_data<line_sep>material.emit=mathutils.Color(value).v<times>2.0<block_end><def_stmt>get_alpha_mode self<block_start>material=self.id_data<line_sep>alpha_mode='OPAQUE'<if_stmt>material.use_transparency<block_start>gs_alpha=material.game_settings.alpha_blend<if_stmt>gs_alpha<eq>'CLIP'<block_start>alpha_mode='MASK'<block_end><else_stmt><block_start>alpha_mode='BLEND'<block_end><block_end><for_stmt>i,mode enumerate(ALPHA_MODE_ITEMS)<block_start><if_stmt>mode[0]<eq>alpha_mode<block_start>alpha_mode=i<line_sep><break><block_end><block_end><else_stmt><block_start>alpha_mode=0<block_end><return>alpha_mode<block_end><def_stmt>set_alpha_mode self value<block_start>material=self.id_data<line_sep>value=ALPHA_MODE_ITEMS[value][0]<if_stmt>value<eq>'OPAQUE'<block_start>material.use_transparency=<false><line_sep>material.game_settings.alpha_blend='OPAQUE'<block_end><elif_stmt>value<eq>'MASK'<block_start>material.use_transparency=<true><line_sep>material.game_settings.alpha_blend='CLIP'<block_end><elif_stmt>value<eq>'BLEND'<block_start>material.use_transparency=<true><line_sep>material.game_settings.alpha_blend='ALPHA'<block_end><block_end><def_stmt>get_roughness_factor self<block_start>material=self.id_data<line_sep>hardness=material.specular_hardness<if_stmt>1.0<l>self.hardness_float<l>511.0<and><not>hardness<l>self.hardness_float<l>hardness+1<block_start>self.hardness_float=material.specular_hardness<block_end>roughness=pow(2.0/(self.hardness_float+2.0) 0.25)<line_sep><return>max(min(roughness 1.0) 0.0)<block_end><def_stmt>set_roughness_factor self value<block_start>material=self.id_data<if_stmt>value<le>0<block_start>value=0.00001<block_end>roughness_texture=self.metal_roughness_texture<if_stmt>roughness_texture<block_start>slot=material.texture_slots[roughness_texture]<line_sep>slot.hardness_factor=value<block_end>material.specular_intensity=0.04/(math.pi<times>pow(value 4.0))<line_sep>material.specular_color=(1.0 1.0 1.0)<line_sep>self.hardness_float=(2.0/pow(value 4.0))-2.0<line_sep>material.specular_hardness=min(math.floor(self.hardness_float) 511)<block_end><def_stmt>get_texture self search_func index_prop<block_start>material=self.id_data<line_sep>slots=[t<for>t 
material.texture_slots<if>t<and>t.texture<and>t.texture_coords<eq>'UV']<line_sep>slot=<none><for_stmt>slot slots[::-1]<block_start><if_stmt>search_func(slot)<block_start><break><block_end><block_end><else_stmt><block_start><return>''<block_end><if_stmt>(bpy.context.space_data<and>bpy.context.space_data.type<eq>'PROPERTIES'<and>bpy.context.object)<block_start>uv_layers=bpy.context.object.data.uv_layers<line_sep>setattr(self index_prop uv_layers.find(slot.uv_layer)<if>slot.uv_layer<else>0)<block_end><return>slot.texture.name<block_end><def_stmt>_clear_slot_settings slot<block_start>slot.use_map_diffuse=<false><line_sep>slot.use_map_color_diffuse=<false><line_sep>slot.use_map_alpha=<false><line_sep>slot.use_map_translucency=<false><line_sep>slot.use_map_ambient=<false><line_sep>slot.use_map_emit=<false><line_sep>slot.use_map_mirror=<false><line_sep>slot.use_map_raymir=<false><line_sep>slot.use_map_specular=<false><line_sep>slot.use_map_color_spec=<false><line_sep>slot.use_map_hardness=<false><line_sep>slot.use_map_normal=<false><line_sep>slot.use_map_warp=<false><line_sep>slot.use_map_displacement=<false><line_sep>slot.blend_type='MIX'<block_end><def_stmt>set_texture self value current_value update_func<block_start>material=self.id_data<line_sep>current_index=material.texture_slots.find(current_value)<line_sep>slot_index=material.texture_slots.find(value)<line_sep># Clear slot <if_stmt><not>value<block_start><if_stmt>current_index<ne>-1<block_start>material.texture_slots.clear(current_index)<block_end><return><block_end># Don't do anything if the correct texture is already set <if_stmt>value<eq>current_value<block_start><return><block_end>bl_texture=bpy.data.textures[value]<line_sep># Texture is not already in a slot on this material <if_stmt>current_index<eq>-1<and>slot_index<eq>-1<block_start>slot=material.texture_slots.add()<line_sep>slot.texture=bl_texture<line_sep>_clear_slot_settings(slot)<line_sep>update_func(slot)<line_sep><return><block_end># Adjust existing slot to meet texture criteria slot=material.texture_slots[slot_index]<line_sep>_clear_slot_settings(slot)<line_sep>update_func(slot)<if_stmt>slot_index<l>current_index<block_start>material.active_texture_index=slot_index<for_stmt>_ range(current_index-slot_index)<block_start>bpy.ops.texture.slot_move(type='DOWN')<line_sep>material.active_texture_index<augsub>1<block_end><block_end><block_end><def_stmt>get_base_color_texture self<block_start><return>get_texture(self <lambda>t:t.use_map_color_diffuse 'base_color_text_index')<block_end><def_stmt>set_base_color_texture self value<block_start><def_stmt>update slot<block_start>slot.use_map_color_diffuse=<true><line_sep>slot.blend_type='MULTIPLY'<block_end>set_texture(self value get_base_color_texture(self) update)<block_end><def_stmt>get_metal_roughness_texture self<block_start><return>get_texture(self <lambda>t:t.use_map_hardness 'metal_rough_text_index')<block_end><def_stmt>set_metal_roughness_texture self value<block_start><def_stmt>update slot<block_start>slot.use_map_hardness=<true><line_sep>slot.hardness_factor=self.roughness_factor<block_end>set_texture(self value get_metal_roughness_texture(self) update)<block_end><def_stmt>get_normal_texture self<block_start><return>get_texture(self <lambda>t:t.use_map_normal 'normal_text_index')<block_end><def_stmt>set_normal_texture self value<block_start><def_stmt>update slot<block_start>slot.use_map_normal=<true><block_end>set_texture(self value get_normal_texture(self) update)<block_end><def_stmt>get_emissive_texture 
self<block_start><return>get_texture(self <lambda>t:t.use_map_emit 'emissive_text_index')<block_end><def_stmt>set_emissive_texture self value<block_start><def_stmt>update slot<block_start>slot.use_map_emit=<true><block_end>set_texture(self value get_emissive_texture(self) update)<block_end><class_stmt>PbrSettings(bpy.types.PropertyGroup)<block_start>hardness_float=bpy.props.FloatProperty()<line_sep>base_color_text_index=0<line_sep>metal_rough_text_index=0<line_sep>normal_text_index=0<line_sep>occlusion_text_index=0<line_sep>emissive_text_index=0<line_sep>base_color_factor=bpy.props.FloatVectorProperty(name='Base Color Factor' size=4 subtype='COLOR' min=0.0 max=1.0 get=get_base_color_factor set=set_base_color_factor )<line_sep>base_color_texture=bpy.props.StringProperty(name='Texture' get=get_base_color_texture set=set_base_color_texture )<line_sep>alpha_mode=bpy.props.EnumProperty(items=ALPHA_MODE_ITEMS name='Alpha Mode' default='OPAQUE' get=get_alpha_mode set=set_alpha_mode )<line_sep>alpha_cutoff=bpy.props.FloatProperty(name='Alpha Cutoff' min=0.0 max=1.0 default=0.5 )<line_sep>metallic_factor=bpy.props.FloatProperty(name='Metallic Factor' min=0.0 max=1.0 )<line_sep>roughness_factor=bpy.props.FloatProperty(name='Roughness Factor' min=0.0 max=1.0 get=get_roughness_factor set=set_roughness_factor )<line_sep>metal_roughness_texture=bpy.props.StringProperty(name='Texture' get=get_metal_roughness_texture set=set_metal_roughness_texture )<line_sep>normal_texture=bpy.props.StringProperty(name='Normal' get=get_normal_texture set=set_normal_texture )<line_sep>occlusion_texture=bpy.props.StringProperty(name='Occlusion' )<line_sep>emissive_factor=bpy.props.FloatVectorProperty(name='Emissive Factor' size=3 subtype='COLOR' min=0.0 max=1.0 get=get_emissive_factor set=set_emissive_factor )<line_sep>emissive_texture=bpy.props.StringProperty(name='Texture' get=get_emissive_texture set=set_emissive_texture )<block_end><class_stmt>PbrExportPanel(bpy.types.Panel)<block_start>bl_idname='MATERIAL_PT_pbr_export'<line_sep>bl_label='PBR Export'<line_sep>bl_space_type='PROPERTIES'<line_sep>bl_region_type='WINDOW'<line_sep>bl_context='material'<line_sep>@classmethod<def_stmt>poll cls context<block_start><return>context.material<is><not><none><and>hasattr(context.material 'pbr_export_settings')<block_end><def_stmt>draw self context<block_start>settings=context.material.pbr_export_settings<line_sep>self.layout.label('Base Color:')<line_sep>box=self.layout.box()<line_sep>box.prop(settings 'base_color_factor' text='Factor')<line_sep>box.prop_search(settings 'base_color_texture' bpy.data 'textures')<line_sep>box.prop(settings 'alpha_mode')<line_sep>box.prop(settings 'alpha_cutoff')<line_sep>self.layout.label('Roughness:')<line_sep>box=self.layout.box()<line_sep>box.prop(settings 'metallic_factor' text='Metallic')<line_sep>box.prop(settings 'roughness_factor' text='Factor')<line_sep>box.prop_search(settings 'metal_roughness_texture' bpy.data 'textures')<line_sep>self.layout.label('Emissive:')<line_sep>box=self.layout.box()<line_sep>box.prop(settings 'emissive_factor' text='Factor')<line_sep>box.prop_search(settings 'emissive_texture' bpy.data 'textures')<line_sep>self.layout.prop_search(settings 'normal_texture' bpy.data 'textures')<line_sep>self.layout.prop_search(settings 'occlusion_texture' bpy.data 'textures')<line_sep>self.layout.prop(context.material.game_settings 'use_backface_culling')<block_end><block_end>
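The add-on above defines its property group and panel, but the registration boilerplate is not part of this excerpt. A minimal sketch of how it would typically be wired up in the Blender 2.7x Python API follows; attaching `pbr_export_settings` to `Material` is inferred from the panel's `poll()` check, and everything else is an assumption rather than the original author's code.

import bpy

def register():
    # hypothetical registration for the classes defined above
    bpy.utils.register_class(PbrSettings)
    bpy.utils.register_class(PbrExportPanel)
    # PbrExportPanel.poll() expects every material to carry a pbr_export_settings group
    bpy.types.Material.pbr_export_settings = bpy.props.PointerProperty(type=PbrSettings)

def unregister():
    del bpy.types.Material.pbr_export_settings
    bpy.utils.unregister_class(PbrExportPanel)
    bpy.utils.unregister_class(PbrSettings)

if __name__ == "__main__":
    register()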
<import_from_stmt>typing AsyncIterator<class_stmt>Channel<block_start><async_keyword><def_stmt>wait_message self<arrow>AsyncIterator[bool]<block_start><ellipsis><block_end><async_keyword><def_stmt>get self<arrow>bytes<block_start><ellipsis><block_end><block_end><class_stmt>Redis<block_start><def_stmt>__init__ self conn<block_start><ellipsis><block_end><block_end>
"""Functions to assign and manipulate link capacities of a topology. Link capacities can be assigned either deterministically or randomly, according to various models. """<import_from_stmt>distutils.version LooseVersion<import_stmt>networkx<as>nx<import_from_stmt>fnss.util random_from_pdf<import_from_stmt>fnss.units capacity_units<line_sep>__all__=['set_capacities_constant' 'set_capacities_random' 'set_capacities_random_uniform' 'set_capacities_random_power_law' 'set_capacities_random_zipf' 'set_capacities_random_zipf_mandelbrot' 'set_capacities_degree_gravity' 'set_capacities_betweenness_gravity' 'set_capacities_eigenvector_gravity' 'set_capacities_communicability_gravity' 'set_capacities_pagerank_gravity' 'set_capacities_edge_betweenness' 'set_capacities_edge_communicability' 'get_capacities' 'clear_capacities']<def_stmt>set_capacities_constant topology capacity capacity_unit='Mbps' links=<none><block_start>""" Set constant link capacities Parameters ---------- topology : Topology The topology to which link capacities will be set capacity : float The value of capacity to set links : iterable, optional Iterable container of links, represented as (u, v) tuples to which capacity will be set. If None or not specified, the capacity will be applied to all links. capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) Examples -------- >>> import fnss >>> topology = fnss.erdos_renyi_topology(50, 0.1) >>> fnss.set_capacities_constant(topology, 10, 'Mbps') """<if_stmt>capacity<le>0<block_start><raise>ValueError('Capacity must be positive')<block_end><if_stmt><not>capacity_unit<in>capacity_units<block_start><raise>ValueError("The capacity_unit argument is not valid")<block_end>conversion_factor=1<if_stmt>'capacity_unit'<in>topology.graph<and>links<is><not><none># If a capacity_unit is set, that means that some links have already # been assigned capacities, so set these capacity using the same unit # already used <block_start>curr_capacity_unit=topology.graph['capacity_unit']<if_stmt>curr_capacity_unit<ne>capacity_unit<block_start>conversion_factor=float(capacity_units[capacity_unit])/capacity_units[curr_capacity_unit]<block_end><block_end><else_stmt><block_start>topology.graph['capacity_unit']=capacity_unit<block_end>edges=links<or>topology.edges()<for_stmt>u,v edges<block_start>topology.adj[u][v]['capacity']=capacity<times>conversion_factor<block_end><return><block_end><def_stmt>set_capacities_random topology capacity_pdf capacity_unit='Mbps'<block_start>""" Set random link capacities according to a given probability density function Parameters ---------- topology : Topology The topology to which link capacities will be set capacity_pdf : dict A dictionary representing the probability that a capacity value is assigned to a link capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) links : list, optional List of links, represented as (u, v) tuples to which capacity will be set. If None or not specified, the capacity will be applied to all links. 
Examples -------- >>> import fnss >>> topology = fnss.erdos_renyi_topology(50, 0.1) >>> pdf = {10: 0.5, 100: 0.2, 1000: 0.3} >>> fnss.set_capacities_constant(topology, pdf, 'Mbps') """<if_stmt><not>capacity_unit<in>capacity_units<block_start><raise>ValueError("The capacity_unit argument is not valid")<block_end><if_stmt>any((capacity<l>0<for>capacity capacity_pdf.keys()))<block_start><raise>ValueError('All capacities in capacity_pdf must be positive')<block_end>topology.graph['capacity_unit']=capacity_unit<for_stmt>u,v topology.edges()<block_start>topology.adj[u][v]['capacity']=random_from_pdf(capacity_pdf)<block_end><return><block_end><def_stmt>set_capacities_random_power_law topology capacities capacity_unit='Mbps' alpha=1.1<block_start>""" Set random link capacities according to a power-law probability density function. The probability that a capacity :math:`c_i` is assigned to a link is: .. math:: p(c_i) = \\frac{{c_i}^{-\\alpha}}{\\sum_{c_k \\in C}{{c_k}^{-\\alpha}}}. Where :math:`C` is the set of allowed capacity, i.e. the ``capacities`` argument Note that this capacity assignment differs from ``set_capacities_random_zipf`` because, while in Zipf assignment the power law relationship is between the rank of a capacity and the probability of being assigned to a link, in this assignment, the power law is between the value of the capacity and the probability of being assigned to a link. Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) """<if_stmt>alpha<le>0.0<block_start><raise>ValueError('alpha must be positive')<block_end>capacities=sorted(capacities)<line_sep>pdf=[capacities[i]<power>(-alpha)<for>i range(len(capacities))]<line_sep>norm_factor=sum(pdf)<line_sep>norm_pdf={cap:pdf[i]/norm_factor<for>i,cap enumerate(capacities)}<line_sep>set_capacities_random(topology norm_pdf capacity_unit=capacity_unit)<block_end><def_stmt>set_capacities_random_zipf_mandelbrot topology capacities capacity_unit='Mbps' alpha=1.1 q=0.0 reverse=<false><block_start>""" Set random link capacities according to a Zipf-Mandelbrot probability density function. This capacity allocation consists in the following steps: 1. All capacities are sorted in descending or order (or ascending if reverse is True) 2. The i-th value of the sorted capacities list is then assigned to a link with probability .. math:: p(i) = \\frac{1/(i + q)^\\alpha}{\\sum_{i = 1}^{N}{1/(i + q)^\\alpha}}. Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) 
alpha : float, default 1.1 The :math`\alpha` parameter of the Zipf-Mandlebrot density function q : float, default 0 The :math`q` parameter of the Zipf-Mandlebrot density function reverse : bool, optional If False, lower capacity links are the most frequent, if True, higher capacity links are more frequent """<if_stmt>alpha<le>0.0<block_start><raise>ValueError('alpha must be positive')<block_end><if_stmt>q<l>0.0<block_start><raise>ValueError('q must be >= 0')<block_end>capacities=sorted(capacities reverse=reverse)<line_sep>pdf={cap:1.0/(i+1.0+q)<power>alpha<for>i,cap enumerate(capacities)}<line_sep>norm_factor=sum(pdf.values())<line_sep>norm_pdf={capacity:pdf[capacity]/norm_factor<for>capacity pdf}<line_sep>set_capacities_random(topology norm_pdf capacity_unit=capacity_unit)<block_end><def_stmt>set_capacities_random_zipf topology capacities capacity_unit='Mbps' alpha=1.1 reverse=<false><block_start>""" Set random link capacities according to a Zipf probability density function. The same objective can be achieved by invoking the function ``set_capacities_random_zipf_mandlebrot`` with parameter q set to 0. This capacity allocation consists in the following steps: 1. All capacities are sorted in descending or order (or ascending if reverse is True) 2. The i-th value of the sorted capacities list is then assigned to a link with probability .. math:: p(i) = \\frac{1/i^\\alpha}{\\sum_{i = 1}^{N}{1/i^\\alpha}}. Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) alpha : float, default 1.1 The :math`\alpha` parameter of the Zipf density function reverse : bool, optional If False, lower capacity links are the most frequent, if True, higher capacity links are more frequent """<line_sep>set_capacities_random_zipf_mandelbrot(topology capacities alpha=alpha q=0.0 reverse=reverse capacity_unit=capacity_unit)<block_end><def_stmt>set_capacities_random_uniform topology capacities capacity_unit='Mbps'<block_start>""" Set random link capacities according to a uniform probability density function. Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) """<line_sep>capacity_pdf={capacity:1.0/len(capacities)<for>capacity capacities}<line_sep>set_capacities_random(topology capacity_pdf capacity_unit=capacity_unit)<block_end><def_stmt>set_capacities_degree_gravity topology capacities capacity_unit='Mbps'<block_start>""" Set link capacities proportionally to the product of the degrees of the two end-points of the link Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) 
"""<if_stmt>topology.is_directed()<block_start>in_degree=nx.in_degree_centrality(topology)<line_sep>out_degree=nx.out_degree_centrality(topology)<line_sep>gravity={(u v):out_degree[u]<times>in_degree[v]<for>(u v) topology.edges()}<block_end><else_stmt><block_start>degree=nx.degree_centrality(topology)<line_sep>gravity={(u v):degree[u]<times>degree[v]<for>(u v) topology.edges()}<block_end>_set_capacities_proportionally(topology capacities gravity capacity_unit=capacity_unit)<block_end><def_stmt>set_capacities_betweenness_gravity topology capacities capacity_unit='Mbps' weighted=<true><block_start>""" Set link capacities proportionally to the product of the betweenness centralities of the two end-points of the link Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) weighted : bool, optional Indicate whether link weights need to be used to compute shortest paths. If links do not have link weights or this parameter is False, shortest paths are calculated based on hop count. """<line_sep>weight='weight'<if>weighted<else><none><line_sep>centrality=nx.betweenness_centrality(topology normalized=<false> weight=weight)<line_sep>_set_capacities_gravity(topology capacities centrality capacity_unit)<block_end><def_stmt>set_capacities_eigenvector_gravity topology capacities capacity_unit='Mbps' max_iter=1000<block_start>""" Set link capacities proportionally to the product of the eigenvector centralities of the two end-points of the link Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) max_iter : int, optional The max number of iteration of the algorithm allowed. If a solution is not found within this period Raises ------ RuntimeError : if the algorithm does not converge in max_iter iterations """<try_stmt><block_start>centrality=nx.eigenvector_centrality(topology max_iter=max_iter)<block_end><except_stmt>nx.NetworkXError<block_start><raise>RuntimeError('Algorithm did not converge in %d iterations'%max_iter)<block_end>_set_capacities_gravity(topology capacities centrality capacity_unit)<block_end><def_stmt>set_capacities_pagerank_gravity topology capacities capacity_unit='Mbps' alpha=0.85 weight=<none><block_start>""" Set link capacities proportionally to the product of the Pagerank centralities of the two end-points of the link Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) alpha : float, optional The apha parameter of the PageRank algorithm weight : str, optional The name of the link attribute to use for the PageRank algorithm. Valid attributes include *capacity* *delay* and *weight*. If ``None``, all links are assigned the same weight. 
"""<line_sep>centrality=nx.pagerank_numpy(topology alpha=alpha personalization=<none> weight=weight)<line_sep>_set_capacities_gravity(topology capacities centrality capacity_unit)<block_end><def_stmt>set_capacities_communicability_gravity topology capacities capacity_unit='Mbps'<block_start>""" Set link capacities proportionally to the product of the communicability centralities of the two end-points of the link Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) """<if_stmt>LooseVersion(nx.__version__)<l>LooseVersion("2.0")<block_start>centrality=nx.communicability_centrality(topology)<block_end><else_stmt><block_start>centrality=nx.subgraph_centrality(topology)<block_end>_set_capacities_gravity(topology capacities centrality capacity_unit)<block_end><def_stmt>set_capacities_edge_betweenness topology capacities capacity_unit='Mbps' weighted=<true><block_start>""" Set link capacities proportionally to edge betweenness centrality of the link. Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) weighted : bool, optional Indicate whether link weights need to be used to compute shortest paths. If links do not have link weights or this parameter is False, shortest paths are calculated based on hop count. """<line_sep>weight='weight'<if>weighted<else><none><line_sep>centrality=nx.edge_betweenness_centrality(topology normalized=<false> weight=weight)<line_sep>_set_capacities_proportionally(topology capacities centrality capacity_unit=capacity_unit)<block_end><def_stmt>set_capacities_edge_communicability topology capacities capacity_unit='Mbps'<block_start>""" Set link capacities proportionally to edge communicability centrality of the link. Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) """<line_sep>communicability=nx.communicability(topology)<line_sep>centrality={(u v):communicability[u][v]<for>(u v) topology.edges()}<line_sep>_set_capacities_proportionally(topology capacities centrality capacity_unit=capacity_unit)<block_end><def_stmt>_set_capacities_gravity topology capacities node_metric capacity_unit='Mbps'<block_start>""" Set link capacities proportionally to the product of the values of a given node metric of the two end-points of the link Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values node_metric : dict A dictionary with all values of the given node metric, keyed by node name capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) """<line_sep>gravity={(u v):node_metric[u]<times>node_metric[v]<for>(u v) topology.edges()}<line_sep>_set_capacities_proportionally(topology capacities gravity capacity_unit=capacity_unit)<block_end><def_stmt>_set_capacities_proportionally topology capacities metric capacity_unit='Mbps'<block_start>""" Set link capacities proportionally to the value of a given edge metric. 
Parameters ---------- topology : Topology The topology to which link capacities will be set capacities : list A list of all possible capacity values metric : dict A dictionary with all values of the given edge metric, keyed by edge name capacity_unit : str, optional The unit in which capacity value is expressed (e.g. Mbps, Gbps etc..) """<if_stmt><not>capacity_unit<in>capacity_units<block_start><raise>ValueError("The capacity_unit argument is not valid")<block_end><if_stmt>any((capacity<l>0<for>capacity capacities))<block_start><raise>ValueError('All capacities must be positive')<block_end><if_stmt>len(capacities)<eq>0<block_start><raise>ValueError('The list of capacities cannot be empty')<block_end>topology.graph['capacity_unit']=capacity_unit<line_sep># If there is only one capacity the capacities list then all links are # assigned the same capacity <if_stmt>len(capacities)<eq>1<block_start>set_capacities_constant(topology capacities[0] capacity_unit)<line_sep><return><block_end># get min and max of selected edge metric min_metric=min(metric.values())<line_sep>max_metric=max(metric.values())<line_sep>capacities=sorted(capacities)<line_sep>min_capacity=capacities[0]-0.5<times>(capacities[1]-capacities[0])<line_sep>max_capacity=capacities[-1]+0.5<times>(capacities[-1]-capacities[-2])<line_sep>capacity_boundaries=[0.5<times>(capacities[i]+capacities[i+1])<for>i range(len(capacities)-1)]<line_sep>capacity_boundaries.append(max_capacity)<line_sep>metric_boundaries=[(capacity_boundary-min_capacity)<times>((max_metric-min_metric)/(max_capacity-min_capacity))+min_metric<for>capacity_boundary capacity_boundaries]<line_sep># to prevent float rounding errors metric_boundaries[-1]=max_metric+0.1<for_stmt>(u v),metric_value metric.items()<block_start><for_stmt>i,boundary enumerate(metric_boundaries)<block_start><if_stmt>metric_value<le>boundary<block_start>capacity=capacities[i]<line_sep>topology.adj[u][v]['capacity']=capacity<line_sep><break><block_end><block_end># if the loop is not stopped yet, it means that because of float # rounding error, max_capacity < metric_boundaries[-1], so we set the # greatest capacity value. # Anyway, the code should never reach this point, because before the # for loop we are already adjusting the value of metric_boundaries[-1] # to make it > max_capacity <else_stmt><block_start>topology.adj[u][v]['capacity']=capacities[-1]<block_end><block_end><block_end><def_stmt>get_capacities topology<block_start>""" Returns a dictionary with all link capacities. Parameters ---------- topology : Topology The topology whose link delays are requested Returns ------- capacities : dict Dictionary of link capacities keyed by link. Examples -------- >>> import fnss >>> topology = fnss.Topology() >>> topology.add_path([1,2,3]) >>> fnss.set_capacities_constant(topology, 10, 'Mbps') >>> capacity = get_capacities(topology) >>> capacity[(1,2)] 10 """<line_sep><return>nx.get_edge_attributes(topology 'capacity')<block_end><def_stmt>clear_capacities topology<block_start>""" Remove all capacities from the topology. Parameters ---------- topology : Topology """<line_sep>topology.graph.pop('capacity_unit' <none>)<for_stmt>u,v topology.edges()<block_start>topology.adj[u][v].pop('capacity' <none>)<block_end><block_end>
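Taken together, the module above supports a short workflow: build a topology, assign capacities with one of the models, then read them back. A usage sketch mirroring the docstring examples (fnss and networkx assumed importable):

import fnss

# random topology, as in the docstring examples
topology = fnss.erdos_renyi_topology(50, 0.2)

# uniform random choice among three capacity values
fnss.set_capacities_random_uniform(topology, [10, 100, 1000], capacity_unit='Mbps')

# or: map higher edge-betweenness links to higher capacities
fnss.set_capacities_edge_betweenness(topology, [10, 100, 1000], capacity_unit='Mbps')

capacities = fnss.get_capacities(topology)   # dict keyed by (u, v) link
fnss.clear_capacities(topology)              # strip the 'capacity' attributes again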
# Copyright 2021, <NAME>, mailto:<EMAIL> # # Python tests originally created or extracted from other people's work. The # parts were too small to be protected. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # """Tests uncompiled functions and compiled functions responses to inspect and isinstance. """<import_stmt>inspect<import_stmt>types<line_sep># nuitka-project: --python-flag=no_warnings <async_keyword><def_stmt>compiledAsyncgen <block_start><yield>1<block_end>print(type(compiledAsyncgen()))<assert_stmt>inspect.isfunction(compiledAsyncgen)<is><true><assert_stmt>inspect.isgeneratorfunction(compiledAsyncgen)<is><false><assert_stmt>inspect.iscoroutinefunction(compiledAsyncgen)<is><false><assert_stmt>inspect.isasyncgenfunction(compiledAsyncgen)<is><true><assert_stmt>isinstance(compiledAsyncgen() types.GeneratorType)<is><false><assert_stmt>isinstance(compiledAsyncgen() types.CoroutineType)<is><false><assert_stmt>isinstance(compiledAsyncgen() types.AsyncGeneratorType)<is><true><assert_stmt>type(compiledAsyncgen())<eq>types.AsyncGeneratorType type(compiledAsyncgen())<assert_stmt>isinstance(compiledAsyncgen types.AsyncGeneratorType)<is><false><line_sep>
#Global PARENT_DIR="PARENT_DIR"<line_sep>#Logging LOG_FILE="LOG_FILE"<line_sep>SAVE_DIR="SAVE_DIR"<line_sep>TENSORBOARD_LOG_DIR="TENSORBOARD_LOG_DIR"<line_sep>#Preprocessing Dataset DATASET_PATH="DATASET_PATH"<line_sep>#DeepSense Parameters ##Dataset Parameters BATCH_SIZE="BATCH_SIZE"<line_sep>HISTORY_LENGTH="HISTORY_LENGTH"<line_sep>HORIZON="HORIZON"<line_sep>MEMORY_SIZE="MEMORY_SIZE"<line_sep>NUM_ACTIONS="NUM_ACTIONS"<line_sep>NUM_CHANNELS="NUM_CHANNELS"<line_sep>SPLIT_SIZE="SPLIT_SIZE"<line_sep>WINDOW_SIZE="WINDOW_SIZE"<line_sep>##Dropout Layer Parameters CONV_KEEP_PROB="CONV_KEEP_PROB"<line_sep>DENSE_KEEP_PROB="DENSE_KEEP_PROB"<line_sep>GRU_KEEP_PROB="GRU_KEEP_PROB"<line_sep>## Convolution Layer Parameters FILTER_SIZES="FILTER_SIZES"<line_sep>KERNEL_SIZES="KERNEL_SIZES"<line_sep>PADDING="PADDING"<line_sep>SAME="SAME"<line_sep>VALID="VALID"<line_sep>## GRU Parameters GRU_CELL_SIZE="GRU_CELL_SIZE"<line_sep>GRU_NUM_CELLS="GRU_NUM_CELLS"<line_sep>##FullyConnected Layer Parameters DENSE_LAYER_SIZES="DENSE_LAYER_SIZES"<line_sep>#configuration section names CONVOLUTION="convolution"<line_sep>DATASET="dataset"<line_sep>DENSE="dense"<line_sep>DROPOUT="dropout"<line_sep>GLOBAL="global"<line_sep>GRU="gru"<line_sep>LOGGING="logging"<line_sep>PREPROCESSING="preprocessing"<line_sep>
<import_from_stmt>libcloud.container.types Provider<import_from_stmt>libcloud.container.providers get_driver<line_sep>cls=get_driver(Provider.JOYENT)<line_sep>conn=cls(host='us-east-1.docker.joyent.com' port=2376 key_file='key.pem' cert_file='~/.sdc/docker/admin/ca.pem')<line_sep>conn.list_images()<line_sep>
<import_stmt>pytest<import_from_stmt>detect_secrets.core.usage ParserBuilder<line_sep>@pytest.fixture<def_stmt>parser <block_start><return>ParserBuilder().add_console_use_arguments()<block_end><def_stmt>test_normal_mode_requires_single_file parser<block_start><with_stmt>pytest.raises(SystemExit)<block_start>parser.parse_args(['audit' 'fileA' 'fileB'])<block_end><block_end><def_stmt>test_diff_mode_requires_two_files parser<block_start><with_stmt>pytest.raises(SystemExit)<block_start>parser.parse_args(['audit' 'fileA' '--diff'])<block_end><block_end>
<import_from_future_stmt> print_function<import_from_future_stmt> absolute_import<import_stmt>unittest<import_stmt>csv<import_from_stmt>owmeta_core.context Context<import_from_stmt>owmeta_core.command OWM<import_from_stmt>owmeta_core.bundle Bundle<import_from_stmt>owmeta.worm Worm<import_from_stmt>owmeta.cell Cell<import_from_stmt>owmeta.neuron Neuron<import_from_stmt>owmeta.connection Connection<import_stmt>rdflib<as>R<import_stmt>pytest<line_sep>@[email protected]_bundle<class_stmt>DataIntegrityTest(unittest.TestCase)<block_start>""" Integration tests that read from the database and ensure that basic queries have expected answers, as a way to keep data quality high. """<line_sep>@classmethod<def_stmt>setUpClass cls# grab the list of the names of the 302 neurons <block_start>csvfile=open('tests/neurons.csv' 'r')<line_sep>reader=csv.reader(csvfile delimiter=';' quotechar='|')<line_sep># array that holds the names of the 302 neurons at class-level scope cls.neurons=[]<for_stmt>row reader<block_start><if_stmt>len(row[0])<g>0# Only saves valid neuron names <block_start>cls.neurons.append(row[0])<block_end><block_end><block_end><def_stmt>setUp self<block_start>self.bnd=Bundle('openworm/owmeta-data')<line_sep>self.bnd.initdb()<line_sep>self.conn=self.bnd.connection<line_sep>self.conf=self.conn.conf<line_sep>self.g=self.conf["rdf.graph"]<line_sep>self.context=self.conn(Context)(ident="http://openworm.org/data")<line_sep>self.qctx=self.context.stored<block_end><def_stmt>tearDown self<block_start>self.conn.disconnect()<block_end><def_stmt>test_correct_neuron_number self<block_start>""" This test verifies that the worm model has exactly 302 neurons. """<line_sep># FIXME: Test execution is not properly isolated -- it fails if # test_compare_to_xls fails. Other conditions may cause # it to pass net=self.qctx(Worm).query().get_neuron_network()<line_sep>self.assertEqual(302 net.neuron.count())<block_end><def_stmt>test_correct_muscle_number self<block_start>""" This test verifies that the worm model has exactly 158 muscles. 95 body wall muscles, 37 Pharynx muscles, 26 other muscles See counts on row 3 here: https://docs.google.com/spreadsheets/d/1NDx9LRF_B2phR5w4HlEtxJzxx1ZIPT2gA0ZmNmozjos/edit#gid=1 """<line_sep>self.assertEqual(158 self.qctx(Worm).query().muscle.count())<block_end><def_stmt>test_INS_26_neuropeptide_neuron_list self<block_start>""" This test verifies that the set of neurons which contain the neuropeptide INS-26 is correct (the list is given below). """<line_sep>neuronlist=self.qctx(Neuron)()<line_sep>neuronlist.neuropeptide("INS-26")<line_sep>thlist=set(x.name()<for>x neuronlist.load())<line_sep>self.assertEqual({'ASEL' 'ASER' 'ASIL' 'ASIR'} thlist)<block_end><def_stmt>test_bentley_expr_data self<block_start>""" This verifies that the data in Bentley et. al (2016) receptor expression has been incorporated, by checking that one of the novel receptor expression patterns is in the worm. """<line_sep>va9=self.qctx(Neuron).query('VA9')<line_sep>self.assertIn('LGC-53' va9.receptors())<block_end><def_stmt>test_unique_neuron_node self<block_start>""" There should one and only one unique RDF node for every neuron. If more than one is present for a given cell name, then our data is inconsistent. If there is not at least one present, then we are missing neurons. 
"""<line_sep>results={}<for_stmt>n self.neurons# Create a SPARQL query per neuron that looks for all RDF nodes # that have text matching the name of the neuron <block_start>qres=self.g.query(f""" SELECT distinct ?n WHERE {{ ?n <{Cell.name.link}> {R.Literal(n).n3()} }} LIMIT 5 """)<line_sep>results[n]=(len(qres) [x[0]<for>x qres])<block_end># If there is not only one result back, then there is more than one RDF # node. more_than_one=[(x results[x])<for>x results<if>results[x][0]<g>1]<line_sep>less_than_one=[(x results[x])<for>x results<if>results[x][0]<l>1]<line_sep>self.assertEqual(0 len(more_than_one) "Some neurons have more than 1 node: "+"\n".join(str(x)<for>x more_than_one))<line_sep>self.assertEqual(0 len(less_than_one) "Some neurons have no node: "+"\n".join(str(x)<for>x less_than_one))<block_end><def_stmt>test_neurons_have_types self<block_start>""" Every Neuron should have a non-blank type """<line_sep>results=set()<for_stmt>n self.neurons<block_start>s=f'''SELECT ?v WHERE {{ ?k <{Cell.name.link}> {R.Literal(n).n3()} . ?k <http://www.w3.org/1999/02/22-rdf-syntax-ns#type> <{Neuron.rdf_type}> . ?k <{Neuron.type.link}> ?v . }}'''<line_sep>qres=self.g.query(s)<for_stmt>x qres<block_start>v=x[0]<if_stmt>isinstance(v R.Literal)<block_start>results.add(n)<block_end><block_end><block_end>self.assertEqual(len(results) len(self.neurons) "Some neurons are missing a type: {}".format(set(self.neurons)-results))<block_end><def_stmt>test_neuron_GJ_degree self<block_start>""" Get the number of gap junctions from a representation """<line_sep># was 81 -- now retunring 44 -- are we sure this is correct? self.assertEqual(self.qctx(Neuron).query(name='AVAL').GJ_degree() 44)<block_end><def_stmt>test_neuron_Syn_degree self<block_start>""" Get the number of chemical synapses from a representation """<line_sep># was 187 -- now returning 105 -- are we sure this is correct? self.assertEqual(self.qctx(Neuron).query(name='AVAL').Syn_degree() 105)<block_end>@unittest.skip("have not yet defined asserts")<def_stmt>test_what_nodes_get_type_info self<block_start>qres=self.g.query("""SELECT ?o ?p ?s WHERE {{ ?o <http://openworm.org/entities/SimpleProperty/value> "motor". ?o ?p ?s # for that type ?o, get its value ?v }} LIMIT 10 """)<for_stmt>row qres<block_start>print(row)<block_end><block_end><def_stmt>test_all_cells_have_wormbaseID self<block_start>""" This test verifies that every cell has a Wormbase ID. """<line_sep>cells=set(self.qctx(Cell)().load())<for_stmt>cell cells<block_start><assert_stmt>cell.wormbaseID()<is><not><none><block_end><block_end><def_stmt>test_all_neurons_have_wormbaseID self<block_start>""" This test verifies that every neuron has a Wormbase ID. """<line_sep>net=self.qctx(Worm).query().get_neuron_network()<for_stmt>neuron_object net.neurons()<block_start><assert_stmt>neuron_object.wormbaseID()<is><not><none><block_end><block_end><def_stmt>test_all_muscles_have_wormbaseID self<block_start>""" This test verifies that every muscle has a Wormbase ID. """<line_sep>muscles=self.qctx(Worm).query().muscles()<for_stmt>muscle_object muscles<block_start><assert_stmt>muscle_object.wormbaseID()<is><not><none><block_end><block_end><def_stmt>test_all_neurons_are_cells self<block_start>""" This test verifies that all Neuron objects are also Cell objects. 
"""<line_sep>net=self.qctx(Worm).query().get_neuron_network()<for_stmt>neuron_object net.neurons()<block_start>self.assertIsInstance(neuron_object Cell)<block_end><block_end><def_stmt>test_all_muscles_are_cells self<block_start>""" This test verifies that all Muscle objects are also Cell objects. """<line_sep>muscles=self.qctx(Worm).query().muscles()<for_stmt>muscle_object muscles<block_start>self.assertIsInstance(muscle_object Cell)<block_end><block_end><def_stmt>test_correct_connections_number self<block_start>""" This test verifies that there are exactly 7319 connections. """<line_sep>net=self.qctx(Worm).query().get_neuron_network()<line_sep># XXX: The synapses contain some cells that aren't neurons self.assertEqual(7319 net.synapses.count())<block_end><def_stmt>test_number_neuron_to_neuron self<block_start>""" This test verifies that the worm model has exactly 5805 neuron to neuron connections. """<line_sep>synapse=self.qctx(Connection)()<line_sep>synapse.termination('neuron')<line_sep>self.qctx(Worm).query().get_neuron_network().synapse(synapse)<line_sep>self.assertEqual(5805 synapse.count())<block_end><def_stmt>test_number_neuron_to_muscle self<block_start>""" This test verifies that the worm model has exactly 1111 neuron to muscle connections. """<line_sep>synapse=self.qctx(Connection)()<line_sep>synapse.termination('muscle')<line_sep>self.qctx(Worm).query().get_neuron_network().synapse(synapse)<line_sep>self.assertEqual(1111 synapse.count())<block_end><def_stmt>test_correct_number_unique_neurons self<block_start>""" This test verifies that the worm model has exactly 300 unique neurons making connections. """<line_sep>synapse=self.qctx(Connection)()<line_sep>pre=self.qctx(Neuron)()<line_sep>synapse.pre_cell(pre)<line_sep>self.qctx(Worm).query().get_neuron_network().synapse(synapse)<line_sep>self.assertEqual(300 pre.count())<block_end><def_stmt>test_unconnected_neurons self<block_start>""" This test verifies that there are exactly 2 unconnected neurons, i.e., CANL and CANR, in the new connectome. """<line_sep># In previous tests, there is a check for exactly 302 neurons in total. # There is also a test for exactly 300 unique neurons making connections. # That means it should be enough to check that the set {CANL, CANR} and # the set of neurons making connections are disjoint. neuron=self.qctx(Neuron)()<line_sep>synapse=self.qctx(Connection)()<line_sep>synapse.pre_cell(neuron)<line_sep>self.qctx(Worm).query().get_neuron_network().synapse(synapse)<line_sep>connected_neurons=set()<line_sep>unconnected_neurons={'CANL' 'CANR'}<for_stmt>name neuron.name.get()<block_start>connected_neurons.add(name)<block_end>self.assertTrue(connected_neurons.isdisjoint(unconnected_neurons))<block_end><def_stmt>test_neuron_lineage_names self<block_start>""" Neurons should have lineage names in the bundle """<line_sep>neuron=self.qctx(Neuron)()<line_sep>self.qctx(Worm).query().get_neuron_network().neuron(neuron)<for_stmt>n neuron.load()<block_start><assert_stmt>set(n.lineageName.get())<block_end><block_end><block_end>
"""Create a new document."""<line_sep># Import local modules <import_from_stmt>photoshop Session<with_stmt>Session()<as>ps<block_start>ps.app.preferences.rulerUnits=ps.Units.Pixels<line_sep>ps.app.documents.add(1920 1080 name="my_new_document")<block_end>
<import_stmt>os<import_stmt>numpy<as>np<import_from_stmt>sklearn.datasets load_iris<import_from_stmt>sklearn.ensemble RandomForestClassifier<import_stmt>shap<import_stmt>mlflow<import_from_stmt>utils to_pandas_Xy<line_sep># prepare training data X,y=to_pandas_Xy(load_iris())<line_sep># train a model model=RandomForestClassifier()<line_sep>model.fit(X y)<line_sep># log an explanation <with_stmt>mlflow.start_run()<as>run<block_start>mlflow.shap.log_explanation(model.predict_proba X)<block_end># list artifacts client=mlflow.tracking.MlflowClient()<line_sep>artifact_path="model_explanations_shap"<line_sep>artifacts=[x.path<for>x client.list_artifacts(run.info.run_id artifact_path)]<line_sep>print("# artifacts:")<line_sep>print(artifacts)<line_sep># load back the logged explanation dst_path=client.download_artifacts(run.info.run_id artifact_path)<line_sep>base_values=np.load(os.path.join(dst_path "base_values.npy"))<line_sep>shap_values=np.load(os.path.join(dst_path "shap_values.npy"))<line_sep># show a force plot shap.force_plot(base_values[0] shap_values[0 0 :] X.iloc[0 :] matplotlib=<true>)<line_sep>
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("GEN")<line_sep># this will run plig-in energy-flat random particle gun # and puts particles (HepMCPRoduct) into edm::Event process.load("SimGeneral.HepPDTESSource.pdt_cfi")<line_sep>process.RandomNumberGeneratorService=cms.Service("RandomNumberGeneratorService" moduleSeeds=cms.PSet(generator=cms.untracked.uint32(456789)) sourceSeed=cms.untracked.uint32(54321))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(2000))<line_sep>process.source=cms.Source("EmptySource")<line_sep>process.generator=cms.EDProducer("FlatRandomEGunProducer" PGunParameters=cms.PSet(PartID=cms.vint32(211) MinEta=cms.double(3.5765) MaxEta=cms.double(3.5765) MinPhi=cms.double(0.6109) MaxPhi=cms.double(0.6109) MinE=cms.double(100.0) MaxE=cms.double(100.0)) AddAntiParticle=cms.bool(<false>) psethack=cms.string('single pion 100GeV on fwd hcal') Verbosity=cms.untracked.int32(0) ## for printouts, set it to 1 (or greater) firstRun=cms.untracked.uint32(1))<line_sep>process.GEN=cms.OutputModule("PoolOutputModule" fileName=cms.untracked.string('mc_pi+100_etaphi344.root'))<line_sep>process.p1=cms.Path(process.generator)<line_sep>process.p2=cms.EndPath(process.GEN)<line_sep>
<import_from_stmt>base DatadogBaseAction<import_from_stmt>datadog api<class_stmt>DatadogCreateComment(DatadogBaseAction)<block_start><def_stmt>_run self **kwargs<block_start><return>api.Comment.create(**kwargs)<block_end><block_end><class_stmt>DatadogDeleteComment(DatadogBaseAction)<block_start><def_stmt>_run self **kwargs<block_start><return>api.Comment.delete(kwargs.pop("comment_id"))<block_end><block_end><class_stmt>DatadogEditComment(DatadogBaseAction)<block_start><def_stmt>_run self **kwargs<block_start><return>api.Comment.update(kwargs.pop("comment_id") **kwargs)<block_end><block_end>
"""Python Cookbook Chapter 9, recipe 10. """<import_stmt>logging<import_stmt>sys<import_from_stmt>logging Formatter<import_from_stmt>pathlib Path<def_stmt>create_log <block_start>PROD_LOG_FORMAT=('[{asctime}]'<concat>' {levelname} in {module}: {message}')<with_stmt>Path('sample.log').open('w')<as>sample_log_file<block_start>logging.basicConfig(stream=sample_log_file level=logging.DEBUG)<line_sep>logger=logging.getLogger()<for_stmt>handler logger.handlers<block_start>handler.setFormatter(Formatter(PROD_LOG_FORMAT style='{'))<block_end>logger.info("Sample Message One")<line_sep>logger.debug("Debugging")<line_sep>logger.warn("Something might have gone wrong")<block_end><block_end><import_stmt>re<import_from_stmt>pathlib Path<import_stmt>csv<line_sep>log_pattern=re.compile(r"\[(?P<timestamp>.*?)\]"<concat>r"\s(?P<levelname>\w+)"<concat>r"\sin\s(?P<module>[\w\._]+):"<concat>r"\s(?P<message>.*)")<def_stmt>extract_row_iter source_log_file<block_start><for_stmt>line source_log_file<block_start>match=log_pattern.match(line)<if_stmt>match<is><none><block_start><continue><block_end><yield>match.groupdict()<block_end><block_end><def_stmt>parse_log <block_start>summary_path=Path('summary_log.csv')<with_stmt>summary_path.open('w')<as>summary_file<block_start>writer=csv.DictWriter(summary_file ['timestamp' 'levelname' 'module' 'message'])<line_sep>writer.writeheader()<line_sep>source_log_dir=Path('.')<for_stmt>source_log_path source_log_dir.glob('*.log')<block_start><with_stmt>source_log_path.open()<as>source_log_file<block_start>writer.writerows(extract_row_iter(source_log_file))<block_end>print('Converted' source_log_path 'to' summary_path)<block_end><block_end><block_end><def_stmt>counting_extract_row_iter counts source_log_file<block_start><for_stmt>line source_log_file<block_start>match=log_pattern.match(line)<if_stmt>match<is><none><block_start>counts['non-match']<augadd>1<line_sep><continue><block_end>counts['valid']<augadd>1<line_sep><yield>match.groupdict()<block_end><block_end><import_from_stmt>collections Counter<def_stmt>parse_log2 <block_start>summary_path=Path('summary_log.csv')<with_stmt>summary_path.open('w')<as>summary_file<block_start>writer=csv.DictWriter(summary_file ['timestamp' 'levelname' 'module' 'message'])<line_sep>writer.writeheader()<line_sep>source_log_dir=Path('.')<for_stmt>source_log_path source_log_dir.glob('*.log')<block_start>counts=Counter()<with_stmt>source_log_path.open()<as>source_log_file<block_start>writer.writerows(counting_extract_row_iter(counts source_log_file))<block_end>print('Converted' source_log_path 'to' summary_path)<line_sep>print(counts)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>create_log()<line_sep>parse_log2()<block_end>
#----------------------------------------------------------------------------- # Copyright (c) 2012 - 2018, Anaconda, Inc. and Intake contributors # All rights reserved. # # The full license is in the LICENSE file, distributed with this software. #----------------------------------------------------------------------------- <import_stmt>importlib<import_stmt>re<import_stmt>logging<import_stmt>os<import_stmt>warnings<import_from_stmt>._version get_versions<line_sep>__version__=get_versions()['version']<del_stmt>get_versions<import_from_stmt>.source registry<import_from_stmt>.catalog.base Catalog<line_sep>imports={"DataSource":"intake.source.base:DataSource" 'Schema':"intake.source.base:Schema" "load_combo_catalog":"intake.catalog.default:load_combo_catalog" "upload":"intake.container:upload" "gui":"intake.interface:instance" "cat":"intake.catalog:builtin" "output_notebook":"intake.interface:output_notebook" "register_driver":"intake.source:register_driver" "unregister_driver":"intake.source:unregister_driver" }<line_sep>openers=set()<line_sep>logger=logging.getLogger('intake')<def_stmt>__getattr__ attr<block_start>"""Lazy attribute propagator Defers inputs of functions until they are needed, according to the contents of the ``imports`` (submodules and classes) and ``openers`` (functions which instantiate data sources directly) dicts. All keys in ``openers`` must start with "open_", else they will be ignored. """<line_sep>gl=globals()<if_stmt>attr<in>openers<and>attr[:5]<eq>"open_"<block_start>driver=registry[attr[5:]]# "open_..." gl[attr]=driver<block_end><else_stmt><block_start><if_stmt>attr<in>gl<block_start><return>gl[attr]<block_end><elif_stmt>attr<in>imports<block_start>dest=imports[attr]<line_sep>modname=dest.split(":" 1)[0]<line_sep>logger.debug("Importing: %s"%modname)<line_sep>mod=importlib.import_module(modname)<if_stmt>":"<in>dest<block_start>gl[attr]=getattr(mod dest.split(":")[1])<block_end><else_stmt><block_start>gl[attr]=mod<block_end><block_end><block_end><if_stmt>attr<eq>"__all__"<block_start><return>__dir__()<block_end><try_stmt><block_start><return>gl[attr]<block_end><except_stmt>KeyError<block_start><raise>AttributeError(attr)<block_end><block_end><def_stmt>__dir__ *_ **__<block_start><return>sorted(list(globals())+list(openers)+list(imports))<block_end><def_stmt>make_open_functions <block_start>"""From the current state of ``registry``, create open_* functions"""<import_from_stmt>.source.discovery drivers<for_stmt>name drivers.enabled_plugins()<block_start>func_name='open_'+name<if_stmt><not>func_name.isidentifier()# primitive name normalization <block_start>func_name=re.sub('[-=~^&|@+]' '_' func_name)<block_end><if_stmt>func_name.isidentifier()# stash name for dir() and later fetch <block_start>openers.add(func_name)<block_end><else_stmt><block_start>warnings.warn('Invalid Intake plugin name "%s" found.' name stacklevel=2)<block_end><block_end><block_end>make_open_functions()<def_stmt>open_catalog uri=<none> **kwargs<block_start>"""Create a Catalog object Can load YAML catalog files, connect to an intake server, or create any arbitrary Catalog subclass instance. In the general case, the user should supply ``driver=`` with a value from the plugins registry which has a container type of catalog. File locations can generally be remote, if specifying a URL protocol. 
The default behaviour if not specifying the driver is as follows: - if ``uri`` is a a single string ending in "yml" or "yaml", open it as a catalog file - if ``uri`` is a list of strings, a string containing a glob character ("*") or a string not ending in "y(a)ml", open as a set of catalog files. In the latter case, assume it is a directory. - if ``uri`` beings with protocol ``"intake:"``, connect to a remote Intake server - if ``uri`` is ``None`` or missing, create a base Catalog object without entries. Parameters ---------- uri: str or pathlib.Path Designator for the location of the catalog. kwargs: passed to subclass instance, see documentation of the individual catalog classes. For example, ``yaml_files_cat`` (when specifying multiple uris or a glob string) takes the additional parameter ``flatten=True|False``, specifying whether all data sources are merged in a single namespace, or each file becomes a sub-catalog. See also -------- intake.open_yaml_files_cat, intake.open_yaml_file_cat, intake.open_intake_remote """<line_sep>driver=kwargs.pop('driver' <none>)<if_stmt>isinstance(uri os.PathLike)<block_start>uri=os.fspath(uri)<block_end><if_stmt>driver<is><none><block_start><if_stmt>uri<block_start><if_stmt>((isinstance(uri str)<and>"*"<in>uri)<or>((isinstance(uri (list tuple)))<and>len(uri)<g>1))# glob string or list of files/globs <block_start>driver='yaml_files_cat'<block_end><elif_stmt>isinstance(uri (list tuple))<and>len(uri)<eq>1<block_start>uri=uri[0]<if_stmt>"*"<in>uri[0]# single glob string in a list <block_start>driver='yaml_files_cat'<block_end><else_stmt># single filename in a list <block_start>driver='yaml_file_cat'<block_end><block_end><elif_stmt>isinstance(uri str)# single URL <block_start><if_stmt>uri.startswith('intake:')# server <block_start>driver='intake_remote'<block_end><else_stmt><block_start><if_stmt>uri.endswith(('.yml' '.yaml'))<block_start>driver='yaml_file_cat'<block_end><else_stmt><block_start>uri=uri.rstrip('/')+'/*.y*ml'<line_sep>driver='yaml_files_cat'<block_end><block_end><block_end><else_stmt><block_start><raise>ValueError("URI not understood: %s"%uri)<block_end><block_end><else_stmt># empty cat <block_start>driver='catalog'<block_end><block_end><if_stmt>'_file'<not><in>driver<block_start>kwargs.pop('fs' <none>)<block_end><if_stmt>driver<not><in>registry<block_start><raise>ValueError('Unknown catalog driver (%s), supply one of: %s'%(driver list(sorted(registry))))<block_end><return>registry[driver](uri **kwargs)<block_end>
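The branches in open_catalog() above translate into the following call forms; the file names and server URL below are placeholders, not values from the source.

import intake

cat = intake.open_catalog("sources.yml")            # single YAML file -> yaml_file_cat
cats = intake.open_catalog("catalogs/*.yaml")       # glob string      -> yaml_files_cat
tree = intake.open_catalog("catalogs")              # directory        -> expanded to '<dir>/*.y*ml'
remote = intake.open_catalog("intake://host:5000")  # Intake server    -> intake_remote
empty = intake.open_catalog()                       # no URI           -> empty base Catalog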
# --coding:utf-8-- # # Copyright (c) 2020 vesoft inc. All rights reserved. # # This source code is licensed under Apache 2.0 License. <import_stmt>time<import_from_stmt>tests.common.nebula_test_suite NebulaTestSuite<class_stmt>TestBigInt(NebulaTestSuite)<block_start>@classmethod<def_stmt>prepare self<block_start>resp=self.execute('CREATE SPACE IF NOT EXISTS BigInt2031(partition_num={partition_num}, replica_factor={replica_factor})'.format(partition_num=self.partition_num replica_factor=self.replica_factor))<line_sep>self.check_resp_succeeded(resp)<line_sep>time.sleep(self.delay)<line_sep>resp=self.execute('USE BigInt2031')<line_sep>self.check_resp_succeeded(resp)<block_end><def_stmt>test_issue2031 self<block_start>time.sleep(self.delay)<line_sep>resp=self.execute('CREATE TAG person1(name string, age bigint)')<line_sep>self.check_resp_failed(resp)<line_sep>resp=self.execute('CREATE TAG person2(name string, age bigint DEFAULT 100)')<line_sep>self.check_resp_failed(resp)<line_sep>resp=self.execute('CREATE TAG person3(name string, age Bigint)')<line_sep>self.check_resp_failed(resp)<line_sep>resp=self.execute('CREATE TAG person4(name string, age BIGINT)')<line_sep>self.check_resp_failed(resp)<block_end>@classmethod<def_stmt>cleanup self<block_start>resp=self.execute('drop space BigInt2031')<line_sep>self.check_resp_succeeded(resp)<block_end><block_end>
<import_stmt>numpy<import_stmt>scipy.signal<import_from_stmt>generate *<def_stmt>generate <block_start><def_stmt>gentaps n<block_start>b,a=scipy.signal.butter(n-1 0.5)<line_sep><return>b.astype(numpy.float32) a.astype(numpy.float32)<block_end><def_stmt>process b_taps a_taps x<block_start><return>[scipy.signal.lfilter(b_taps a_taps x).astype(type(x[0]))]<block_end>vectors=[]<line_sep>x=random_complex64(256)<line_sep>b_taps,a_taps=gentaps(3)<line_sep>vectors.append(TestVector([b_taps a_taps] [x] process(b_taps a_taps x) "3 Float32 b taps, 3 Float32 a taps, 256 ComplexFloat32 input, 256 ComplexFloat32 output"))<line_sep>b_taps,a_taps=gentaps(5)<line_sep>vectors.append(TestVector([b_taps a_taps] [x] process(b_taps a_taps x) "5 Float32 b taps, 5 Float32 a taps, 256 ComplexFloat32 input, 256 ComplexFloat32 output"))<line_sep>b_taps,a_taps=gentaps(10)<line_sep>vectors.append(TestVector([b_taps a_taps] [x] process(b_taps a_taps x) "10 Float32 b taps, 10 Float32 a taps, 256 ComplexFloat32 input, 256 ComplexFloat32 output"))<line_sep>x=random_float32(256)<line_sep>b_taps,a_taps=gentaps(3)<line_sep>vectors.append(TestVector([b_taps a_taps] [x] process(b_taps a_taps x) "3 Float32 b taps, 3 Float32 a taps, 256 Float32 input, 256 Float32 output"))<line_sep>b_taps,a_taps=gentaps(5)<line_sep>vectors.append(TestVector([b_taps a_taps] [x] process(b_taps a_taps x) "5 Float32 b taps, 5 Float32 a taps, 256 Float32 input, 256 Float32 output"))<line_sep>b_taps,a_taps=gentaps(10)<line_sep>vectors.append(TestVector([b_taps a_taps] [x] process(b_taps a_taps x) "10 Float32 b taps, 10 Float32 a taps, 256 Float32 input, 256 Float32 output"))<line_sep><return>BlockSpec("IIRFilterBlock" vectors 1e-6)<block_end>
<import_from_stmt>sklearn.datasets fetch_olivetti_faces<import_stmt>matplotlib.pyplot<as>plt<line_sep>faces=fetch_olivetti_faces()<line_sep># set up the figure fig=plt.figure(figsize=(6 6))# figure size in inches fig.subplots_adjust(left=0 right=1 bottom=0 top=1 hspace=0.05 wspace=0.05)<line_sep># plot the faces: <for_stmt>i range(64)<block_start>ax=fig.add_subplot(8 8 i+1 xticks=[] yticks=[])<line_sep>ax.imshow(faces.images[i] cmap=plt.cm.bone interpolation='nearest')<block_end>
""" Adds page content format. """<import_stmt>datetime<import_stmt>logging<import_from_stmt>sqlalchemy Column MetaData<import_from_stmt>galaxy.model.custom_types TrimmedString<import_from_stmt>galaxy.model.migrate.versions.util add_column drop_column<line_sep>now=datetime.datetime.utcnow<line_sep>log=logging.getLogger(__name__)<line_sep>metadata=MetaData()<def_stmt>upgrade migrate_engine<block_start>metadata.bind=migrate_engine<line_sep>print(__doc__)<line_sep>metadata.reflect()<line_sep>content_format_column=Column('content_format' TrimmedString(32) default='html' server_default="html" nullable=<false>)<line_sep>add_column(content_format_column 'page_revision' metadata)<block_end><def_stmt>downgrade migrate_engine<block_start>metadata.bind=migrate_engine<line_sep>metadata.reflect()<line_sep>drop_column('content_format' 'page_revision' metadata)<block_end>
# Copyright (c) 2021 zfit <import_stmt>tensorflow<as>tf<import_from_stmt>.core.constraint GaussianConstraint PoissonConstraint SimpleConstraint LogNormalConstraint <import_from_stmt>.util ztyping<line_sep>__all__=["nll_gaussian" "SimpleConstraint" "GaussianConstraint" "PoissonConstraint" "LogNormalConstraint"]<def_stmt>nll_gaussian params:ztyping.ParamTypeInput observation:ztyping.NumericalScalarType uncertainty:ztyping.NumericalScalarType<arrow>tf.Tensor<block_start>"""Return negative log likelihood graph for gaussian constraints on a list of parameters. Args: params: The parameters to constraint. observation: observed values of the parameter. uncertainty: Uncertainties or covariance/error. matrix of the observed values. Can either be a single value, a list of values, an array or a tensor. Returns: The constraint object. Raises: ShapeIncompatibleError: if params, mu and sigma don't have the same size. """<line_sep><return>GaussianConstraint(params=params observation=observation uncertainty=uncertainty)<block_end>
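A hedged sketch of how such a constraint is typically attached to a fit; the parameter name and numeric values are illustrative, and zfit is assumed importable as a package with this module exposed as zfit.constraint.

import zfit

mu = zfit.Parameter("mu", 1.0)
constraint = zfit.constraint.nll_gaussian(params=mu, observation=1.2, uncertainty=0.1)
# the constraint object is then handed to a loss, e.g.
# zfit.loss.UnbinnedNLL(model, data, constraints=constraint)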
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # # http://www.apache.org/licenses/LICENSE-2.0 # # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. <import_from_stmt>aliyunsdkcore.request RpcRequest<import_from_stmt>aliyunsdkeipanycast.endpoint endpoint_data<class_stmt>AllocateAnycastEipAddressRequest(RpcRequest)<block_start><def_stmt>__init__ self<block_start>RpcRequest.__init__(self 'Eipanycast' '2020-03-09' 'AllocateAnycastEipAddress' 'eipanycast')<line_sep>self.set_method('POST')<if_stmt>hasattr(self "endpoint_map")<block_start>setattr(self "endpoint_map" endpoint_data.getEndpointMap())<block_end><if_stmt>hasattr(self "endpoint_regional")<block_start>setattr(self "endpoint_regional" endpoint_data.getEndpointRegional())<block_end><block_end><def_stmt>get_Bandwidth self<block_start><return>self.get_query_params().get('Bandwidth')<block_end><def_stmt>set_Bandwidth self Bandwidth<block_start>self.add_query_param('Bandwidth' Bandwidth)<block_end><def_stmt>get_ServiceLocation self<block_start><return>self.get_query_params().get('ServiceLocation')<block_end><def_stmt>set_ServiceLocation self ServiceLocation<block_start>self.add_query_param('ServiceLocation' ServiceLocation)<block_end><def_stmt>get_ClientToken self<block_start><return>self.get_query_params().get('ClientToken')<block_end><def_stmt>set_ClientToken self ClientToken<block_start>self.add_query_param('ClientToken' ClientToken)<block_end><def_stmt>get_Description self<block_start><return>self.get_query_params().get('Description')<block_end><def_stmt>set_Description self Description<block_start>self.add_query_param('Description' Description)<block_end><def_stmt>get_InternetChargeType self<block_start><return>self.get_query_params().get('InternetChargeType')<block_end><def_stmt>set_InternetChargeType self InternetChargeType<block_start>self.add_query_param('InternetChargeType' InternetChargeType)<block_end><def_stmt>get_Name self<block_start><return>self.get_query_params().get('Name')<block_end><def_stmt>set_Name self Name<block_start>self.add_query_param('Name' Name)<block_end><def_stmt>get_InstanceChargeType self<block_start><return>self.get_query_params().get('InstanceChargeType')<block_end><def_stmt>set_InstanceChargeType self InstanceChargeType<block_start>self.add_query_param('InstanceChargeType' InstanceChargeType)<block_end><block_end>
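Requests generated this way are normally dispatched through aliyunsdkcore's AcsClient; a hedged sketch follows, with the credentials, region, and field values as placeholders rather than documented defaults.

from aliyunsdkcore.client import AcsClient

client = AcsClient("<access-key-id>", "<access-key-secret>", "cn-hangzhou")

request = AllocateAnycastEipAddressRequest()
request.set_Bandwidth("200")
request.set_ServiceLocation("international")   # placeholder service location
request.set_Name("demo-anycast-eip")

# sends the RPC and raises on API errors; returns the raw response body
response = client.do_action_with_exception(request)
print(response)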
# This file is part of Scapy # See http://www.secdev.org/projects/scapy for more information # Copyright (C) <NAME> <<EMAIL>> # Copyright (C) <NAME> <<EMAIL>> # This program is published under a GPLv2 license """ Native Microsoft Windows sockets (L3 only) ## Notice: ICMP packets DISCLAIMER: Please use Npcap/Winpcap to send/receive ICMP. It is going to work. Below is some additional information, mainly implemented in a testing purpose. When in native mode, everything goes through the Windows kernel. This firstly requires that the Firewall is open. Be sure it allows ICMPv4/6 packets in and out. Windows may drop packets that it finds wrong. for instance, answers to ICMP packets with id=0 or seq=0 may be dropped. It means that sent packets should (most of the time) be perfectly built. A perfectly built ICMP req packet on Windows means that its id is 1, its checksum (IP and ICMP) are correctly built, but also that its seq number is in the "allowed range". In fact, every time an ICMP packet is sent on Windows, a global sequence number is increased, which is only reset at boot time. The seq number of the received ICMP packet must be in the range [current, current + 3] to be valid, and received by the socket. The current number is quite hard to get, thus we provide in this module the get_actual_icmp_seq() function. Example: >>> conf.use_pcap = False >>> a = conf.L3socket() # This will (most likely) work: >>> current = get_current_icmp_seq() >>> a.sr(IP(dst="www.google.com", ttl=128)/ICMP(id=1, seq=current)) # This won't: >>> a.sr(IP(dst="www.google.com", ttl=128)/ICMP()) PS: on computers where the firewall isn't open, Windows temporarily opens it when using the `ping` util from cmd.exe. One can first call a ping on cmd, then do custom calls through the socket using get_current_icmp_seq(). See the tests (windows.uts) for an example. """<import_stmt>io<import_stmt>os<import_stmt>socket<import_stmt>subprocess<import_stmt>time<import_from_stmt>scapy.automaton SelectableObject<import_from_stmt>scapy.arch.common _select_nonblock<import_from_stmt>scapy.arch.windows.structures GetIcmpStatistics<import_from_stmt>scapy.compat raw<import_from_stmt>scapy.config conf<import_from_stmt>scapy.data MTU<import_from_stmt>scapy.error Scapy_Exception warning<import_from_stmt>scapy.supersocket SuperSocket<line_sep># Watch out for import loops (inet...) <class_stmt>L3WinSocket(SuperSocket SelectableObject)<block_start>desc="a native Layer 3 (IPv4) raw socket under Windows"<line_sep>nonblocking_socket=<true><line_sep>__slots__=["promisc" "cls" "ipv6" "proto"]<def_stmt>__init__ self iface=<none> proto=socket.IPPROTO_IP ttl=128 ipv6=<false> promisc=<true> **kwargs<block_start><import_from_stmt>scapy.layers.inet IP<import_from_stmt>scapy.layers.inet6 IPv6<for_stmt>kwarg kwargs<block_start>warning("Dropping unsupported option: %s"%kwarg)<block_end>af=socket.AF_INET6<if>ipv6<else>socket.AF_INET<line_sep>self.proto=proto<if_stmt>ipv6<block_start><import_from_stmt>scapy.arch get_if_addr6<line_sep>self.host_ip6=get_if_addr6(conf.iface)<or>"::1"<if_stmt>proto<eq>socket.IPPROTO_IP# We'll restrict ourselves to UDP, as TCP isn't bindable # on AF_INET6 <block_start>self.proto=socket.IPPROTO_UDP<block_end><block_end># On Windows, with promisc=False, you won't get much self.ipv6=ipv6<line_sep>self.cls=IPv6<if>ipv6<else>IP<line_sep>self.promisc=promisc<line_sep># Notes: # - IPPROTO_RAW only works to send packets. # - IPPROTO_IPV6 exists in MSDN docs, but using it will result in # no packets being received. 
Same for its options (IPV6_HDRINCL...) # However, using IPPROTO_IP with AF_INET6 will still receive # the IPv6 packets <try_stmt><block_start>self.ins=socket.socket(af socket.SOCK_RAW self.proto)<line_sep>self.outs=socket.socket(af socket.SOCK_RAW socket.IPPROTO_RAW)<block_end><except_stmt>OSError<as>e<block_start><if_stmt>e.errno<eq>10013<block_start><raise>OSError("Windows native L3 Raw sockets are only "<concat>"usable as administrator ! "<concat>"Install Winpcap/Npcap to workaround !")<block_end><raise><block_end>self.ins.setsockopt(socket.SOL_SOCKET socket.SO_REUSEADDR 1)<line_sep>self.outs.setsockopt(socket.SOL_SOCKET socket.SO_REUSEADDR 1)<line_sep>self.ins.setsockopt(socket.SOL_SOCKET socket.SO_RCVBUF 2<power>30)<line_sep>self.outs.setsockopt(socket.SOL_SOCKET socket.SO_SNDBUF 2<power>30)<line_sep># IOCTL Include IP headers self.ins.setsockopt(socket.IPPROTO_IP socket.IP_HDRINCL 1)<line_sep>self.outs.setsockopt(socket.IPPROTO_IP socket.IP_HDRINCL 1)<line_sep># set TTL self.ins.setsockopt(socket.IPPROTO_IP socket.IP_TTL ttl)<line_sep>self.outs.setsockopt(socket.IPPROTO_IP socket.IP_TTL ttl)<line_sep># Bind on all ports iface=iface<or>conf.iface<line_sep>host=iface.ip<if>iface.ip<else>socket.gethostname()<line_sep>self.ins.bind((host 0))<line_sep>self.ins.setblocking(<false>)<line_sep># Get as much data as possible: reduce what is cropped <if_stmt>ipv6<block_start><try_stmt># Not all Windows versions <block_start>self.ins.setsockopt(socket.IPPROTO_IPV6 socket.IPV6_RECVTCLASS 1)<line_sep>self.ins.setsockopt(socket.IPPROTO_IPV6 socket.IPV6_HOPLIMIT 1)<block_end><except_stmt>(OSError socket.error)<block_start><pass><block_end><block_end><else_stmt><block_start><try_stmt># Not Windows XP <block_start>self.ins.setsockopt(socket.IPPROTO_IP socket.IP_RECVDSTADDR 1)<block_end><except_stmt>(OSError socket.error)<block_start><pass><block_end><try_stmt># Windows 10+ recent builds only <block_start>self.ins.setsockopt(socket.IPPROTO_IP socket.IP_RECVTTL 1)<block_end><except_stmt>(OSError socket.error)<block_start><pass><block_end><block_end><if_stmt>promisc# IOCTL Receive all packets <block_start>self.ins.ioctl(socket.SIO_RCVALL socket.RCVALL_ON)<block_end><block_end><def_stmt>send self x<block_start>data=raw(x)<if_stmt>self.cls<not><in>x<block_start><raise>Scapy_Exception("L3WinSocket can only send IP/IPv6 packets !"<concat>" Install Npcap/Winpcap to send more")<block_end>dst_ip=str(x[self.cls].dst)<line_sep>self.outs.sendto(data (dst_ip 0))<block_end><def_stmt>nonblock_recv self x=MTU<block_start><return>self.recv()<block_end># https://docs.microsoft.com/en-us/windows/desktop/winsock/tcp-ip-raw-sockets-2 # noqa: E501 # - For IPv4 (address family of AF_INET), an application receives the IP # header at the front of each received datagram regardless of the # IP_HDRINCL socket option. # - For IPv6 (address family of AF_INET6), an application receives # everything after the last IPv6 header in each received datagram # regardless of the IPV6_HDRINCL socket option. The application does # not receive any IPv6 headers using a raw socket. <def_stmt>recv_raw self x=MTU<block_start><try_stmt><block_start>data,address=self.ins.recvfrom(x)<block_end><except_stmt>io.BlockingIOError<block_start><return><none> <none> <none><block_end><import_from_stmt>scapy.layers.inet IP<import_from_stmt>scapy.layers.inet6 IPv6<if_stmt>self.ipv6# AF_INET6 does not return the IPv6 header. 
Let's build it # (host, port, flowinfo, scopeid) <block_start>host,_,flowinfo,_=address<line_sep>header=raw(IPv6(src=host dst=self.host_ip6 fl=flowinfo nh=self.proto # fixed for AF_INET6 plen=len(data)))<line_sep><return>IPv6 header+data time.time()<block_end><else_stmt><block_start><return>IP data time.time()<block_end><block_end><def_stmt>check_recv self<block_start><return><true><block_end><def_stmt>close self<block_start><if_stmt><not>self.closed<and>self.promisc<block_start>self.ins.ioctl(socket.SIO_RCVALL socket.RCVALL_OFF)<block_end>super(L3WinSocket self).close()<block_end>@staticmethod<def_stmt>select sockets remain=<none><block_start><return>_select_nonblock(sockets remain=remain)<block_end><block_end><class_stmt>L3WinSocket6(L3WinSocket)<block_start>desc="a native Layer 3 (IPv6) raw socket under Windows"<def_stmt>__init__ self **kwargs<block_start>super(L3WinSocket6 self).__init__(ipv6=<true> **kwargs)<block_end><block_end><def_stmt>open_icmp_firewall host<block_start>"""Temporarily open the ICMP firewall. Tricks Windows into allowing ICMP packets for a short period of time (~ 1 minute)"""<line_sep># We call ping with a timeout of 1ms: will return instantly <with_stmt>open(os.devnull 'wb')<as>DEVNULL<block_start><return>subprocess.Popen("ping -4 -w 1 -n 1 %s"%host shell=<true> stdout=DEVNULL stderr=DEVNULL).wait()<block_end><block_end><def_stmt>get_current_icmp_seq <block_start>"""See help(scapy.arch.windows.native) for more information. Returns the current ICMP seq number."""<line_sep><return>GetIcmpStatistics()['stats']['icmpOutStats']['dwEchos']<block_end>
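# --- Usage sketch (illustrative, not part of the original module) -----------------
# Sending a "well formed" ICMP echo through the native L3 socket, mirroring the
# module docstring above. Assumes Windows, administrator rights and an open
# firewall; the destination host is a placeholder.
def native_icmp_ping_example(dst="www.example.com"):
    from scapy.layers.inet import IP, ICMP  # imported here to avoid import loops

    conf.use_pcap = False
    sock = L3WinSocket()
    try:
        # id must be 1 and seq must fall inside the currently allowed window,
        # which get_current_icmp_seq() reports (see the module docstring).
        seq = get_current_icmp_seq()
        ans, unans = sock.sr(IP(dst=dst, ttl=128) / ICMP(id=1, seq=seq))
        return ans
    finally:
        sock.close()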
<import_stmt>time<import_from_stmt>nose2.tools such<def_stmt>slow_blocking_init <block_start>print("YEAH2")<line_sep>time.sleep(1)<line_sep>print("a second elapsed")<line_sep>time.sleep(1)<line_sep>print("a second elapsed")<line_sep><return><true><block_end><class_stmt>Layer1(object)<block_start>description="Layer1 description"<line_sep>@classmethod<def_stmt>setUp cls<block_start>print("YEAH")<line_sep>it.obj=<false><block_end><block_end><class_stmt>Layer2(object)<block_start>description="Layer2 description"<line_sep>@classmethod<def_stmt>setUp cls<block_start>it.obj=slow_blocking_init()<block_end><block_end><with_stmt>such.A("system with a fast initial setup layer")<as>it<block_start>it.uses(Layer1)<line_sep>@it.should("not have obj initialized")<def_stmt>test <block_start><assert_stmt><not>it.obj<block_end><with_stmt>it.having("a second slow setup layer")<block_start>it.uses(Layer2)<line_sep>@it.should("have obj initialized")<def_stmt>test2 <block_start><assert_stmt>it.obj<block_end><block_end><block_end>it.createTests(globals())<line_sep>
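# --- How this scenario is typically run (assumption: default nose2 plugin set) ----
# The `such` DSL relies on nose2's layers plugin, which nose2 loads by default, so
# the tests above can usually be run with plain nose2. Only test2, inside the
# "having" block, pays for Layer2's slow setUp. The module name is a placeholder:
#
#   nose2 -v test_such_layers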
<import_stmt>streamlit<as>st<class_stmt>App<block_start><def_stmt>run self<block_start>st.title("Cannot st.cache classmethod issue")<line_sep>App.get_data1()<line_sep>st.info("data1 loaded")<line_sep>self.get_data2()<line_sep>st.info("data2 loaded")<block_end>@[email protected]<def_stmt>get_data1 cls<block_start><pass><block_end>@st.cache@classmethod<def_stmt>get_data2 cls<block_start><pass><block_end><block_end>App().run()<line_sep>
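# --- Hedged workaround sketch (not part of the original snippet) ------------------
# st.cache reliably wraps plain module-level functions, so one way around the
# classmethod issue demonstrated above is to delegate to such a helper. The helper
# name and return value here are illustrative only.
@st.cache
def _load_data1():
    return "data1"

class AppWorkaround:
    def run(self):
        st.title("st.cache via a module-level helper")
        st.info(f"loaded: {_load_data1()}")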
"""Tests for migration_utils."""<import_from_stmt>keras.initializers GlorotUniform<as>V2GlorotUniform<import_from_stmt>keras.legacy_tf_layers migration_utils<import_stmt>tensorflow<as>tf<class_stmt>DeterministicRandomTestToolTest(tf.test.TestCase)<block_start><def_stmt>test_constant_mode_no_seed self<block_start>"""Test random tensor generation consistancy in constant mode. Verify that the random tensor generated without using the seed is consistant between graph and eager mode """<line_sep># Generate three random tensors to show how the stateful random number # generation and glorot_uniform_initializer match between sessions and # eager execution. random_tool=migration_utils.DeterministicRandomTestTool()<with_stmt>random_tool.scope()<block_start>graph=tf.Graph()<with_stmt>graph.as_default() tf.compat.v1.Session(graph=graph)<as>sess<block_start>a=tf.compat.v1.random.uniform(shape=(3 1))<line_sep># adding additional computation/ops to the graph and ensuring consistant # random number generation a=a<times>3<line_sep>b=tf.compat.v1.random.uniform(shape=(3 3))<line_sep>b=b<times>3<line_sep>c=tf.compat.v1.random.uniform(shape=(3 3))<line_sep>c=c<times>3<line_sep>d=tf.compat.v1.glorot_uniform_initializer()(shape=(6 6) dtype=tf.float32)<line_sep>graph_a,graph_b,graph_c,graph_d=sess.run([a b c d])<block_end>a=tf.compat.v2.random.uniform(shape=(3 1))<line_sep>a=a<times>3<line_sep>b=tf.compat.v2.random.uniform(shape=(3 3))<line_sep>b=b<times>3<line_sep>c=tf.compat.v2.random.uniform(shape=(3 3))<line_sep>c=c<times>3<line_sep>d=V2GlorotUniform()(shape=(6 6) dtype=tf.float32)<block_end># validate that the generated random tensors match self.assertAllClose(graph_a a)<line_sep>self.assertAllClose(graph_b b)<line_sep>self.assertAllClose(graph_c c)<line_sep>self.assertAllClose(graph_d d)<line_sep># In constant mode, because b and c were generated with the same seed within # the same scope and have the same shape, they will have exactly the same # values. # validate that b and c are the same, also graph_b and graph_c self.assertAllClose(b c)<line_sep>self.assertAllClose(graph_b graph_c)<block_end><def_stmt>test_constant_mode_seed_argument self<block_start>"""Test random tensor generation consistancy in constant mode. Verify that the random tensor generated by setting the global seeed in the args is consistant between graph and eager mode. """<line_sep>random_tool=migration_utils.DeterministicRandomTestTool()<with_stmt>random_tool.scope()<block_start>graph=tf.Graph()<with_stmt>graph.as_default() tf.compat.v1.Session(graph=graph)<as>sess# adding additional computation/ops to the graph and ensuring consistant # random number generation <block_start>a=tf.compat.v1.random.uniform(shape=(3 1) seed=1234)<line_sep>a=a<times>3<line_sep>b=tf.compat.v1.random.uniform(shape=(3 3) seed=1234)<line_sep>b=b<times>3<line_sep>c=tf.compat.v1.glorot_uniform_initializer(seed=1234)(shape=(6 6) dtype=tf.float32)<line_sep>graph_a,graph_b,graph_c=sess.run([a b c])<block_end>a=tf.compat.v2.random.uniform(shape=(3 1) seed=1234)<line_sep>a=a<times>3<line_sep>b=tf.compat.v2.random.uniform(shape=(3 3) seed=1234)<line_sep>b=b<times>3<line_sep>c=V2GlorotUniform(seed=1234)(shape=(6 6) dtype=tf.float32)<block_end># validate that the generated random tensors match self.assertAllClose(graph_a a)<line_sep>self.assertAllClose(graph_b b)<line_sep>self.assertAllClose(graph_c c)<block_end><def_stmt>test_num_rand_ops self<block_start>"""Test random tensor generation consistancy in num_random_ops mode. 
Verify that the random tensor generated without using the seed is consistant between graph and eager mode. Random tensor generated should be different based on random ops ordering """<line_sep>random_tool=migration_utils.DeterministicRandomTestTool(mode="num_random_ops")<with_stmt>random_tool.scope()<block_start>graph=tf.Graph()<with_stmt>graph.as_default() tf.compat.v1.Session(graph=graph)<as>sess# adding additional computation/ops to the graph and ensuring consistant # random number generation <block_start>a=tf.compat.v1.random.uniform(shape=(3 1))<line_sep>a=a<times>3<line_sep>b=tf.compat.v1.random.uniform(shape=(3 3))<line_sep>b=b<times>3<line_sep>c=tf.compat.v1.random.uniform(shape=(3 3))<line_sep>c=c<times>3<line_sep>d=tf.compat.v1.glorot_uniform_initializer()(shape=(6 6) dtype=tf.float32)<line_sep>graph_a,graph_b,graph_c,graph_d=sess.run([a b c d])<block_end><block_end>random_tool=migration_utils.DeterministicRandomTestTool(mode="num_random_ops")<with_stmt>random_tool.scope()<block_start>a=tf.compat.v2.random.uniform(shape=(3 1))<line_sep>a=a<times>3<line_sep>b=tf.compat.v2.random.uniform(shape=(3 3))<line_sep>b=b<times>3<line_sep>c=tf.compat.v2.random.uniform(shape=(3 3))<line_sep>c=c<times>3<line_sep>d=V2GlorotUniform()(shape=(6 6) dtype=tf.float32)<block_end># validate that the generated random tensors match self.assertAllClose(graph_a a)<line_sep>self.assertAllClose(graph_b b)<line_sep>self.assertAllClose(graph_c c)<line_sep>self.assertAllClose(graph_d d)<line_sep># validate that the tensors differ based on ops ordering self.assertNotAllClose(b c)<line_sep>self.assertNotAllClose(graph_b graph_c)<block_end><def_stmt>test_num_rand_ops_program_order self<block_start>"""Test random tensor generation consistancy in num_random_ops mode. validate that in this mode random number generation is sensitive to program order, so the generated random tesnors should not match. """<line_sep>random_tool=migration_utils.DeterministicRandomTestTool(mode="num_random_ops")<with_stmt>random_tool.scope()<block_start>a=tf.random.uniform(shape=(3 1))<line_sep># adding additional computation/ops to the graph and ensuring consistant # random number generation a=a<times>3<line_sep>b=tf.random.uniform(shape=(3 3))<line_sep>b=b<times>3<block_end>random_tool=migration_utils.DeterministicRandomTestTool(mode="num_random_ops")<with_stmt>random_tool.scope()<block_start>b_prime=tf.random.uniform(shape=(3 3))<line_sep># adding additional computation/ops to the graph and ensuring consistant # random number generation b_prime=b_prime<times>3<line_sep>a_prime=tf.random.uniform(shape=(3 1))<line_sep>a_prime=a_prime<times>3<block_end># validate that the tensors are different self.assertNotAllClose(a a_prime)<line_sep>self.assertNotAllClose(b b_prime)<block_end><def_stmt>test_num_rand_ops_operation_seed self<block_start>"""Test random tensor generation consistancy in num_random_ops mode. validate if random number generation match across two different program orders. 
"""<line_sep>random_tool=migration_utils.DeterministicRandomTestTool(mode="num_random_ops")<with_stmt>random_tool.scope()# operation seed = 0 <block_start>a=tf.random.uniform(shape=(3 1))<line_sep>a=a<times>3<line_sep># operation seed = 1 b=tf.random.uniform(shape=(3 3))<line_sep>b=b<times>3<block_end>random_tool=migration_utils.DeterministicRandomTestTool(mode="num_random_ops")<with_stmt>random_tool.scope()<block_start>random_tool.operation_seed=1<line_sep>b_prime=tf.random.uniform(shape=(3 3))<line_sep>b_prime=b_prime<times>3<line_sep>random_tool.operation_seed=0<line_sep>a_prime=tf.random.uniform(shape=(3 1))<line_sep>a_prime=a_prime<times>3<block_end>self.assertAllClose(a a_prime)<line_sep>self.assertAllClose(b b_prime)<block_end><def_stmt>test_num_rand_ops_disallow_repeated_ops_seed self<block_start>"""Test random tensor generation consistancy in num_random_ops mode. validate if DeterministicRandomTestTool disallows reusing already-used operation seeds. """<line_sep>random_tool=migration_utils.DeterministicRandomTestTool(mode="num_random_ops")<with_stmt>random_tool.scope()<block_start>random_tool.operation_seed=1<line_sep>b_prime=tf.random.uniform(shape=(3 3))<line_sep>b_prime=b_prime<times>3<line_sep>random_tool.operation_seed=0<line_sep>a_prime=tf.random.uniform(shape=(3 1))<line_sep>a_prime=a_prime<times>3<line_sep>error_string="An exception should have been raised before this"<line_sep>error_raised="An exception should have been raised before this"<try_stmt><block_start>c=tf.random.uniform(shape=(3 1))<line_sep><raise>RuntimeError(error_string)<block_end><except_stmt>ValueError<as>err<block_start>err_raised=err<block_end>self.assertNotEqual(err_raised error_string)<block_end><block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>tf.test.main()<block_end>
<import_stmt>argparse<import_stmt>os<import_from_stmt>typing List<import_stmt>cv2<import_stmt>numpy<as>np<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.keras.preprocessing.text Tokenizer<import_from_stmt>tf2_utils.inferer Inferer<line_sep>TARGET_IMAGE_HEIGHT=64<line_sep>TARGET_IMAGE_WIDTH=256<line_sep>CHAR_VECTOR="0123456789abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ!\"#$%&'()*+,-./:;<=>?@[\\]^_`{|}~"<def_stmt>infere_images images_path:List[str]<block_start>inferer=Inferer()<line_sep>tokenizer=get_tokenizer()<for_stmt>image_path images_path<block_start>image=load_image(image_path)<line_sep>logits=inferer(image)<line_sep>sequence_length=[logits.shape[1]]<line_sep>sequences_decoded=tf.nn.ctc_greedy_decoder(tf.transpose(logits [1 0 2]) sequence_length merge_repeated=<false>)[0][0]<line_sep>sequences_decoded=tf.sparse.to_dense(sequences_decoded).numpy()<line_sep>word=tokenizer.sequences_to_texts(sequences_decoded)[0]<line_sep>print(word)<block_end><block_end><def_stmt>get_tokenizer <block_start>tokenizer=Tokenizer(char_level=<true> lower=<false> oov_token="<OOV>")<line_sep>tokenizer.fit_on_texts(CHAR_VECTOR)<line_sep><return>tokenizer<block_end><def_stmt>load_image image_path:str<block_start>image=cv2.imread(os.path.join(image_path))<line_sep>image=cv2.resize(image (TARGET_IMAGE_WIDTH TARGET_IMAGE_HEIGHT))<line_sep>image=image.astype(np.float32)/127.5-1.0<line_sep><return>tf.expand_dims(tf.constant(image) 0)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument("--images_path" nargs="+" type=str help="Path to the images to infere" )<line_sep>args=parser.parse_args()<line_sep>infere_images(args.images_path)<block_end>
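# --- Example invocation (illustrative) ---------------------------------------------
# Each image is resized to 256x64, scaled to [-1, 1], run through the model wrapped
# by Inferer, greedy CTC-decoded and printed as text. The script file name and image
# paths below are placeholders:
#
#   python infer.py --images_path word_1.png word_2.png
#
# or, bypassing argparse:
#
#   infere_images(["word_1.png", "word_2.png"])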
<import_stmt>unittest<import_from_stmt>vex.parsers.g_cmd GlobalLexer<class_stmt>TestGlobalLexer(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>self.lexer=GlobalLexer()<block_end><def_stmt>testCanMatchFullPattern self<block_start>actual=self.lexer.parse(r'/foo/p#')<line_sep>self.assertEqual(actual ['foo' 'p#'])<block_end><def_stmt>testCanMatchEmptySearch self<block_start>actual=self.lexer.parse(r'//p#')<line_sep>self.assertEqual(actual ['' 'p#'])<block_end><def_stmt>testCanEscapeCharactersInSearchPattern self<block_start>actual=self.lexer.parse(r'/\/foo\//p#')<line_sep>self.assertEqual(actual ['/foo/' 'p#'])<block_end><def_stmt>testCanEscapeBackSlashes self<block_start>actual=self.lexer.parse(r'/\\/p#')<line_sep>self.assertEqual(actual ['\\' 'p#'])<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
<import_stmt>unittest<import_from_stmt>prompt_toolkit.widgets Button<import_from_stmt>unittest.mock patch MagicMock<import_from_stmt>hummingbot.client.tab.data_types CommandTab<import_from_stmt>hummingbot.client.ui.hummingbot_cli HummingbotCLI<import_from_stmt>hummingbot.client.ui.custom_widgets CustomTextArea<class_stmt>HummingbotCLITest(unittest.TestCase)<block_start>command_name="command_1"<def_stmt>setUp self<arrow><none><block_start>super().setUp()<line_sep>tabs={self.command_name:CommandTab(self.command_name <none> <none> <none> MagicMock())}<line_sep>self.mock_hb=MagicMock()<line_sep>self.app=HummingbotCLI(<none> <none> <none> tabs)<line_sep>self.app.app=MagicMock()<block_end><def_stmt>test_handle_tab_command_on_close_argument self<block_start>tab=self.app.command_tabs[self.command_name]<line_sep>tab.close_button=MagicMock()<line_sep>tab.button=MagicMock()<line_sep>tab.output_field=MagicMock()<line_sep>self.app.handle_tab_command(self.mock_hb self.command_name {"close":<true>})<line_sep>self.assertIsNone(tab.button)<line_sep>self.assertIsNone(tab.close_button)<line_sep>self.assertIsNone(tab.output_field)<line_sep>self.assertFalse(tab.is_selected)<line_sep>self.assertEqual(tab.tab_index 0)<block_end><def_stmt>test_handle_tab_command_create_new_tab_and_display self<block_start>tab=self.app.command_tabs[self.command_name]<line_sep>self.app.handle_tab_command(self.mock_hb self.command_name {"close":<false>})<line_sep>self.assertIsInstance(tab.button Button)<line_sep>self.assertIsInstance(tab.close_button Button)<line_sep>self.assertIsInstance(tab.output_field CustomTextArea)<line_sep>self.assertEqual(tab.tab_index 1)<line_sep>self.assertTrue(tab.is_selected)<line_sep>self.assertTrue(tab.tab_class.display.called)<block_end>@patch("hummingbot.client.ui.layout.Layout")@patch("hummingbot.client.ui.layout.FloatContainer")@patch("hummingbot.client.ui.layout.ConditionalContainer")@patch("hummingbot.client.ui.layout.Box")@patch("hummingbot.client.ui.layout.HSplit")@patch("hummingbot.client.ui.layout.VSplit")<def_stmt>test_handle_tab_command_on_existing_tab self mock_vsplit mock_hsplit mock_box moc_cc moc_fc mock_layout<block_start>tab=self.app.command_tabs[self.command_name]<line_sep>tab.button=MagicMock()<line_sep>tab.output_field=MagicMock()<line_sep>tab.close_button=MagicMock()<line_sep>tab.is_selected=<false><line_sep>self.app.handle_tab_command(self.mock_hb self.command_name {"close":<false>})<line_sep>self.assertTrue(tab.is_selected)<line_sep>self.assertTrue(tab.tab_class.display.call_count<eq>1)<line_sep># Test display not called if there is a running task tab.is_selected=<false><line_sep>tab.task=MagicMock()<line_sep>tab.task.done.return_value=<false><line_sep>self.app.handle_tab_command(self.mock_hb self.command_name {"close":<false>})<line_sep>self.assertTrue(tab.is_selected)<line_sep>self.assertTrue(tab.tab_class.display.call_count<eq>1)<block_end>@patch("hummingbot.client.ui.layout.Layout")@patch("hummingbot.client.ui.layout.FloatContainer")@patch("hummingbot.client.ui.layout.ConditionalContainer")@patch("hummingbot.client.ui.layout.Box")@patch("hummingbot.client.ui.layout.HSplit")@patch("hummingbot.client.ui.layout.VSplit")<def_stmt>test_tab_navigation self mock_vsplit mock_hsplit mock_box moc_cc moc_fc mock_layout<block_start>tab2=CommandTab("command_2" <none> <none> <none> MagicMock() <false>)<line_sep>self.app.command_tabs["command_2"]=tab2<line_sep>tab1=self.app.command_tabs[self.command_name]<line_sep>self.app.handle_tab_command(self.mock_hb self.command_name 
{"close":<false>})<line_sep>self.app.handle_tab_command(self.mock_hb "command_2" {"close":<false>})<line_sep>self.assertTrue(tab2.is_selected)<line_sep>self.app.tab_navigate_left()<line_sep>self.assertTrue(tab1.is_selected)<line_sep>self.assertFalse(tab2.is_selected)<line_sep>self.app.tab_navigate_left()<line_sep>self.assertTrue(all(<not>t.is_selected<for>t self.app.command_tabs.values()))<line_sep>self.app.tab_navigate_left()<line_sep>self.assertTrue(all(<not>t.is_selected<for>t self.app.command_tabs.values()))<line_sep>self.app.tab_navigate_right()<line_sep>self.assertTrue(tab1.is_selected)<line_sep>self.app.tab_navigate_right()<line_sep>self.assertFalse(tab1.is_selected)<line_sep>self.assertTrue(tab2.is_selected)<line_sep>self.app.tab_navigate_right()<line_sep>self.assertFalse(tab1.is_selected)<line_sep>self.assertTrue(tab2.is_selected)<block_end><block_end>
<import_stmt>torch<import_from_stmt>..distances CosineSimilarity<import_from_stmt>..reducers DivisorReducer<import_from_stmt>..utils common_functions<as>c_f<import_from_stmt>.base_regularizer BaseRegularizer<class_stmt>SparseCentersRegularizer(BaseRegularizer)<block_start><def_stmt>__init__ self num_classes centers_per_class **kwargs<block_start>super().__init__(**kwargs)<assert_stmt>centers_per_class<g>1<line_sep>c_f.assert_distance_type(self CosineSimilarity)<line_sep>self.set_class_masks(num_classes centers_per_class)<line_sep>self.add_to_recordable_attributes(list_of_names=["num_classes" "centers_per_class"] is_stat=<false>)<line_sep>self.add_to_recordable_attributes(list_of_names=["same_class_center_sim" "diff_class_center_sim"] is_stat=<true> )<block_end><def_stmt>compute_loss self weights<block_start>center_similarities=self.distance(weights)<line_sep>small_val=c_f.small_val(weights.dtype)<line_sep>center_similarities_masked=torch.clamp(2.0<times>center_similarities[self.same_class_mask] max=2)<line_sep>divisor=2<times>torch.sum(self.same_class_mask)<line_sep>reg=torch.sqrt(2.0+small_val-center_similarities_masked)<line_sep>self.set_stats(center_similarities)<line_sep><return>{"loss":{"losses":reg "indices":c_f.torch_arange_from_size(reg) "reduction_type":"element" "divisor":divisor }}<block_end><def_stmt>set_class_masks self num_classes centers_per_class<block_start>total_num_centers=num_classes<times>centers_per_class<line_sep>self.diff_class_mask=torch.ones(total_num_centers total_num_centers dtype=torch.bool)<line_sep>self.same_class_mask=torch.zeros(total_num_centers total_num_centers dtype=torch.bool)<for_stmt>i range(num_classes)<block_start>s,e=i<times>centers_per_class (i+1)<times>centers_per_class<line_sep>curr_block=torch.ones(centers_per_class centers_per_class)<line_sep>curr_block=torch.triu(curr_block diagonal=1)<line_sep>self.same_class_mask[s:e s:e]=curr_block<line_sep>self.diff_class_mask[s:e s:e]=0<block_end><block_end><def_stmt>set_stats self center_similarities<block_start><if_stmt>self.collect_stats<block_start><with_stmt>torch.no_grad()<block_start>self.same_class_center_sim=torch.mean(center_similarities[self.same_class_mask]).item()<line_sep>self.diff_class_center_sim=torch.mean(center_similarities[self.diff_class_mask]).item()<block_end><block_end><block_end><def_stmt>get_default_distance self<block_start><return>CosineSimilarity()<block_end><def_stmt>get_default_reducer self<block_start><return>DivisorReducer()<block_end><block_end>
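# --- Hedged usage sketch (illustrative shapes, not part of the original file) -----
# In pytorch-metric-learning this regularizer is normally attached to a loss as a
# weight regularizer; calling it directly on a (num_classes * centers_per_class,
# embedding_dim) weight matrix is shown here only to illustrate the expected shapes.
def sparse_centers_example():
    num_classes, centers_per_class, emb_dim = 10, 3, 128
    regularizer = SparseCentersRegularizer(num_classes, centers_per_class)
    weights = torch.randn(num_classes * centers_per_class, emb_dim, requires_grad=True)
    reg_loss = regularizer(weights)  # reduced to a scalar by the DivisorReducer
    reg_loss.backward()
    return reg_loss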
<import_from_stmt>torch.nn Conv2d Module Sequential InstanceNorm2d ReLU ConvTranspose2d<import_from_stmt>tha2.nn.backcomp.nn.init_function create_init_function<def_stmt>Conv7 in_channels:int out_channels:int initialization_method='he'<arrow>Module<block_start>init=create_init_function(initialization_method)<line_sep><return>init(Conv2d(in_channels out_channels kernel_size=7 stride=1 padding=3 bias=<false>))<block_end><def_stmt>Conv3 in_channels:int out_channels:int initialization_method='he'<arrow>Module<block_start>init=create_init_function(initialization_method)<line_sep><return>init(Conv2d(in_channels out_channels kernel_size=3 stride=1 padding=1 bias=<false>))<block_end><def_stmt>Conv7Block in_channels:int out_channels:int initialization_method='he'<arrow>Module<block_start><return>Sequential(Conv7(in_channels out_channels initialization_method) InstanceNorm2d(out_channels affine=<true>) ReLU(inplace=<true>))<block_end><def_stmt>DownsampleBlock in_channels:int initialization_method='he'<arrow>Module<block_start>init=create_init_function(initialization_method)<line_sep><return>Sequential(init(Conv2d(in_channels in_channels<times>2 kernel_size=4 stride=2 padding=1 bias=<false>)) InstanceNorm2d(in_channels<times>2 affine=<true>) ReLU(inplace=<true>))<block_end><def_stmt>UpsampleBlock in_channels:int out_channels:int initialization_method='he'<arrow>Module<block_start>init=create_init_function(initialization_method)<line_sep><return>Sequential(init(ConvTranspose2d(in_channels out_channels kernel_size=4 stride=2 padding=1 bias=<false>)) InstanceNorm2d(out_channels affine=<true>) ReLU(inplace=<true>))<block_end>
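# --- Minimal shape check (illustrative; assumes torch is installed) -----------------
# DownsampleBlock halves the spatial size while doubling the channel count, and
# UpsampleBlock mirrors it, so the two compose back to the input shape.
def shape_check_example():
    import torch

    x = torch.randn(1, 16, 64, 64)
    down = DownsampleBlock(16)      # -> (1, 32, 32, 32)
    up = UpsampleBlock(32, 16)      # -> (1, 16, 64, 64)
    y = up(down(x))
    assert y.shape == x.shape
    return y.shape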
# This code is part of Qiskit. # # (C) Copyright IBM 2018, 2020. # # This code is licensed under the Apache License, Version 2.0. You may # obtain a copy of this license in the LICENSE.txt file in the root directory # of this source tree or at http://www.apache.org/licenses/LICENSE-2.0. # # Any modifications or derivative works of this code must retain this # copyright notice, and modified files need to carry a notice indicating # that they have been altered from the originals. """Test the quantum amplitude estimation algorithm."""<import_stmt>unittest<import_from_stmt>test.python.algorithms QiskitAlgorithmsTestCase<import_stmt>numpy<as>np<import_from_stmt>ddt ddt idata data unpack<import_from_stmt>qiskit QuantumRegister QuantumCircuit BasicAer<import_from_stmt>qiskit.circuit.library QFT GroverOperator<import_from_stmt>qiskit.utils QuantumInstance<import_from_stmt>qiskit.algorithms AmplitudeEstimation MaximumLikelihoodAmplitudeEstimation IterativeAmplitudeEstimation FasterAmplitudeEstimation EstimationProblem <import_from_stmt>qiskit.quantum_info Operator Statevector<class_stmt>BernoulliStateIn(QuantumCircuit)<block_start>"""A circuit preparing sqrt(1 - p)|0> + sqrt(p)|1>."""<def_stmt>__init__ self probability<block_start>super().__init__(1)<line_sep>angle=2<times>np.arcsin(np.sqrt(probability))<line_sep>self.ry(angle 0)<block_end><block_end><class_stmt>BernoulliGrover(QuantumCircuit)<block_start>"""The Grover operator corresponding to the Bernoulli A operator."""<def_stmt>__init__ self probability<block_start>super().__init__(1 global_phase=np.pi)<line_sep>self.angle=2<times>np.arcsin(np.sqrt(probability))<line_sep>self.ry(2<times>self.angle 0)<block_end><def_stmt>power self power matrix_power=<false><block_start><if_stmt>matrix_power<block_start><return>super().power(power <true>)<block_end>powered=QuantumCircuit(1)<line_sep>powered.ry(power<times>2<times>self.angle 0)<line_sep><return>powered<block_end><block_end><class_stmt>SineIntegral(QuantumCircuit)<block_start>r"""Construct the A operator to approximate the integral \int_0^1 \sin^2(x) d x with a specified number of qubits. """<def_stmt>__init__ self num_qubits<block_start>qr_state=QuantumRegister(num_qubits "state")<line_sep>qr_objective=QuantumRegister(1 "obj")<line_sep>super().__init__(qr_state qr_objective)<line_sep># prepare 1/sqrt{2^n} sum_x |x>_n self.h(qr_state)<line_sep># apply the sine/cosine term self.ry(2<times>1/2/2<power>num_qubits qr_objective[0])<for_stmt>i,qubit enumerate(qr_state)<block_start>self.cry(2<times>2<power>i/2<power>num_qubits qubit qr_objective[0])<block_end><block_end><block_end>@ddt<class_stmt>TestBernoulli(QiskitAlgorithmsTestCase)<block_start>"""Tests based on the Bernoulli A operator. 
This class tests * the estimation result * the constructed circuits """<def_stmt>setUp self<block_start>super().setUp()<line_sep>self._statevector=QuantumInstance(backend=BasicAer.get_backend("statevector_simulator") seed_simulator=2 seed_transpiler=2 )<line_sep>self._unitary=QuantumInstance(backend=BasicAer.get_backend("unitary_simulator") shots=1 seed_simulator=42 seed_transpiler=91 )<def_stmt>qasm shots=100<block_start><return>QuantumInstance(backend=BasicAer.get_backend("qasm_simulator") shots=shots seed_simulator=2 seed_transpiler=2 )<block_end>self._qasm=qasm<block_end>@idata([[0.2 AmplitudeEstimation(2) {"estimation":0.5 "mle":0.2}] [0.49 AmplitudeEstimation(3) {"estimation":0.5 "mle":0.49}] [0.2 MaximumLikelihoodAmplitudeEstimation([0 1 2]) {"estimation":0.2}] [0.49 MaximumLikelihoodAmplitudeEstimation(3) {"estimation":0.49}] [0.2 IterativeAmplitudeEstimation(0.1 0.1) {"estimation":0.2}] [0.49 IterativeAmplitudeEstimation(0.001 0.01) {"estimation":0.49}] [0.2 FasterAmplitudeEstimation(0.1 3 rescale=<false>) {"estimation":0.2}] [0.12 FasterAmplitudeEstimation(0.1 2 rescale=<false>) {"estimation":0.12}] ])@unpack<def_stmt>test_statevector self prob qae expect<block_start>"""statevector test"""<line_sep>qae.quantum_instance=self._statevector<line_sep>problem=EstimationProblem(BernoulliStateIn(prob) 0 BernoulliGrover(prob))<line_sep>result=qae.estimate(problem)<line_sep>self.assertGreaterEqual(self._statevector.time_taken 0.0)<line_sep>self._statevector.reset_execution_results()<for_stmt>key,value expect.items()<block_start>self.assertAlmostEqual(value getattr(result key) places=3 msg=f"estimate `{key}` failed")<block_end><block_end>@idata([[0.2 100 AmplitudeEstimation(4) {"estimation":0.14644 "mle":0.193888}] [0.0 1000 AmplitudeEstimation(2) {"estimation":0.0 "mle":0.0}] [0.2 100 MaximumLikelihoodAmplitudeEstimation([0 1 2 4 8]) {"estimation":0.199606} ] [0.8 10 IterativeAmplitudeEstimation(0.1 0.05) {"estimation":0.811711}] [0.2 1000 FasterAmplitudeEstimation(0.1 3 rescale=<false>) {"estimation":0.198640}] [0.12 100 FasterAmplitudeEstimation(0.01 3 rescale=<false>) {"estimation":0.119037} ] ])@unpack<def_stmt>test_qasm self prob shots qae expect<block_start>"""qasm test"""<line_sep>qae.quantum_instance=self._qasm(shots)<line_sep>problem=EstimationProblem(BernoulliStateIn(prob) [0] BernoulliGrover(prob))<line_sep>result=qae.estimate(problem)<for_stmt>key,value expect.items()<block_start>self.assertAlmostEqual(value getattr(result key) places=3 msg=f"estimate `{key}` failed")<block_end><block_end>@data(<true> <false>)<def_stmt>test_qae_circuit self efficient_circuit<block_start>"""Test circuits resulting from canonical amplitude estimation. Build the circuit manually and from the algorithm and compare the resulting unitaries. 
"""<line_sep>prob=0.5<line_sep>problem=EstimationProblem(BernoulliStateIn(prob) objective_qubits=[0])<for_stmt>m [2 5]<block_start>qae=AmplitudeEstimation(m)<line_sep>angle=2<times>np.arcsin(np.sqrt(prob))<line_sep># manually set up the inefficient AE circuit qr_eval=QuantumRegister(m "a")<line_sep>qr_objective=QuantumRegister(1 "q")<line_sep>circuit=QuantumCircuit(qr_eval qr_objective)<line_sep># initial Hadamard gates <for_stmt>i range(m)<block_start>circuit.h(qr_eval[i])<block_end># A operator circuit.ry(angle qr_objective)<if_stmt>efficient_circuit<block_start>qae.grover_operator=BernoulliGrover(prob)<for_stmt>power range(m)<block_start>circuit.cry(2<times>2<power>power<times>angle qr_eval[power] qr_objective[0])<block_end><block_end><else_stmt><block_start>oracle=QuantumCircuit(1)<line_sep>oracle.z(0)<line_sep>state_preparation=QuantumCircuit(1)<line_sep>state_preparation.ry(angle 0)<line_sep>grover_op=GroverOperator(oracle state_preparation)<for_stmt>power range(m)<block_start>circuit.compose(grover_op.power(2<power>power).control() qubits=[qr_eval[power] qr_objective[0]] inplace=<true> )<block_end><block_end># fourier transform iqft=QFT(m do_swaps=<false>).inverse().reverse_bits()<line_sep>circuit.append(iqft.to_instruction() qr_eval)<line_sep>actual_circuit=qae.construct_circuit(problem measurement=<false>)<line_sep>self.assertEqual(Operator(circuit) Operator(actual_circuit))<block_end><block_end>@data(<true> <false>)<def_stmt>test_iqae_circuits self efficient_circuit<block_start>"""Test circuits resulting from iterative amplitude estimation. Build the circuit manually and from the algorithm and compare the resulting unitaries. """<line_sep>prob=0.5<line_sep>problem=EstimationProblem(BernoulliStateIn(prob) objective_qubits=[0])<for_stmt>k [2 5]<block_start>qae=IterativeAmplitudeEstimation(0.01 0.05)<line_sep>angle=2<times>np.arcsin(np.sqrt(prob))<line_sep># manually set up the inefficient AE circuit q_objective=QuantumRegister(1 "q")<line_sep>circuit=QuantumCircuit(q_objective)<line_sep># A operator circuit.ry(angle q_objective)<if_stmt>efficient_circuit<block_start>qae.grover_operator=BernoulliGrover(prob)<line_sep>circuit.ry(2<times>k<times>angle q_objective[0])<block_end><else_stmt><block_start>oracle=QuantumCircuit(1)<line_sep>oracle.z(0)<line_sep>state_preparation=QuantumCircuit(1)<line_sep>state_preparation.ry(angle 0)<line_sep>grover_op=GroverOperator(oracle state_preparation)<for_stmt>_ range(k)<block_start>circuit.compose(grover_op inplace=<true>)<block_end><block_end>actual_circuit=qae.construct_circuit(problem k measurement=<false>)<line_sep>self.assertEqual(Operator(circuit) Operator(actual_circuit))<block_end><block_end>@data(<true> <false>)<def_stmt>test_mlae_circuits self efficient_circuit<block_start>"""Test the circuits constructed for MLAE"""<line_sep>prob=0.5<line_sep>problem=EstimationProblem(BernoulliStateIn(prob) objective_qubits=[0])<for_stmt>k [2 5]<block_start>qae=MaximumLikelihoodAmplitudeEstimation(k)<line_sep>angle=2<times>np.arcsin(np.sqrt(prob))<line_sep># compute all the circuits used for MLAE circuits=[]<line_sep># 0th power q_objective=QuantumRegister(1 "q")<line_sep>circuit=QuantumCircuit(q_objective)<line_sep>circuit.ry(angle q_objective)<line_sep>circuits<augadd>[circuit]<line_sep># powers of 2 <for_stmt>power range(k)<block_start>q_objective=QuantumRegister(1 "q")<line_sep>circuit=QuantumCircuit(q_objective)<line_sep># A operator circuit.ry(angle q_objective)<line_sep># Q^(2^j) operator 
<if_stmt>efficient_circuit<block_start>qae.grover_operator=BernoulliGrover(prob)<line_sep>circuit.ry(2<times>2<power>power<times>angle q_objective[0])<block_end><else_stmt><block_start>oracle=QuantumCircuit(1)<line_sep>oracle.z(0)<line_sep>state_preparation=QuantumCircuit(1)<line_sep>state_preparation.ry(angle 0)<line_sep>grover_op=GroverOperator(oracle state_preparation)<for_stmt>_ range(2<power>power)<block_start>circuit.compose(grover_op inplace=<true>)<block_end><block_end>circuits<augadd>[circuit]<block_end>actual_circuits=qae.construct_circuits(problem measurement=<false>)<for_stmt>actual,expected zip(actual_circuits circuits)<block_start>self.assertEqual(Operator(actual) Operator(expected))<block_end><block_end><block_end><block_end>@ddt<class_stmt>TestSineIntegral(QiskitAlgorithmsTestCase)<block_start>"""Tests based on the A operator to integrate sin^2(x). This class tests * the estimation result * the confidence intervals """<def_stmt>setUp self<block_start>super().setUp()<line_sep>self._statevector=QuantumInstance(backend=BasicAer.get_backend("statevector_simulator") seed_simulator=123 seed_transpiler=41 )<def_stmt>qasm shots=100<block_start><return>QuantumInstance(backend=BasicAer.get_backend("qasm_simulator") shots=shots seed_simulator=7192 seed_transpiler=90000 )<block_end>self._qasm=qasm<block_end>@idata([[2 AmplitudeEstimation(2) {"estimation":0.5 "mle":0.270290}] [4 MaximumLikelihoodAmplitudeEstimation(4) {"estimation":0.272675}] [3 IterativeAmplitudeEstimation(0.1 0.1) {"estimation":0.272082}] [3 FasterAmplitudeEstimation(0.01 1) {"estimation":0.272082}] ])@unpack<def_stmt>test_statevector self n qae expect<block_start>"""Statevector end-to-end test"""<line_sep># construct factories for A and Q # qae.state_preparation = SineIntegral(n) qae.quantum_instance=self._statevector<line_sep>estimation_problem=EstimationProblem(SineIntegral(n) objective_qubits=[n])<line_sep># result = qae.run(self._statevector) result=qae.estimate(estimation_problem)<line_sep>self.assertGreaterEqual(self._statevector.time_taken 0.0)<line_sep>self._statevector.reset_execution_results()<for_stmt>key,value expect.items()<block_start>self.assertAlmostEqual(value getattr(result key) places=3 msg=f"estimate `{key}` failed")<block_end><block_end>@idata([[4 10 AmplitudeEstimation(2) {"estimation":0.5 "mle":0.333333}] [3 10 MaximumLikelihoodAmplitudeEstimation(2) {"estimation":0.256878}] [3 1000 IterativeAmplitudeEstimation(0.01 0.01) {"estimation":0.271790}] [3 1000 FasterAmplitudeEstimation(0.1 4) {"estimation":0.274168}] ])@unpack<def_stmt>test_qasm self n shots qae expect<block_start>"""QASM simulator end-to-end test."""<line_sep># construct factories for A and Q qae.quantum_instance=self._qasm(shots)<line_sep>estimation_problem=EstimationProblem(SineIntegral(n) objective_qubits=[n])<line_sep>result=qae.estimate(estimation_problem)<for_stmt>key,value expect.items()<block_start>self.assertAlmostEqual(value getattr(result key) places=3 msg=f"estimate `{key}` failed")<block_end><block_end>@idata([[AmplitudeEstimation(3) "mle" {"likelihood_ratio":(0.2494734 0.3003771) "fisher":(0.2486176 0.2999286) "observed_fisher":(0.2484562 0.3000900) } ] [MaximumLikelihoodAmplitudeEstimation(3) "estimation" {"likelihood_ratio":(0.2598794 0.2798536) "fisher":(0.2584889 0.2797018) "observed_fisher":(0.2659279 0.2722627) } ] ])@unpack<def_stmt>test_confidence_intervals self qae key expect<block_start>"""End-to-end test for all confidence 
intervals."""<line_sep>n=3<line_sep>qae.quantum_instance=self._statevector<line_sep>estimation_problem=EstimationProblem(SineIntegral(n) objective_qubits=[n])<line_sep># statevector simulator result=qae.estimate(estimation_problem)<line_sep>self.assertGreater(self._statevector.time_taken 0.0)<line_sep>self._statevector.reset_execution_results()<line_sep>methods=["lr" "fi" "oi"]# short for likelihood_ratio, fisher, observed_fisher alphas=[0.1 0.00001 0.9]# alpha shouldn't matter in statevector <for_stmt>alpha,method zip(alphas methods)<block_start>confint=qae.compute_confidence_interval(result alpha method)<line_sep># confidence interval based on statevector should be empty, as we are sure of the result self.assertAlmostEqual(confint[1]-confint[0] 0.0)<line_sep>self.assertAlmostEqual(confint[0] getattr(result key))<block_end># qasm simulator shots=100<line_sep>alpha=0.01<line_sep>qae.quantum_instance=self._qasm(shots)<line_sep>result=qae.estimate(estimation_problem)<for_stmt>method,expected_confint expect.items()<block_start>confint=qae.compute_confidence_interval(result alpha method)<line_sep>np.testing.assert_array_almost_equal(confint expected_confint)<line_sep>self.assertTrue(confint[0]<le>getattr(result key)<le>confint[1])<block_end><block_end><def_stmt>test_iqae_confidence_intervals self<block_start>"""End-to-end test for the IQAE confidence interval."""<line_sep>n=3<line_sep>qae=IterativeAmplitudeEstimation(0.1 0.01 quantum_instance=self._statevector)<line_sep>expected_confint=(0.1984050 0.3511015)<line_sep>estimation_problem=EstimationProblem(SineIntegral(n) objective_qubits=[n])<line_sep># statevector simulator result=qae.estimate(estimation_problem)<line_sep>self.assertGreaterEqual(self._statevector.time_taken 0.0)<line_sep>self._statevector.reset_execution_results()<line_sep>confint=result.confidence_interval<line_sep># confidence interval based on statevector should be empty, as we are sure of the result self.assertAlmostEqual(confint[1]-confint[0] 0.0)<line_sep>self.assertAlmostEqual(confint[0] result.estimation)<line_sep># qasm simulator shots=100<line_sep>qae.quantum_instance=self._qasm(shots)<line_sep>result=qae.estimate(estimation_problem)<line_sep>confint=result.confidence_interval<line_sep>np.testing.assert_array_almost_equal(confint expected_confint)<line_sep>self.assertTrue(confint[0]<le>result.estimation<le>confint[1])<block_end><block_end>@ddt<class_stmt>TestFasterAmplitudeEstimation(QiskitAlgorithmsTestCase)<block_start>"""Specific tests for Faster AE."""<def_stmt>test_rescaling self<block_start>"""Test the rescaling."""<line_sep>amplitude=0.8<line_sep>scaling=0.25<line_sep>circuit=QuantumCircuit(1)<line_sep>circuit.ry(2<times>np.arcsin(amplitude) 0)<line_sep>problem=EstimationProblem(circuit objective_qubits=[0])<line_sep>rescaled=problem.rescale(scaling)<line_sep>rescaled_amplitude=Statevector.from_instruction(rescaled.state_preparation).data[3]<line_sep>self.assertAlmostEqual(scaling<times>amplitude rescaled_amplitude)<block_end><def_stmt>test_run_without_rescaling self<block_start>"""Run Faster AE without rescaling if the amplitude is in [0, 1/4]."""<line_sep># construct estimation problem prob=0.11<line_sep>a_op=QuantumCircuit(1)<line_sep>a_op.ry(2<times>np.arcsin(np.sqrt(prob)) 0)<line_sep>problem=EstimationProblem(a_op objective_qubits=[0])<line_sep># construct algo without rescaling backend=BasicAer.get_backend("statevector_simulator")<line_sep>fae=FasterAmplitudeEstimation(0.1 1 rescale=<false> quantum_instance=backend)<line_sep># run the algo 
result=fae.estimate(problem)<line_sep># assert the result is correct self.assertAlmostEqual(result.estimation prob)<line_sep># assert no rescaling was used theta=np.mean(result.theta_intervals[-1])<line_sep>value_without_scaling=np.sin(theta)<power>2<line_sep>self.assertAlmostEqual(result.estimation value_without_scaling)<block_end><def_stmt>test_rescaling_with_custom_grover_raises self<block_start>"""Test that the rescaling option fails if a custom Grover operator is used."""<line_sep>prob=0.8<line_sep>a_op=BernoulliStateIn(prob)<line_sep>q_op=BernoulliGrover(prob)<line_sep>problem=EstimationProblem(a_op objective_qubits=[0] grover_operator=q_op)<line_sep># construct algo without rescaling backend=BasicAer.get_backend("statevector_simulator")<line_sep>fae=FasterAmplitudeEstimation(0.1 1 quantum_instance=backend)<line_sep># run the algo <with_stmt>self.assertWarns(Warning)<block_start>_=fae.estimate(problem)<block_end><block_end>@data(("statevector_simulator" 0.2) ("qasm_simulator" 0.199440))@unpack<def_stmt>test_good_state self backend_str expect<block_start>"""Test with a good state function."""<def_stmt>is_good_state bitstr<block_start><return>bitstr[1]<eq>"1"<block_end># construct the estimation problem where the second qubit is ignored a_op=QuantumCircuit(2)<line_sep>a_op.ry(2<times>np.arcsin(np.sqrt(0.2)) 0)<line_sep># oracle only affects first qubit oracle=QuantumCircuit(2)<line_sep>oracle.z(0)<line_sep># reflect only on first qubit q_op=GroverOperator(oracle a_op reflection_qubits=[0])<line_sep># but we measure both qubits (hence both are objective qubits) problem=EstimationProblem(a_op objective_qubits=[0 1] grover_operator=q_op is_good_state=is_good_state)<line_sep># construct algo backend=QuantumInstance(BasicAer.get_backend(backend_str) seed_simulator=2 seed_transpiler=2)<line_sep># cannot use rescaling with a custom grover operator fae=FasterAmplitudeEstimation(0.01 5 rescale=<false> quantum_instance=backend)<line_sep># run the algo result=fae.estimate(problem)<line_sep># assert the result is correct self.assertAlmostEqual(result.estimation expect places=5)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
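# --- Standalone sketch distilled from the tests above (values illustrative) --------
# Canonical amplitude estimation of a Bernoulli probability on the statevector
# simulator, outside the unittest harness.
def bernoulli_ae_example(prob=0.3, num_eval_qubits=3):
    backend = QuantumInstance(BasicAer.get_backend("statevector_simulator"))
    problem = EstimationProblem(
        BernoulliStateIn(prob),
        objective_qubits=[0],
        grover_operator=BernoulliGrover(prob),
    )
    ae = AmplitudeEstimation(num_eval_qubits)
    ae.quantum_instance = backend
    result = ae.estimate(problem)
    # the MLE post-processing is usually much closer to `prob` than the raw grid estimate
    return result.estimation, result.mle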
<import_from_future_stmt> absolute_import<import_stmt>importlib<import_stmt>tensorflow<as>tf<import_from_stmt>ntm_cell NTMCell<import_from_stmt>ntm NTM<import_stmt>os<import_stmt>re<line_sep># import sh # from smart_open import smart_open <import_stmt>shutil<import_from_stmt>utils pp<line_sep>flags=tf.app.flags<line_sep>flags.DEFINE_string("task" "copy" "Task to run [copy, recall]")<line_sep>flags.DEFINE_integer("epoch" 100000 "Epoch to train [100000]")<line_sep>flags.DEFINE_integer("input_dim" 10 "Dimension of input [10]")<line_sep>flags.DEFINE_integer("output_dim" 10 "Dimension of output [10]")<line_sep>flags.DEFINE_integer("min_length" 1 "Minimum length of input sequence [1]")<line_sep>flags.DEFINE_integer("max_length" 10 "Maximum length of output sequence [10]")<line_sep>flags.DEFINE_integer("controller_layer_size" 1 "The size of LSTM controller [1]")<line_sep>flags.DEFINE_integer("controller_dim" 100 "Dimension of LSTM controller [100]")<line_sep>flags.DEFINE_integer("write_head_size" 1 "The number of write head [1]")<line_sep>flags.DEFINE_integer("read_head_size" 1 "The number of read head [1]")<line_sep>flags.DEFINE_integer("test_max_length" 120 "Maximum length of output sequence [120]")<line_sep>flags.DEFINE_string("checkpoint_dir" "checkpoint" "Directory name to save the checkpoints [checkpoint]")<line_sep>flags.DEFINE_boolean("is_train" <false> "True for training, False for testing [False]")<line_sep>flags.DEFINE_boolean("continue_train" <none> "True to continue training from saved checkpoint. False for restarting. None for automatic [None]")<line_sep># Submit job to microsoft PAI cluster # Read/Write WebHDFS #flags.DEFINE_string("pai_data_dir", "", "PAI data directory") #flags.DEFINE_boolean("hdfs", False, "True if read/write files on webhdfs") FLAGS=flags.FLAGS<def_stmt>create_ntm config sess **ntm_args<block_start>cell=NTMCell(input_dim=config.input_dim output_dim=config.output_dim controller_layer_size=config.controller_layer_size controller_dim=config.controller_dim write_head_size=config.write_head_size read_head_size=config.read_head_size)<line_sep>scope=ntm_args.pop('scope' 'NTM-%s'%config.task)<line_sep>ntm=NTM(cell sess config.min_length config.max_length test_max_length=config.test_max_length scope=scope **ntm_args)<line_sep><return>cell ntm<block_end># Change hdfs url to webhdfs and change port <def_stmt>UrlConvert hdfspath<block_start>regex=re.compile('^hdfs://')<if_stmt>re.match(regex hdfspath)<block_start>webhdfs=hdfspath.replace('hdfs' 'webhdfs' 1).replace(':9000' ':50070' 1)<block_end><return>webhdfs<block_end>#def write_file_to_local(hdfspath, localpath): # lines = list() # for line in smart_open(UrlConvert(hdfspath)): # lines.append(line) # with open(localpath, 'wb+') as f: # f.writelines(lines) #def write_data_to_local(src, dest): # if not os.path.exists(dest): # os.makedirs(dest) # files = [line.rsplit(None,1)[-1] for line in sh.hdfs('dfs','-ls',src).split('\n') if len(line.rsplit(None,1))][1:] # for f in files: # print(f) # write_file_to_local(f, os.path.join(dest, f.split('/')[-1])) <def_stmt>main _<block_start>pp.pprint(flags.FLAGS.__flags)<with_stmt>tf.device('/cpu:0') tf.Session()<as>sess<block_start><try_stmt><block_start>task=importlib.import_module('tasks.%s'%FLAGS.task)<block_end><except_stmt>ImportError<block_start>print("task '%s' does not have implementation"%FLAGS.task)<line_sep><raise><block_end><if_stmt>FLAGS.is_train<block_start>cell,ntm=create_ntm(FLAGS sess)<line_sep>task.train(ntm FLAGS 
sess)<block_end><else_stmt><block_start>cell,ntm=create_ntm(FLAGS sess forward_only=<true>)<block_end>#if FLAGS.hdfs: # hdfspath = "%s/%s/%s_%s" % (FLAGS.pai_data_dir, FLAGS.checkpoint_dir, FLAGS.task, FLAGS.max_length) # localpath = "%s/%s_%s" % (FLAGS.checkpoint_dir, FLAGS.task, FLAGS.max_length) # write_data_to_local(hdfspath, localpath) ntm.load(FLAGS.checkpoint_dir FLAGS.task)<if_stmt>FLAGS.task<eq>'copy'<block_start>task.run(ntm int(FLAGS.test_max_length<times>1/3) sess)<line_sep>print<line_sep>task.run(ntm int(FLAGS.test_max_length<times>2/3) sess)<line_sep>print<line_sep>task.run(ntm int(FLAGS.test_max_length<times>3/3) sess)<block_end><else_stmt><block_start>task.run(ntm int(FLAGS.test_max_length) sess)<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>tf.app.run()<block_end>
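# --- Example invocations (flag names mirror the definitions above) ------------------
# The script file name `main.py` is an assumption; checkpoint paths come from
# --checkpoint_dir.
#
#   # train the copy task
#   python main.py --task copy --is_train True
#
#   # evaluate a trained model on longer sequences from the saved checkpoint
#   python main.py --task copy --is_train False --test_max_length 120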
<import_from_stmt>...utils.byte_io_mdl ByteIO<class_stmt>DataBlock<block_start><def_stmt>__init__ self valve_file info_block<block_start><import_from_stmt>..resource_types ValveCompiledResource<import_from_stmt>.compiled_file_header InfoBlock<line_sep>self._valve_file:ValveCompiledResource=valve_file<line_sep>self.info_block:InfoBlock=info_block<with_stmt>self._valve_file.reader.save_current_pos()<block_start>self._valve_file.reader.seek(self.info_block.absolute_offset)<line_sep>self.reader=ByteIO(self._valve_file.reader.read(self.info_block.block_size))<block_end>self.data={}<line_sep>self.parsed=<false><block_end><def_stmt>read self<block_start>self.parsed=<true><line_sep><raise>NotImplementedError()<block_end><def_stmt>__repr__ self<block_start>template='<{} {}>'<line_sep><return>template.format(type(self).__name__ self.info_block.block_name)<block_end><block_end>
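# --- Hedged sketch of a concrete subclass (illustrative only) -----------------------
# The block/field names below are invented; a real subclass would decode the block
# payload from self.reader into self.data and mark itself parsed.
class RawBytesBlock(DataBlock):
    def read(self):
        # keep the whole payload untouched; self.reader already holds exactly
        # this block's bytes (see __init__ above)
        self.data['raw'] = self.reader.read(self.info_block.block_size)
        self.parsed = True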
"""A collection of sphinx docstrings from the wild."""<import_stmt>ast<line_sep>FunctionDef=ast.FunctionDef<if_stmt>hasattr(ast 'AsyncFunctionDef')<block_start>FunctionDef=(ast.FunctionDef ast.AsyncFunctionDef)<block_end><def_stmt>publish_msgstr app source source_path source_line config settings# From https://github.com/sphinx-doc/sphinx # File: sphinx/transforms/il8n.py <block_start>"""Publish msgstr (single line) into docutils document :param sphinx.application.Sphinx app: sphinx application :param unicode source: source text :param unicode source_path: source path for warning indication :param source_line: source line for warning indication :param sphinx.config.Config config: sphinx config :param docutils.frontend.Values settings: docutils settings :return: document :rtype: docutils.nodes.document """<line_sep><ellipsis><line_sep># Expected item head to end with TokenType.COLON but was TokenType.WORD 'app' # noqa <block_end><def_stmt>_strip_basic_auth url# From https://github.com/sphinx-doc/sphinx # File: sphinx/ext/intersphinx.py <block_start>"""Returns *url* with basic auth credentials removed. Also returns the basic auth username and password if they're present in *url*. E.g.: https://user:[email protected] => https://example.com *url* need not include basic auth credentials. :param url: url which may or may not contain basic auth credentials :type url: ``str`` :return: *url* with any basic auth creds removed :rtype: ``str`` """<line_sep><ellipsis><block_end><def_stmt>extract_original_messages self# From https://github.com/sphinx-doc/sphinx # File: sphinx/addnodes.py <block_start>"""Extract translation messages. :returns: list of extracted messages or messages generator """<line_sep><ellipsis><block_end><def_stmt>read_requirements fh resolve=<false># From https://github.com/pypa/pipenv # File: pipenv/patched/safety/util.py <block_start>""" Reads requirements from a file like object and (optionally) from referenced files. :param fh: file like object to read from :param resolve: boolean. resolves referenced files. :return: generator """<line_sep># noqa <ellipsis><block_end><def_stmt>copytree self destination symlinks=<false># File: sphinx/testing/path.py <block_start>""" Recursively copy a directory to the given `destination`. If the given `destination` does not exist it will be created. :param symlinks: If ``True`` symbolic links in the source tree result in symbolic links in the destination tree otherwise the contents of the files pointed to by the symbolic links are copied. """<line_sep># Expected item to start with TokenType.COLON but was TokenType.INDENT <block_end><def_stmt>rmtree self ignore_errors=<false> onerror=<none># File: sphinx/testing/path.py <block_start>""" Removes the file or directory and any files or directories it may contain. :param ignore_errors: If ``True`` errors are silently ignored, otherwise an exception is raised in case an error occurs. :param onerror: A callback which gets called with the arguments `func`, `path` and `exc_info`. `func` is one of :func:`os.listdir`, :func:`os.remove` or :func:`os.rmdir`. `path` is the argument to the function which caused it to fail and `exc_info` is a tuple as returned by :func:`sys.exc_info`. 
"""<line_sep># Expected item to start with TokenType.COLON but was TokenType.INDENT <block_end><def_stmt>test_params request# File: sphinx/testing/fixtures.py <block_start>""" test parameters that is specified by 'pytest.mark.test_params' :param Union[str] shared_result: If the value is provided, app._status and app._warning objects will be shared in the parametrized test functions and/or test functions that have same 'shared_result' value. **NOTE**: You can not specify shared_result and srcdir in same time. """<line_sep># Expected item head to end with TokenType.COLON but was TokenType.WORD 'shared_result' <block_end><def_stmt>add_uids doctree condition# File: sphinx/versioning.py <block_start>"""Add a unique id to every node in the `doctree` which matches the condition and yield the nodes. :param doctree: A :class:`docutils.nodes.document` instance. :param condition: A callable which returns either ``True`` or ``False`` for a given node. """<line_sep># Expected item to start with TokenType.COLON but was TokenType.INDENT <block_end><def_stmt>merge_doctrees old new condition# File: sphinx/versioning.py <block_start>"""Merge the `old` doctree with the `new` one while looking at nodes matching the `condition`. Each node which replaces another one or has been added to the `new` doctree will be yielded. :param condition: A callable which returns either ``True`` or ``False`` for a given node. """<line_sep># Expected item to start with TokenType.COLON but was TokenType.INDENT <block_end><def_stmt>_read_from_url url config=<none># File: sphinx/ext/intersphinx.py <block_start>"""Reads data from *url* with an HTTP *GET*. This function supports fetching from resources which use basic HTTP auth as laid out by RFC1738 § 3.1. See § 5 for grammar definitions for URLs. .. seealso: https://www.ietf.org/rfc/rfc1738.txt :param url: URL of an HTTP resource :type url: ``str`` :return: data read from resource described by *url* :rtype: ``file``-like object """<line_sep># Expected item to start with TokenType.COLON but was TokenType.NEWLINE <block_end><def_stmt>_get_safe_url url# File: sphinx/ext/intersphinx.py <block_start>"""Gets version of *url* with basic auth passwords obscured. This function returns results suitable for printing and logging. E.g.: https://user:[email protected] => https://[email protected] :param url: a url :type url: ``str`` :return: *url* with password removed :rtype: ``str`` """<line_sep># Expected item to start with TokenType.COLON but was TokenType.NEWLINE <block_end><def_stmt>find_catalog_source_files *args# File: sphinx/util/i18n.py <block_start>""" :param list locale_dirs: list of path as `['locale_dir1', 'locale_dir2', ...]` to find translation catalogs. Each path contains a structure such as `<locale>/LC_MESSAGES/domain.po`. :param str locale: a language as `'en'` :param list domains: list of domain names to get. If empty list or None is specified, get all domain names. default is None. :param boolean gettext_compact: * False: keep domains directory structure (default). * True: domains in the sub directory will be merged into 1 file. :param boolean force_all: Set True if you want to get all catalogs rather than updated catalogs. default is False. :return: [CatalogInfo(), ...] 
"""<line_sep># Expected item head to end with TokenType.COLON but was TokenType.WORD 'locale' <block_end><def_stmt>get_full_module_name node# File: sphinx/util/nodes.py <block_start>""" return full module dotted path like: 'docutils.nodes.paragraph' :param nodes.Node node: target node :return: full module dotted path """<line_sep># Expected item head to end with TokenType.COLON but was TokenType.WORD 'node' <block_end><def_stmt>set_application self app# File: sphinx/parsers.py <block_start>"""set_application will be called from Sphinx to set app and other instance variables :param sphinx.application.Sphinx app: Sphinx application object """<line_sep># Expected item head to end with TokenType.COLON but was TokenType.WORD 'app' <block_end><def_stmt>write_bytes sef bytes append=<false># File: sphinx/testing/path.py <block_start>""" Writes the given `bytes` to the file. :param append: If ``True`` given `bytes` are added at the end of the file. """<line_sep># Expected item to start with TokenType.COLON but was TokenType.INDENT <block_end><def_stmt>repr_domxml node length=80# File: sphinx/util/nodes.py <block_start>""" return DOM XML representation of the specified node like: '<paragraph translatable="False"><inline classes="versionmodified">New in version...' :param nodes.Node node: target node :param int length: length of return value to be striped. if false-value is specified, repr_domxml returns full of DOM XML representation. :return: DOM XML representation """<line_sep># Expected item head to end with TokenType.COLON but was TokenType.WORD 'node' <block_end><def_stmt>docstrings <block_start>"""Get all of the docstrings in this file (including this one.) :return: The docstrings in this file. :rtype: List[str] """<with_stmt>open(__file__ 'r')<as>fin<block_start>data=fin.read()<block_end>this_script=ast.parse(data)<line_sep>functions=[x<for>x this_script.body<if>isinstance(x FunctionDef)]<line_sep><return>list(map(ast.get_docstring functions))<block_end>
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # <def_stmt>f_gold n<block_start><if_stmt>(n<eq>0)<block_start><return>"0"<line_sep><block_end>bin=""<while_stmt>(n<g>0)<block_start><if_stmt>(n&1<eq>0)<block_start>bin='0'+bin<block_end><else_stmt><block_start>bin='1'+bin<line_sep><block_end>n=n<rshift>1<line_sep><block_end><return>bin<line_sep><block_end>#TOFILL <if_stmt>__name__<eq>'__main__'<block_start>param=[(35 ) (17 ) (8 ) (99 ) (57 ) (39 ) (99 ) (14 ) (22 ) (7 )]<line_sep>n_success=0<for_stmt>i,parameters_set enumerate(param)<block_start><if_stmt>f_filled(*parameters_set)<eq>f_gold(*parameters_set)<block_start>n_success<augadd>1<block_end><block_end>print("#Results: %i, %i"%(n_success len(param)))<block_end>
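# --- Hedged illustration (not part of the evaluation template above) ---
# In this kind of scoring template, the #TOFILL marker is left for a candidate
# implementation named f_filled, which the __main__ block then compares against
# f_gold over the parameter sets. The marker itself stays untouched; the
# function below is only a hypothetical candidate that would satisfy the check.
def f_filled(n):
    # Python's built-in bin() returns '0b...'; strip the prefix and
    # special-case 0 to match f_gold's behaviour.
    return bin(n)[2:] if n > 0 else "0"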
<import_stmt>logging<import_from_stmt>datetime datetime timedelta<import_from_stmt>django.conf settings<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>django.db connection transaction<import_from_stmt>kitsune.questions.models Question Answer<import_from_stmt>kitsune.search.es7_utils index_objects_bulk<line_sep>log=logging.getLogger("k.cron")<class_stmt>Command(BaseCommand)<block_start>help="Archive all questions that were created over 180 days ago."<def_stmt>handle self **options# Set up logging so it doesn't send Ricky email. <block_start>logging.basicConfig(level=logging.ERROR)<line_sep># Get a list of ids of questions we're going to go change. We need # a list of ids so that we can feed it to the update, but then # also know what we need to update in the index. days_180=datetime.now()-timedelta(days=180)<line_sep>q_ids=list(Question.objects.filter(is_archived=<false>).filter(created__lte=days_180).values_list("id" flat=<true>))<if_stmt>q_ids<block_start>log.info("Updating %d questions" len(q_ids))<line_sep>sql=""" UPDATE questions_question SET is_archived = 1 WHERE id IN (%s) """%",".join(map(str q_ids))<line_sep>cursor=connection.cursor()<line_sep>cursor.execute(sql)<if_stmt><not>transaction.get_connection().in_atomic_block<block_start>transaction.commit()<block_end><if_stmt>settings.ES_LIVE_INDEXING# elastic v7 code: <block_start>answer_ids=list(Answer.objects.filter(question_id__in=q_ids).values_list("id" flat=<true>))<line_sep>index_objects_bulk.delay("QuestionDocument" q_ids)<line_sep>index_objects_bulk.delay("AnswerDocument" answer_ids)<block_end><block_end><block_end><block_end>
# -*- coding: utf-8 -*- """ Boxy Theme Extras """<import_stmt>sublime<import_stmt>sublime_plugin<import_from_stmt>collections OrderedDict<line_sep>NO_SELECTION=-1<line_sep>SUBLIME_LINTER='SublimeLinter'<line_sep>PLAIN_TASKS='PlainTasks'<line_sep>PLAIN_NOTES='PlainNotes'<line_sep>EXTRAS=OrderedDict([('PlainNotes' {'name':'Plain Notes' 'settings':'Note.sublime-settings' 'desc':'Choose a color scheme'}) ('PlainTasks' {'name':'Plain Tasks' 'settings':'PlainTasks.sublime-settings' 'desc':'Choose a color scheme'}) ('SublimeLinter' {'name':'Sublime Linter' 'settings':'SublimeLinter.sublime-settings' 'desc':'Activate a gutter theme' 'revert':'Revert the gutter theme to the defaults' 'boxy':'Packages/Boxy Theme/extras/SublimeLinter/Boxy.gutter-theme' 'default':'Packages/SublimeLinter/gutter-themes/Default/Default.gutter-theme'})])<line_sep>THEMES=['Boxy Monokai' 'Boxy Nova' 'Boxy Ocean' 'Boxy Solarized Dark' 'Boxy Solarized Light' 'Boxy Tomorrow' 'Boxy Yesterday']<def_stmt>get_settings pkg<block_start><return>sublime.load_settings(EXTRAS[pkg].get('settings'))<block_end><def_stmt>save_settings pkg<block_start><return>sublime.save_settings(EXTRAS[pkg].get('settings'))<block_end><def_stmt>get_theme pkg<block_start>settings=get_settings(pkg)<if_stmt>pkg<is>SUBLIME_LINTER<block_start>items=settings.get('user' '')<if_stmt>items<ne>''<block_start><return>items.get('gutter_theme' '')<block_end><block_end><if_stmt>pkg<in>(PLAIN_TASKS PLAIN_NOTES)<block_start><return>settings.get('color_scheme' '')<block_end><block_end><def_stmt>set_theme pkg path<block_start>settings=get_settings(pkg)<if_stmt>pkg<is>SUBLIME_LINTER<block_start>items=settings.get('user' '')<if_stmt>items<ne>''<block_start>items['gutter_theme']=path<line_sep><return>settings.set('user' items)<block_end><block_end><if_stmt>pkg<in>(PLAIN_TASKS PLAIN_NOTES)<block_start><return>settings.set('color_scheme' path)<block_end><block_end><def_stmt>activate_theme pkg path<block_start>set_theme(pkg path)<line_sep><return>save_settings(pkg)<block_end><def_stmt>revert_theme pkg path<block_start><if_stmt>path<is>''<block_start>get_settings(pkg).erase('color_scheme')<block_end><else_stmt><block_start>set_theme(pkg path)<block_end><return>save_settings(pkg)<block_end><class_stmt>BoxyExtrasCommand(sublime_plugin.WindowCommand)<block_start><def_stmt>display_list self extras<block_start>self.extras=extras<line_sep>self.quick_list=[]<line_sep>name=''<line_sep>desc=''<for_stmt>extra self.extras<block_start>name=self.extras[extra].get('name')<line_sep>desc=self.extras[extra].get('desc')<if_stmt>extra<is>SUBLIME_LINTER<block_start><if_stmt>get_theme(SUBLIME_LINTER)<eq>self.extras[SUBLIME_LINTER].get('boxy')<block_start>desc=self.extras[SUBLIME_LINTER].get('revert')<block_end><block_end>self.quick_list.append([name desc])<block_end>self.window.show_quick_panel(self.quick_list self.on_done)<block_end><def_stmt>on_done self index<block_start><if_stmt>index<is>NO_SELECTION<block_start><return><block_end><if_stmt>index<is>0<block_start>self.window.run_command('boxy_plain_notes')<block_end><if_stmt>index<is>1<block_start>self.window.run_command('boxy_plain_tasks')<block_end><if_stmt>index<is>2<block_start>current=get_theme(SUBLIME_LINTER)<line_sep>boxy=self.extras[SUBLIME_LINTER].get('boxy')<line_sep>default=self.extras[SUBLIME_LINTER].get('default')<if_stmt>current<eq>boxy<block_start><return>revert_theme(SUBLIME_LINTER default)<block_end><else_stmt><block_start><return>activate_theme(SUBLIME_LINTER boxy)<block_end><block_end><block_end><def_stmt>run 
self<block_start>self.display_list(EXTRAS)<block_end><block_end><class_stmt>BoxyPlainTasksCommand(sublime_plugin.WindowCommand)<block_start><def_stmt>display_list self themes<block_start>self.themes=themes<line_sep>self.initial_theme=get_theme(PLAIN_TASKS)<line_sep>quick_list=[theme<for>theme self.themes]<line_sep>self.quick_list=quick_list<line_sep>self.window.show_quick_panel(quick_list self.on_done on_highlight=self.on_highlighted)<block_end><def_stmt>on_highlighted self index<block_start>set_theme(PLAIN_TASKS self._quick_list_to_theme(index))<block_end><def_stmt>on_done self index<block_start><if_stmt>index<is>NO_SELECTION<block_start>revert_theme(PLAIN_TASKS self.initial_theme)<line_sep><return><block_end>activate_theme(PLAIN_TASKS self._quick_list_to_theme(index))<block_end><def_stmt>_quick_list_to_theme self index<block_start><return>('Packages/Boxy Theme/extras/PlainTasks/%s.hidden-tmTheme'%self.quick_list[index])<block_end><def_stmt>run self<block_start>self.display_list(THEMES)<block_end><block_end><class_stmt>BoxyPlainNotesCommand(sublime_plugin.WindowCommand)<block_start><def_stmt>display_list self themes<block_start>self.themes=themes<line_sep>self.initial_theme=get_theme(PLAIN_NOTES)<line_sep>quick_list=[theme<for>theme self.themes]<line_sep>self.quick_list=quick_list<line_sep>self.window.show_quick_panel(quick_list self.on_done on_highlight=self.on_highlighted)<block_end><def_stmt>on_highlighted self index<block_start>set_theme(PLAIN_NOTES self._quick_list_to_theme(index))<block_end><def_stmt>on_done self index<block_start><if_stmt>index<is>NO_SELECTION<block_start>revert_theme(PLAIN_NOTES self.initial_theme)<line_sep><return><block_end>activate_theme(PLAIN_NOTES self._quick_list_to_theme(index))<block_end><def_stmt>_quick_list_to_theme self index<block_start><return>('Packages/Boxy Theme/schemes/%s.tmTheme'%self.quick_list[index])<block_end><def_stmt>run self<block_start>self.display_list(THEMES)<block_end><block_end>
""" Jax integration. Importing this module registers the Jax backend with `phi.math`. Without this, Jax tensors cannot be handled by `phi.math` functions. To make Jax the default backend, import `phi.jax.flow`. """<import_from_stmt>phi math<as>_math<import_from_stmt>._jax_backend JaxBackend<as>_JaxBackend<line_sep>JAX=_JaxBackend()<line_sep>"""Backend for Jax operations."""<line_sep>_math.backend.BACKENDS.append(JAX)<line_sep>__all__=[key<for>key globals().keys()<if><not>key.startswith('_')]<line_sep>
<import_stmt>numpy<as>np<import_stmt>cv2<import_stmt>carla<import_from_stmt>camera.parameters CameraParams IntrinsicParams ExtrinsicParams<import_from_stmt>camera.coordinate_transformation CoordinateTransformation rotationMatrix3D<def_stmt>rad_lim rad<block_start><while_stmt>(rad<g>np.pi)<block_start>rad<augsub>(2<times>np.pi)<block_end><while_stmt>(rad<l>-np.pi)<block_start>rad<augadd>(2<times>np.pi)<block_end><return>rad<block_end><def_stmt>getLinearPose pose1 pose2 min_dist<block_start>x1,x2=pose1.location.x pose2.location.x<line_sep>y1,y2=pose1.location.y pose2.location.y<line_sep>z1,z2=pose1.location.z pose2.location.z<line_sep>roll1,roll2=np.deg2rad(pose1.rotation.roll) np.deg2rad(pose2.rotation.roll)<line_sep>pitch1,pitch2,=np.deg2rad(pose1.rotation.pitch) np.deg2rad(pose2.rotation.pitch)<line_sep>yaw1,yaw2,=np.deg2rad(pose1.rotation.yaw) np.deg2rad(pose2.rotation.yaw)<line_sep>distance=pose1.location.distance(pose2.location)<line_sep>total=int(distance/min_dist)<line_sep>result_list=[]<line_sep>tt=np.arange(total)/total<line_sep>x,y,z=tt<times>x2+(1-tt)<times>x1 tt<times>y2+(1-tt)<times>y1 tt<times>z2+(1-tt)<times>z1<line_sep>roll=np.rad2deg(rad_lim(roll2-roll1)<times>tt+roll1)<line_sep>pitch=np.rad2deg(rad_lim(pitch2-pitch1)<times>tt+pitch1)<line_sep>yaw=np.rad2deg(rad_lim(yaw2-yaw1)<times>tt+yaw1)<for_stmt>i range(total)<block_start>location=carla.Location(x=x[i] y=y[i] z=z[i])<line_sep>rotation=carla.Rotation(roll=roll[i] pitch=pitch[i] yaw=yaw[i])<line_sep>result_list.append(carla.Transform(location rotation))<block_end><return>result_list<block_end><class_stmt>CollectPerspectiveImage(object)<block_start><def_stmt>__init__ self param sensor<block_start>self.longitudinal_sample_number_near=param.longitudinal_sample_number_near<line_sep>self.longitudinal_sample_number_far=param.longitudinal_sample_number_far<line_sep>self.vehicle_half_width=param.vehicle_width/2<line_sep>self.lateral_step_factor=param.lateral_step_factor<line_sep>self.lateral_sample_array=np.linspace(-self.vehicle_half_width self.vehicle_half_width param.lateral_sample_number)<line_sep>self.sensor=sensor<line_sep>self.camera_params=CameraParams(IntrinsicParams(sensor) ExtrinsicParams(sensor))<line_sep>self.img_width=eval(sensor.attributes['image_size_x'])<line_sep>self.img_height=eval(sensor.attributes['image_size_y'])<line_sep>self.max_pixel=np.array([self.img_height self.img_width]).reshape([2 1])<line_sep>self.min_pixel=np.zeros((2 1))<line_sep>self.empty_image=np.zeros((self.img_height<floordiv>2 self.img_width<floordiv>2) dtype=np.dtype("uint8"))<block_end><def_stmt>data_augmentation self traj_pose_list<block_start>result_list=[]<for_stmt>i range(len(traj_pose_list)-1)<block_start>p1=traj_pose_list[i][1]<line_sep>p2=traj_pose_list[i+1][1]<if_stmt>float(i)/len(traj_pose_list)<l>0.4<block_start>min_dist=0.04<block_end><elif_stmt>float(i)/len(traj_pose_list)<l>0.6<block_start>min_dist=0.08<block_end><else_stmt><block_start>min_dist=0.12<block_end>result_list.extend(getLinearPose(p1 p2 min_dist))<block_end><return>result_list<block_end><def_stmt>drawDestInImage self dest_vec location rotation<block_start>empty_image=np.zeros((self.img_height<floordiv>2 self.img_width<floordiv>2 3) dtype=np.dtype("uint8"))<line_sep>R=rotationMatrix3D(np.deg2rad(rotation[2]) np.deg2rad(rotation[0]) np.deg2rad(rotation[1]))<line_sep>t=location.reshape(3 1)<line_sep>vehicle_vec=CoordinateTransformation.world3DToCamera3D(dest_vec R t)<line_sep>pixel_vec=CoordinateTransformation.world3DToImage2D(vehicle_vec self.camera_params.K 
self.camera_params.R self.camera_params.t)<line_sep>pixel_vec=pixel_vec[::-1 :]<line_sep>x_pixel=pixel_vec.astype(int)[0 0]<line_sep>y_pixel=pixel_vec.astype(int)[1 0]<line_sep>#print(dest_vec,pixel_vec) x_pixel=np.clip(x_pixel 10 self.img_height-10)<line_sep>y_pixel=np.clip(y_pixel 10 self.img_width-10)<line_sep>x_pixel,y_pixel=np.meshgrid(np.arange(max(0 x_pixel<floordiv>2-5) min(self.img_height<floordiv>2-1 x_pixel<floordiv>2+5)) np.arange(max(0 y_pixel<floordiv>2-5) min(self.img_width<floordiv>2-1 y_pixel<floordiv>2+5)) indexing='ij')<line_sep>empty_image[x_pixel y_pixel 2]=255<line_sep><return>cv2.resize(empty_image (self.img_width self.img_height) interpolation=cv2.INTER_CUBIC)<block_end><def_stmt>drawLineInImage self traj_pose vehicle_transform#traj_position = traj_pose.location <block_start>traj_vec=np.array([traj_pose.location.x traj_pose.location.y traj_pose.location.z]).reshape(3 1)<line_sep>rotation=vehicle_transform.rotation<line_sep>location=vehicle_transform.location<line_sep>R=rotationMatrix3D(np.deg2rad(rotation.roll) np.deg2rad(rotation.pitch) np.deg2rad(rotation.yaw))<line_sep>t=np.array([location.x location.y location.z]).reshape(3 1)<line_sep># along lateral theta=np.deg2rad(traj_pose.rotation.yaw+90)<line_sep>start_vec=np.array([self.vehicle_half_width<times>np.cos(theta) self.vehicle_half_width<times>np.sin(theta) 0]).reshape(3 1)+traj_vec<line_sep>start_vehicle_vec=CoordinateTransformation.world3DToCamera3D(start_vec R t)<line_sep>start_pixel_vec=CoordinateTransformation.world3DToImage2D(start_vehicle_vec self.camera_params.K self.camera_params.R self.camera_params.t)<line_sep>start_pixel_vec=start_pixel_vec[::-1 :]<line_sep>theta=np.deg2rad(traj_pose.rotation.yaw-90)<line_sep>end_vec=np.array([self.vehicle_half_width<times>np.cos(theta) self.vehicle_half_width<times>np.sin(theta) 0]).reshape(3 1)+traj_vec<line_sep>end_vehicle_vec=CoordinateTransformation.world3DToCamera3D(end_vec R t)<line_sep>end_pixel_vec=CoordinateTransformation.world3DToImage2D(end_vehicle_vec self.camera_params.K self.camera_params.R self.camera_params.t)<line_sep>end_pixel_vec=end_pixel_vec[::-1 :]<line_sep>flag1=(start_pixel_vec<ge>self.min_pixel).all()<and>(start_pixel_vec<l>self.max_pixel).all()<line_sep>flag2=(end_pixel_vec<ge>self.min_pixel).all()<and>(end_pixel_vec<l>self.max_pixel).all()<if_stmt><not>flag1<and><not>flag2<block_start><return><block_end>length=np.linalg.norm(end_pixel_vec-start_pixel_vec)<line_sep>direction=(end_pixel_vec-start_pixel_vec)/length<line_sep>lateral_sample_number=round(length/self.lateral_step_factor)+1<line_sep>distance_array=np.linspace(0 length lateral_sample_number)<line_sep>pixel_vec=start_pixel_vec+distance_array<times>direction<line_sep>x_pixel=pixel_vec.astype(int)[0]<line_sep>y_pixel=pixel_vec.astype(int)[1]<line_sep>mask=np.where((x_pixel<ge>0)&(x_pixel<l>self.img_height))[0]<line_sep>x_pixel=x_pixel[mask]<line_sep>y_pixel=y_pixel[mask]<line_sep>mask=np.where((y_pixel<ge>0)&(y_pixel<l>self.img_width))[0]<line_sep>x_pixel=x_pixel[mask]<line_sep>y_pixel=y_pixel[mask]<line_sep>self.empty_image[x_pixel<floordiv>2 y_pixel<floordiv>2]=255<line_sep>self.empty_image[np.clip(x_pixel<floordiv>2+1 0 self.img_height<floordiv>2-1) y_pixel<floordiv>2]=255<line_sep>self.empty_image[np.max(x_pixel<floordiv>2-1 0) y_pixel<floordiv>2]=255<line_sep><return><block_end><def_stmt>getPM self traj_pose_list vehicle_transform<block_start>self.empty_image=np.zeros((self.img_height<floordiv>2 self.img_width<floordiv>2) 
dtype=np.dtype("uint8"))<line_sep>aug_traj_pose_list=self.data_augmentation(traj_pose_list)<for_stmt>traj_pose aug_traj_pose_list<block_start>self.drawLineInImage(traj_pose vehicle_transform)<block_end>kernel=np.ones((5 5 ) np.uint8)<line_sep>self.empty_image=cv2.dilate(self.empty_image kernel iterations=1)<line_sep>self.empty_image=cv2.erode(self.empty_image kernel iterations=1)<line_sep><return>cv2.resize(self.empty_image (self.img_width self.img_height) interpolation=cv2.INTER_CUBIC)<block_end><block_end><class_stmt>InversePerspectiveMapping(object)<block_start><def_stmt>__init__ self param sensor<block_start>self.sensor=sensor<line_sep>self.camera_params=CameraParams(IntrinsicParams(sensor) ExtrinsicParams(sensor))<line_sep>self.img_width=400<line_sep>self.img_height=200<line_sep>self.empty_image=np.zeros((self.img_height self.img_width) dtype=np.uint8)<line_sep>self.longutudinal_length=param.longitudinal_length<line_sep>self.ksize=param.ksize<line_sep>f=float(self.img_height)/self.longutudinal_length<line_sep>self.pesudo_K=np.array([[f 0 self.img_width/2] [0 f self.img_height] [0 0 1]])<line_sep>self.reverseXY=rotationMatrix3D(0 0 -np.pi/2)<block_end><def_stmt>getIPM self image<block_start>self.empty_image=np.zeros((self.img_height self.img_width) dtype=np.uint8)<line_sep>index_array=np.argwhere(image<g>200)<line_sep>index_array=index_array[: :2]<line_sep>index_array=np.unique(index_array axis=0)<line_sep>index_array=np.array([index_array[: 1] index_array[: 0]])<line_sep>vehicle_vec=CoordinateTransformation.image2DToWorld3D2(index_array self.camera_params.K self.camera_params.R self.camera_params.t)<line_sep>vehicle_vec[: 2 0]=1.0<line_sep>temp=np.dot(self.pesudo_K self.reverseXY)<line_sep>vehicle_vec=np.squeeze(vehicle_vec axis=2)<line_sep>new_image_vec=np.dot(temp vehicle_vec.T)<line_sep>new_image_vec=new_image_vec[:2 :]<line_sep>new_image_vec=new_image_vec[::-1 :]<line_sep>new_image_y_pixel=new_image_vec[0 :].astype(int)<line_sep>new_image_x_pixel=new_image_vec[1 :].astype(int)<line_sep>#self.empty_image[new_image_y_pixel, new_image_x_pixel] = 255 mask=np.where((new_image_x_pixel<ge>0)&(new_image_x_pixel<l>self.img_width))[0]<line_sep>new_image_x_pixel=new_image_x_pixel[mask]<line_sep>new_image_y_pixel=new_image_y_pixel[mask]<line_sep>mask=np.where((new_image_y_pixel<ge>0)&(new_image_y_pixel<l>self.img_height))[0]<line_sep>new_image_x_pixel=new_image_x_pixel[mask]<line_sep>new_image_y_pixel=new_image_y_pixel[mask]<line_sep>self.empty_image[new_image_y_pixel new_image_x_pixel]=255<line_sep>self.empty_image[np.clip(new_image_y_pixel+1 0 self.img_height-1) new_image_x_pixel]=255<line_sep>self.empty_image[np.clip(new_image_y_pixel-1 0 self.img_height-1) new_image_x_pixel]=255<line_sep>#self.empty_image = cv2.GaussianBlur(self.empty_image, (self.ksize, self.ksize), 25) <return>self.empty_image<block_end><def_stmt>get_cost_map self ipm lidar<block_start>lidar=-lidar<line_sep>mask=np.where((lidar[: 0]<g>1.2)|(lidar[: 0]<l>-1.2)|(lidar[: 1]<g>2.0)|(lidar[: 1]<l>-4.0))[0]<line_sep>lidar=lidar[mask :]<line_sep>mask=np.where(lidar[: 2]<g>-1.95)[0]<line_sep>lidar=lidar[mask :]<line_sep>img2=np.zeros((self.img_height self.img_width) np.uint8)<line_sep>img2.fill(255)<line_sep>pixel_per_meter=float(self.img_height)/self.longutudinal_length<line_sep>u=(self.img_height-lidar[: 1]<times>pixel_per_meter).astype(int)<line_sep>v=(-lidar[: 
0]<times>pixel_per_meter+self.img_width<floordiv>2).astype(int)<line_sep>mask=np.where((u<ge>0)&(u<l>self.img_height))[0]<line_sep>u=u[mask]<line_sep>v=v[mask]<line_sep>mask=np.where((v<ge>0)&(v<l>self.img_width))[0]<line_sep>u=u[mask]<line_sep>v=v[mask]<line_sep>img2[u v]=0<line_sep>#print(u,v) kernel=np.ones((17 17) np.uint8)<line_sep>img2=cv2.erode(img2 kernel iterations=1)<line_sep>kernel_size=(3 3)<line_sep>img=cv2.dilate(ipm kernel_size iterations=3)<line_sep>img=cv2.addWeighted(img 0.5 img2 0.5 0)<line_sep>mask=np.where((img2<l>50))<line_sep>u=mask[0]<line_sep>v=mask[1]<line_sep>img[u v]=0<line_sep>#kernel_size = (17, 17) #kernel_size = (9, 9) #sigma = 9#21 #img = cv2.GaussianBlur(img, kernel_size, sigma) <return>img<block_end><block_end>
# Implementation of SoftTriple Loss <import_stmt>math<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_from_stmt>torch.nn.parameter Parameter<import_from_stmt>torch.nn init<class_stmt>SoftTriple(nn.Module)<block_start><def_stmt>__init__ self la gamma tau margin dim cN K<block_start>super(SoftTriple self).__init__()<line_sep>self.la=la<line_sep>self.gamma=1./gamma<line_sep>self.tau=tau<line_sep>self.margin=margin<line_sep>self.cN=cN<line_sep>self.K=K<line_sep>self.fc=Parameter(torch.Tensor(dim cN<times>K))<line_sep>self.weight=torch.zeros(cN<times>K cN<times>K dtype=torch.bool).cuda()<for_stmt>i range(0 cN)<block_start><for_stmt>j range(0 K)<block_start>self.weight[i<times>K+j i<times>K+j+1:(i+1)<times>K]=1<block_end><block_end>init.kaiming_uniform_(self.fc a=math.sqrt(5))<line_sep><return><block_end><def_stmt>forward self input target<block_start>centers=F.normalize(self.fc p=2 dim=0)<line_sep>simInd=input.matmul(centers)<line_sep>simStruc=simInd.reshape(-1 self.cN self.K)<line_sep>prob=F.softmax(simStruc<times>self.gamma dim=2)<line_sep>simClass=torch.sum(prob<times>simStruc dim=2)<line_sep>marginM=torch.zeros(simClass.shape).cuda()<line_sep>marginM[torch.arange(0 marginM.shape[0]) target]=self.margin<line_sep>lossClassify=F.cross_entropy(self.la<times>(simClass-marginM) target)<if_stmt>self.tau<g>0<and>self.K<g>1<block_start>simCenter=centers.t().matmul(centers)<line_sep>reg=torch.sum(torch.sqrt(2.0+1e-5-2.<times>simCenter[self.weight]))/(self.cN<times>self.K<times>(self.K-1.))<line_sep><return>lossClassify+self.tau<times>reg<block_end><else_stmt><block_start><return>lossClassify<block_end><block_end><block_end>
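# --- Hedged usage sketch (separate from the class above) ---
# Assumptions: a CUDA device is available, since the class allocates its mask
# and margin tensors with .cuda(); embeddings are L2-normalized before being
# passed in; the hyperparameter values below are illustrative choices, not
# values taken from this file.
import torch
import torch.nn.functional as F

num_classes, embed_dim, batch_size = 98, 64, 32
criterion = SoftTriple(la=20, gamma=0.1, tau=0.2, margin=0.01,
                       dim=embed_dim, cN=num_classes, K=10).cuda()

embeddings = F.normalize(torch.randn(batch_size, embed_dim, device="cuda"), p=2, dim=1)
labels = torch.randint(0, num_classes, (batch_size,), device="cuda")

loss = criterion(embeddings, labels)   # classification term (+ center regularizer when tau > 0 and K > 1)
loss.backward()                        # gradients flow into criterion.fc (the learnable class centers)
# Note: criterion.fc is a Parameter, so pass criterion.parameters() to the
# optimizer alongside the embedding network's parameters.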
<import_from_stmt>pandas timedelta_range<import_stmt>pandas._testing<as>tm<class_stmt>TestPickle<block_start><def_stmt>test_pickle_after_set_freq self<block_start>tdi=timedelta_range("1 day" periods=4 freq="s")<line_sep>tdi=tdi._with_freq(<none>)<line_sep>res=tm.round_trip_pickle(tdi)<line_sep>tm.assert_index_equal(res tdi)<block_end><block_end>
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) <import_from_stmt>spack *<class_stmt>Ply(AutotoolsPackage)<block_start>"""A light-weight dynamic tracer for Linux that leverages the kernel's BPF VM in concert with kprobes and tracepoints to attach probes to arbitrary points in the kernel."""<line_sep>homepage="https://github.com/iovisor/ply"<line_sep>git="https://github.com/iovisor/ply.git"<line_sep>version('2.1.1' commit='<PASSWORD>')<line_sep>depends_on('autoconf' type='build')<line_sep>depends_on('automake' type='build')<line_sep>depends_on('libtool' type='build')<line_sep>depends_on('m4' type='build')<def_stmt>autoreconf self spec prefix<block_start>bash=which("bash")<line_sep>bash('./autogen.sh')<block_end><block_end>
# Copyright 2017 Google LLC All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. <import_stmt>datetime<import_stmt>mock<import_stmt>pytest<def_stmt>_make_base_query *args **kwargs<block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep><return>BaseQuery(*args **kwargs)<block_end><def_stmt>_make_base_query_all_fields limit=9876 offset=12 skip_fields=() parent=<none> all_descendants=<true> <block_start>kwargs={"projection":mock.sentinel.projection "field_filters":mock.sentinel.filters "orders":mock.sentinel.orders "limit":limit "offset":offset "start_at":mock.sentinel.start_at "end_at":mock.sentinel.end_at "all_descendants":all_descendants }<for_stmt>field skip_fields<block_start>kwargs.pop(field)<block_end><if_stmt>parent<is><none><block_start>parent=mock.sentinel.parent<block_end><return>_make_base_query(parent **kwargs)<block_end><def_stmt>test_basequery_constructor_defaults <block_start>query=_make_base_query(mock.sentinel.parent)<assert_stmt>query._parent<is>mock.sentinel.parent<assert_stmt>query._projection<is><none><assert_stmt>query._field_filters<eq>()<assert_stmt>query._orders<eq>()<assert_stmt>query._limit<is><none><assert_stmt>query._offset<is><none><assert_stmt>query._start_at<is><none><assert_stmt>query._end_at<is><none><assert_stmt><not>query._all_descendants<block_end><def_stmt>test_basequery_constructor_explicit <block_start>limit=234<line_sep>offset=56<line_sep>query=_make_base_query_all_fields(limit=limit offset=offset)<assert_stmt>query._parent<is>mock.sentinel.parent<assert_stmt>query._projection<is>mock.sentinel.projection<assert_stmt>query._field_filters<is>mock.sentinel.filters<assert_stmt>query._orders<eq>mock.sentinel.orders<assert_stmt>query._limit<eq>limit<assert_stmt>query._offset<eq>offset<assert_stmt>query._start_at<is>mock.sentinel.start_at<assert_stmt>query._end_at<is>mock.sentinel.end_at<assert_stmt>query._all_descendants<block_end><def_stmt>test_basequery__client_property <block_start>parent=mock.Mock(_client=mock.sentinel.client spec=["_client"])<line_sep>query=_make_base_query(parent)<assert_stmt>query._client<is>mock.sentinel.client<block_end><def_stmt>test_basequery___eq___other_type <block_start>query=_make_base_query_all_fields()<line_sep>other=object()<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_parent <block_start>parent=mock.sentinel.parent<line_sep>other_parent=mock.sentinel.other_parent<line_sep>query=_make_base_query_all_fields(parent=parent)<line_sep>other=_make_base_query_all_fields(parent=other_parent)<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_projection <block_start>parent=mock.sentinel.parent<line_sep>query=_make_base_query_all_fields(parent=parent skip_fields=("projection" ))<line_sep>query._projection=mock.sentinel.projection<line_sep>other=_make_base_query_all_fields(parent=parent skip_fields=("projection" 
))<line_sep>other._projection=mock.sentinel.other_projection<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_field_filters <block_start>parent=mock.sentinel.parent<line_sep>query=_make_base_query_all_fields(parent=parent skip_fields=("field_filters" ))<line_sep>query._field_filters=mock.sentinel.field_filters<line_sep>other=_make_base_query_all_fields(parent=parent skip_fields=("field_filters" ))<line_sep>other._field_filters=mock.sentinel.other_field_filters<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_orders <block_start>parent=mock.sentinel.parent<line_sep>query=_make_base_query_all_fields(parent=parent skip_fields=("orders" ))<line_sep>query._orders=mock.sentinel.orders<line_sep>other=_make_base_query_all_fields(parent=parent skip_fields=("orders" ))<line_sep>other._orders=mock.sentinel.other_orders<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_limit <block_start>parent=mock.sentinel.parent<line_sep>query=_make_base_query_all_fields(parent=parent limit=10)<line_sep>other=_make_base_query_all_fields(parent=parent limit=20)<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_offset <block_start>parent=mock.sentinel.parent<line_sep>query=_make_base_query_all_fields(parent=parent offset=10)<line_sep>other=_make_base_query_all_fields(parent=parent offset=20)<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_start_at <block_start>parent=mock.sentinel.parent<line_sep>query=_make_base_query_all_fields(parent=parent skip_fields=("start_at" ))<line_sep>query._start_at=mock.sentinel.start_at<line_sep>other=_make_base_query_all_fields(parent=parent skip_fields=("start_at" ))<line_sep>other._start_at=mock.sentinel.other_start_at<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_end_at <block_start>parent=mock.sentinel.parent<line_sep>query=_make_base_query_all_fields(parent=parent skip_fields=("end_at" ))<line_sep>query._end_at=mock.sentinel.end_at<line_sep>other=_make_base_query_all_fields(parent=parent skip_fields=("end_at" ))<line_sep>other._end_at=mock.sentinel.other_end_at<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___different_all_descendants <block_start>parent=mock.sentinel.parent<line_sep>query=_make_base_query_all_fields(parent=parent all_descendants=<true>)<line_sep>other=_make_base_query_all_fields(parent=parent all_descendants=<false>)<assert_stmt><not>(query<eq>other)<block_end><def_stmt>test_basequery___eq___hit <block_start>query=_make_base_query_all_fields()<line_sep>other=_make_base_query_all_fields()<assert_stmt>query<eq>other<block_end><def_stmt>_compare_queries query1 query2 *attr_names<block_start>attrs1=query1.__dict__.copy()<line_sep>attrs2=query2.__dict__.copy()<assert_stmt>len(attrs1)<eq>len(attrs2)<line_sep># The only different should be in ``attr_name``. 
<for_stmt>attr_name attr_names<block_start>attrs1.pop(attr_name)<line_sep>attrs2.pop(attr_name)<block_end><for_stmt>key,value attrs1.items()<block_start><assert_stmt>value<is>attrs2[key]<block_end><block_end><def_stmt>test_basequery_select_invalid_path <block_start>query=_make_base_query(mock.sentinel.parent)<with_stmt>pytest.raises(ValueError)<block_start>query.select(["*"])<block_end><block_end><def_stmt>test_basequery_select <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>query1=_make_base_query_all_fields(all_descendants=<true>)<line_sep>field_paths2=["foo" "bar"]<line_sep>query2=query1.select(field_paths2)<assert_stmt>query2<is><not>query1<assert_stmt>isinstance(query2 BaseQuery)<assert_stmt>query2._projection<eq>_make_projection_for_select(field_paths2)<line_sep>_compare_queries(query1 query2 "_projection")<line_sep># Make sure it overrides. field_paths3=["foo.baz"]<line_sep>query3=query2.select(field_paths3)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<assert_stmt>query3._projection<eq>_make_projection_for_select(field_paths3)<line_sep>_compare_queries(query2 query3 "_projection")<block_end><def_stmt>test_basequery_where_invalid_path <block_start>query=_make_base_query(mock.sentinel.parent)<with_stmt>pytest.raises(ValueError)<block_start>query.where("*" "==" 1)<block_end><block_end><def_stmt>test_basequery_where <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>query_inst=_make_base_query_all_fields(skip_fields=("field_filters" ) all_descendants=<true>)<line_sep>new_query=query_inst.where("power.level" ">" 9000)<assert_stmt>query_inst<is><not>new_query<assert_stmt>isinstance(new_query BaseQuery)<assert_stmt>len(new_query._field_filters)<eq>1<line_sep>field_pb=new_query._field_filters[0]<line_sep>expected_pb=query.StructuredQuery.FieldFilter(field=query.StructuredQuery.FieldReference(field_path="power.level") op=StructuredQuery.FieldFilter.Operator.GREATER_THAN value=document.Value(integer_value=9000) )<assert_stmt>field_pb<eq>expected_pb<line_sep>_compare_queries(query_inst new_query "_field_filters")<block_end><def_stmt>_where_unary_helper value op_enum op_string="=="<block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<line_sep>query_inst=_make_base_query_all_fields(skip_fields=("field_filters" ))<line_sep>field_path="feeeld"<line_sep>new_query=query_inst.where(field_path op_string value)<assert_stmt>query_inst<is><not>new_query<assert_stmt>isinstance(new_query BaseQuery)<assert_stmt>len(new_query._field_filters)<eq>1<line_sep>field_pb=new_query._field_filters[0]<line_sep>expected_pb=StructuredQuery.UnaryFilter(field=StructuredQuery.FieldReference(field_path=field_path) op=op_enum)<assert_stmt>field_pb<eq>expected_pb<line_sep>_compare_queries(query_inst new_query "_field_filters")<block_end><def_stmt>test_basequery_where_eq_null <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<line_sep>op_enum=StructuredQuery.UnaryFilter.Operator.IS_NULL<line_sep>_where_unary_helper(<none> op_enum)<block_end><def_stmt>test_basequery_where_gt_null <block_start><with_stmt>pytest.raises(ValueError)<block_start>_where_unary_helper(<none> 0 
op_string=">")<block_end><block_end><def_stmt>test_basequery_where_eq_nan <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<line_sep>op_enum=StructuredQuery.UnaryFilter.Operator.IS_NAN<line_sep>_where_unary_helper(float("nan") op_enum)<block_end><def_stmt>test_basequery_where_le_nan <block_start><with_stmt>pytest.raises(ValueError)<block_start>_where_unary_helper(float("nan") 0 op_string="<=")<block_end><block_end><def_stmt>test_basequery_where_w_delete <block_start><import_from_stmt>google.cloud.firestore_v1 DELETE_FIELD<with_stmt>pytest.raises(ValueError)<block_start>_where_unary_helper(DELETE_FIELD 0)<block_end><block_end><def_stmt>test_basequery_where_w_server_timestamp <block_start><import_from_stmt>google.cloud.firestore_v1 SERVER_TIMESTAMP<with_stmt>pytest.raises(ValueError)<block_start>_where_unary_helper(SERVER_TIMESTAMP 0)<block_end><block_end><def_stmt>test_basequery_where_w_array_remove <block_start><import_from_stmt>google.cloud.firestore_v1 ArrayRemove<with_stmt>pytest.raises(ValueError)<block_start>_where_unary_helper(ArrayRemove([1 3 5]) 0)<block_end><block_end><def_stmt>test_basequery_where_w_array_union <block_start><import_from_stmt>google.cloud.firestore_v1 ArrayUnion<with_stmt>pytest.raises(ValueError)<block_start>_where_unary_helper(ArrayUnion([2 4 8]) 0)<block_end><block_end><def_stmt>test_basequery_order_by_invalid_path <block_start>query=_make_base_query(mock.sentinel.parent)<with_stmt>pytest.raises(ValueError)<block_start>query.order_by("*")<block_end><block_end><def_stmt>test_basequery_order_by <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>query1=_make_base_query_all_fields(skip_fields=("orders" ) all_descendants=<true>)<line_sep>field_path2="a"<line_sep>query2=query1.order_by(field_path2)<assert_stmt>query2<is><not>query1<assert_stmt>isinstance(query2 BaseQuery)<line_sep>order=_make_order_pb(field_path2 StructuredQuery.Direction.ASCENDING)<assert_stmt>query2._orders<eq>(order )<line_sep>_compare_queries(query1 query2 "_orders")<line_sep># Make sure it appends to the orders. field_path3="b"<line_sep>query3=query2.order_by(field_path3 direction=BaseQuery.DESCENDING)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<line_sep>order_pb3=_make_order_pb(field_path3 StructuredQuery.Direction.DESCENDING)<assert_stmt>query3._orders<eq>(order order_pb3)<line_sep>_compare_queries(query2 query3 "_orders")<block_end><def_stmt>test_basequery_limit <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>query1=_make_base_query_all_fields(all_descendants=<true>)<line_sep>limit2=100<line_sep>query2=query1.limit(limit2)<assert_stmt><not>query2._limit_to_last<assert_stmt>query2<is><not>query1<assert_stmt>isinstance(query2 BaseQuery)<assert_stmt>query2._limit<eq>limit2<line_sep>_compare_queries(query1 query2 "_limit")<line_sep># Make sure it overrides. 
limit3=10<line_sep>query3=query2.limit(limit3)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<assert_stmt>query3._limit<eq>limit3<line_sep>_compare_queries(query2 query3 "_limit")<block_end><def_stmt>test_basequery_limit_to_last <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>query1=_make_base_query_all_fields(all_descendants=<true>)<line_sep>limit2=100<line_sep>query2=query1.limit_to_last(limit2)<assert_stmt>query2._limit_to_last<assert_stmt>query2<is><not>query1<assert_stmt>isinstance(query2 BaseQuery)<assert_stmt>query2._limit<eq>limit2<line_sep>_compare_queries(query1 query2 "_limit" "_limit_to_last")<line_sep># Make sure it overrides. limit3=10<line_sep>query3=query2.limit(limit3)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<assert_stmt>query3._limit<eq>limit3<line_sep>_compare_queries(query2 query3 "_limit" "_limit_to_last")<block_end><def_stmt>test_basequery__resolve_chunk_size # With a global limit <block_start>query=_make_client().collection("asdf").limit(5)<assert_stmt>query._resolve_chunk_size(3 10)<eq>2<assert_stmt>query._resolve_chunk_size(3 1)<eq>1<assert_stmt>query._resolve_chunk_size(3 2)<eq>2<line_sep># With no limit query=_make_client().collection("asdf")._query()<assert_stmt>query._resolve_chunk_size(3 10)<eq>10<assert_stmt>query._resolve_chunk_size(3 1)<eq>1<assert_stmt>query._resolve_chunk_size(3 2)<eq>2<block_end><def_stmt>test_basequery_offset <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>query1=_make_base_query_all_fields(all_descendants=<true>)<line_sep>offset2=23<line_sep>query2=query1.offset(offset2)<assert_stmt>query2<is><not>query1<assert_stmt>isinstance(query2 BaseQuery)<assert_stmt>query2._offset<eq>offset2<line_sep>_compare_queries(query1 query2 "_offset")<line_sep># Make sure it overrides. 
offset3=35<line_sep>query3=query2.offset(offset3)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<assert_stmt>query3._offset<eq>offset3<line_sep>_compare_queries(query2 query3 "_offset")<block_end><def_stmt>test_basequery__cursor_helper_w_dict <block_start>values={"a":7 "b":"foo"}<line_sep>query1=_make_base_query(mock.sentinel.parent)<line_sep>query1._all_descendants=<true><line_sep>query2=query1._cursor_helper(values <true> <true>)<assert_stmt>query2._parent<is>mock.sentinel.parent<assert_stmt>query2._projection<is><none><assert_stmt>query2._field_filters<eq>()<assert_stmt>query2._orders<eq>query1._orders<assert_stmt>query2._limit<is><none><assert_stmt>query2._offset<is><none><assert_stmt>query2._end_at<is><none><assert_stmt>query2._all_descendants<line_sep>cursor,before=query2._start_at<assert_stmt>cursor<eq>values<assert_stmt>before<block_end><def_stmt>test_basequery__cursor_helper_w_tuple <block_start>values=(7 "foo")<line_sep>query1=_make_base_query(mock.sentinel.parent)<line_sep>query2=query1._cursor_helper(values <false> <true>)<assert_stmt>query2._parent<is>mock.sentinel.parent<assert_stmt>query2._projection<is><none><assert_stmt>query2._field_filters<eq>()<assert_stmt>query2._orders<eq>query1._orders<assert_stmt>query2._limit<is><none><assert_stmt>query2._offset<is><none><assert_stmt>query2._end_at<is><none><line_sep>cursor,before=query2._start_at<assert_stmt>cursor<eq>list(values)<assert_stmt><not>before<block_end><def_stmt>test_basequery__cursor_helper_w_list <block_start>values=[7 "foo"]<line_sep>query1=_make_base_query(mock.sentinel.parent)<line_sep>query2=query1._cursor_helper(values <true> <false>)<assert_stmt>query2._parent<is>mock.sentinel.parent<assert_stmt>query2._projection<is><none><assert_stmt>query2._field_filters<eq>()<assert_stmt>query2._orders<eq>query1._orders<assert_stmt>query2._limit<is><none><assert_stmt>query2._offset<is><none><assert_stmt>query2._start_at<is><none><line_sep>cursor,before=query2._end_at<assert_stmt>cursor<eq>values<assert_stmt>cursor<eq>values<assert_stmt>before<block_end><def_stmt>test_basequery__cursor_helper_w_snapshot_wrong_collection <block_start>values={"a":7 "b":"foo"}<line_sep>docref=_make_docref("there" "doc_id")<line_sep>snapshot=_make_snapshot(docref values)<line_sep>collection=_make_collection("here")<line_sep>query=_make_base_query(collection)<with_stmt>pytest.raises(ValueError)<block_start>query._cursor_helper(snapshot <false> <false>)<block_end><block_end><def_stmt>test_basequery__cursor_helper_w_snapshot_other_collection_all_descendants <block_start>values={"a":7 "b":"foo"}<line_sep>docref=_make_docref("there" "doc_id")<line_sep>snapshot=_make_snapshot(docref values)<line_sep>collection=_make_collection("here")<line_sep>query1=_make_base_query(collection all_descendants=<true>)<line_sep>query2=query1._cursor_helper(snapshot <false> <false>)<assert_stmt>query2._parent<is>collection<assert_stmt>query2._projection<is><none><assert_stmt>query2._field_filters<eq>()<assert_stmt>query2._orders<eq>()<assert_stmt>query2._limit<is><none><assert_stmt>query2._offset<is><none><assert_stmt>query2._start_at<is><none><line_sep>cursor,before=query2._end_at<assert_stmt>cursor<is>snapshot<assert_stmt><not>before<block_end><def_stmt>test_basequery__cursor_helper_w_snapshot <block_start>values={"a":7 "b":"foo"}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>snapshot=_make_snapshot(docref 
values)<line_sep>collection=_make_collection("here")<line_sep>query1=_make_base_query(collection)<line_sep>query2=query1._cursor_helper(snapshot <false> <false>)<assert_stmt>query2._parent<is>collection<assert_stmt>query2._projection<is><none><assert_stmt>query2._field_filters<eq>()<assert_stmt>query2._orders<eq>()<assert_stmt>query2._limit<is><none><assert_stmt>query2._offset<is><none><assert_stmt>query2._start_at<is><none><line_sep>cursor,before=query2._end_at<assert_stmt>cursor<is>snapshot<assert_stmt><not>before<block_end><def_stmt>test_basequery_start_at <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>collection=_make_collection("here")<line_sep>query1=_make_base_query_all_fields(parent=collection skip_fields=("orders" ) all_descendants=<true>)<line_sep>query2=query1.order_by("hi")<line_sep>document_fields3={"hi":"mom"}<line_sep>query3=query2.start_at(document_fields3)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<assert_stmt>query3._start_at<eq>(document_fields3 <true>)<line_sep>_compare_queries(query2 query3 "_start_at")<line_sep># Make sure it overrides. query4=query3.order_by("bye")<line_sep>values5={"hi":"zap" "bye":88}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>document_fields5=_make_snapshot(docref values5)<line_sep>query5=query4.start_at(document_fields5)<assert_stmt>query5<is><not>query4<assert_stmt>isinstance(query5 BaseQuery)<assert_stmt>query5._start_at<eq>(document_fields5 <true>)<line_sep>_compare_queries(query4 query5 "_start_at")<block_end><def_stmt>test_basequery_start_after <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>collection=_make_collection("here")<line_sep>query1=_make_base_query_all_fields(parent=collection skip_fields=("orders" ))<line_sep>query2=query1.order_by("down")<line_sep>document_fields3={"down":99.75}<line_sep>query3=query2.start_after(document_fields3)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<assert_stmt>query3._start_at<eq>(document_fields3 <false>)<line_sep>_compare_queries(query2 query3 "_start_at")<line_sep># Make sure it overrides. query4=query3.order_by("out")<line_sep>values5={"down":100.25 "out":b"\x00\x01"}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>document_fields5=_make_snapshot(docref values5)<line_sep>query5=query4.start_after(document_fields5)<assert_stmt>query5<is><not>query4<assert_stmt>isinstance(query5 BaseQuery)<assert_stmt>query5._start_at<eq>(document_fields5 <false>)<line_sep>_compare_queries(query4 query5 "_start_at")<block_end><def_stmt>test_basequery_end_before <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>collection=_make_collection("here")<line_sep>query1=_make_base_query_all_fields(parent=collection skip_fields=("orders" ))<line_sep>query2=query1.order_by("down")<line_sep>document_fields3={"down":99.75}<line_sep>query3=query2.end_before(document_fields3)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<assert_stmt>query3._end_at<eq>(document_fields3 <true>)<line_sep>_compare_queries(query2 query3 "_end_at")<line_sep># Make sure it overrides. 
query4=query3.order_by("out")<line_sep>values5={"down":100.25 "out":b"\x00\x01"}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>document_fields5=_make_snapshot(docref values5)<line_sep>query5=query4.end_before(document_fields5)<assert_stmt>query5<is><not>query4<assert_stmt>isinstance(query5 BaseQuery)<assert_stmt>query5._end_at<eq>(document_fields5 <true>)<line_sep>_compare_queries(query4 query5 "_end_at")<line_sep>_compare_queries(query4 query5 "_end_at")<block_end><def_stmt>test_basequery_end_at <block_start><import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<line_sep>collection=_make_collection("here")<line_sep>query1=_make_base_query_all_fields(parent=collection skip_fields=("orders" ))<line_sep>query2=query1.order_by("hi")<line_sep>document_fields3={"hi":"mom"}<line_sep>query3=query2.end_at(document_fields3)<assert_stmt>query3<is><not>query2<assert_stmt>isinstance(query3 BaseQuery)<assert_stmt>query3._end_at<eq>(document_fields3 <false>)<line_sep>_compare_queries(query2 query3 "_end_at")<line_sep># Make sure it overrides. query4=query3.order_by("bye")<line_sep>values5={"hi":"zap" "bye":88}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>document_fields5=_make_snapshot(docref values5)<line_sep>query5=query4.end_at(document_fields5)<assert_stmt>query5<is><not>query4<assert_stmt>isinstance(query5 BaseQuery)<assert_stmt>query5._end_at<eq>(document_fields5 <false>)<line_sep>_compare_queries(query4 query5 "_end_at")<block_end><def_stmt>test_basequery__filters_pb_empty <block_start>query=_make_base_query(mock.sentinel.parent)<assert_stmt>len(query._field_filters)<eq>0<assert_stmt>query._filters_pb()<is><none><block_end><def_stmt>test_basequery__filters_pb_single <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>query1=_make_base_query(mock.sentinel.parent)<line_sep>query2=query1.where("x.y" ">" 50.5)<line_sep>filter_pb=query2._filters_pb()<line_sep>expected_pb=query.StructuredQuery.Filter(field_filter=query.StructuredQuery.FieldFilter(field=query.StructuredQuery.FieldReference(field_path="x.y") op=StructuredQuery.FieldFilter.Operator.GREATER_THAN value=document.Value(double_value=50.5) ))<assert_stmt>filter_pb<eq>expected_pb<block_end><def_stmt>test_basequery__filters_pb_multi <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>query1=_make_base_query(mock.sentinel.parent)<line_sep>query2=query1.where("x.y" ">" 50.5)<line_sep>query3=query2.where("ABC" "==" 123)<line_sep>filter_pb=query3._filters_pb()<line_sep>op_class=StructuredQuery.FieldFilter.Operator<line_sep>expected_pb=query.StructuredQuery.Filter(composite_filter=query.StructuredQuery.CompositeFilter(op=StructuredQuery.CompositeFilter.Operator.AND filters=[query.StructuredQuery.Filter(field_filter=query.StructuredQuery.FieldFilter(field=query.StructuredQuery.FieldReference(field_path="x.y") op=op_class.GREATER_THAN value=document.Value(double_value=50.5) )) query.StructuredQuery.Filter(field_filter=query.StructuredQuery.FieldFilter(field=query.StructuredQuery.FieldReference(field_path="ABC") op=op_class.EQUAL value=document.Value(integer_value=123) )) ] ))<assert_stmt>filter_pb<eq>expected_pb<block_end><def_stmt>test_basequery__normalize_projection_none 
<block_start>query=_make_base_query(mock.sentinel.parent)<assert_stmt>query._normalize_projection(<none>)<is><none><block_end><def_stmt>test_basequery__normalize_projection_empty <block_start>projection=_make_projection_for_select([])<line_sep>query=_make_base_query(mock.sentinel.parent)<line_sep>normalized=query._normalize_projection(projection)<line_sep>field_paths=[field_ref.field_path<for>field_ref normalized.fields]<assert_stmt>field_paths<eq>["__name__"]<block_end><def_stmt>test_basequery__normalize_projection_non_empty <block_start>projection=_make_projection_for_select(["a" "b"])<line_sep>query=_make_base_query(mock.sentinel.parent)<assert_stmt>query._normalize_projection(projection)<is>projection<block_end><def_stmt>test_basequery__normalize_orders_wo_orders_wo_cursors <block_start>query=_make_base_query(mock.sentinel.parent)<line_sep>expected=[]<assert_stmt>query._normalize_orders()<eq>expected<block_end><def_stmt>test_basequery__normalize_orders_w_orders_wo_cursors <block_start>query=_make_base_query(mock.sentinel.parent).order_by("a")<line_sep>expected=[query._make_order("a" "ASCENDING")]<assert_stmt>query._normalize_orders()<eq>expected<block_end><def_stmt>test_basequery__normalize_orders_wo_orders_w_snapshot_cursor <block_start>values={"a":7 "b":"foo"}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>snapshot=_make_snapshot(docref values)<line_sep>collection=_make_collection("here")<line_sep>query=_make_base_query(collection).start_at(snapshot)<line_sep>expected=[query._make_order("__name__" "ASCENDING")]<assert_stmt>query._normalize_orders()<eq>expected<block_end><def_stmt>test_basequery__normalize_orders_w_name_orders_w_snapshot_cursor <block_start>values={"a":7 "b":"foo"}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>snapshot=_make_snapshot(docref values)<line_sep>collection=_make_collection("here")<line_sep>query=(_make_base_query(collection).order_by("__name__" "DESCENDING").start_at(snapshot))<line_sep>expected=[query._make_order("__name__" "DESCENDING")]<assert_stmt>query._normalize_orders()<eq>expected<block_end><def_stmt>test_basequery__normalize_orders_wo_orders_w_snapshot_cursor_w_neq_exists <block_start>values={"a":7 "b":"foo"}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>snapshot=_make_snapshot(docref values)<line_sep>collection=_make_collection("here")<line_sep>query=(_make_base_query(collection).where("c" "<=" 20).order_by("c" "DESCENDING").start_at(snapshot))<line_sep>expected=[query._make_order("c" "DESCENDING") query._make_order("__name__" "DESCENDING") ]<assert_stmt>query._normalize_orders()<eq>expected<block_end><def_stmt>test_basequery__normalize_orders_wo_orders_w_snapshot_cursor_w_neq_where <block_start>values={"a":7 "b":"foo"}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>snapshot=_make_snapshot(docref values)<line_sep>collection=_make_collection("here")<line_sep>query=_make_base_query(collection).where("c" "<=" 20).end_at(snapshot)<line_sep>expected=[query._make_order("c" "ASCENDING") query._make_order("__name__" "ASCENDING") ]<assert_stmt>query._normalize_orders()<eq>expected<block_end><def_stmt>test_basequery__normalize_orders_wo_orders_w_snapshot_cursor_w_isnull_where <block_start>values={"a":7 "b":"foo"}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>snapshot=_make_snapshot(docref values)<line_sep>collection=_make_collection("here")<line_sep>query=_make_base_query(collection).where("c" "==" <none>).end_at(snapshot)<line_sep>expected=[query._make_order("__name__" "ASCENDING") 
]<assert_stmt>query._normalize_orders()<eq>expected<block_end><def_stmt>test_basequery__normalize_orders_w_name_orders_w_none_cursor <block_start>collection=_make_collection("here")<line_sep>query=(_make_base_query(collection).order_by("__name__" "DESCENDING").start_at(<none>))<line_sep>expected=[query._make_order("__name__" "DESCENDING")]<assert_stmt>query._normalize_orders()<eq>expected<block_end><def_stmt>test_basequery__normalize_cursor_none <block_start>query=_make_base_query(mock.sentinel.parent)<assert_stmt>query._normalize_cursor(<none> query._orders)<is><none><block_end><def_stmt>test_basequery__normalize_cursor_no_order <block_start>cursor=([1] <true>)<line_sep>query=_make_base_query(mock.sentinel.parent)<with_stmt>pytest.raises(ValueError)<block_start>query._normalize_cursor(cursor query._orders)<block_end><block_end><def_stmt>test_basequery__normalize_cursor_as_list_mismatched_order <block_start>cursor=([1 2] <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b" "ASCENDING")<with_stmt>pytest.raises(ValueError)<block_start>query._normalize_cursor(cursor query._orders)<block_end><block_end><def_stmt>test_basequery__normalize_cursor_as_dict_mismatched_order <block_start>cursor=({"a":1} <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b" "ASCENDING")<with_stmt>pytest.raises(ValueError)<block_start>query._normalize_cursor(cursor query._orders)<block_end><block_end><def_stmt>test_basequery__normalize_cursor_as_dict_extra_orders_ok <block_start>cursor=({"name":"Springfield"} <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("name").order_by("state")<line_sep>normalized=query._normalize_cursor(cursor query._orders)<assert_stmt>normalized<eq>(["Springfield"] <true>)<block_end><def_stmt>test_basequery__normalize_cursor_extra_orders_ok <block_start>cursor=(["Springfield"] <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("name").order_by("state")<line_sep>query._normalize_cursor(cursor query._orders)<block_end><def_stmt>test_basequery__normalize_cursor_w_delete <block_start><import_from_stmt>google.cloud.firestore_v1 DELETE_FIELD<line_sep>cursor=([DELETE_FIELD] <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b" "ASCENDING")<with_stmt>pytest.raises(ValueError)<block_start>query._normalize_cursor(cursor query._orders)<block_end><block_end><def_stmt>test_basequery__normalize_cursor_w_server_timestamp <block_start><import_from_stmt>google.cloud.firestore_v1 SERVER_TIMESTAMP<line_sep>cursor=([SERVER_TIMESTAMP] <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b" "ASCENDING")<with_stmt>pytest.raises(ValueError)<block_start>query._normalize_cursor(cursor query._orders)<block_end><block_end><def_stmt>test_basequery__normalize_cursor_w_array_remove <block_start><import_from_stmt>google.cloud.firestore_v1 ArrayRemove<line_sep>cursor=([ArrayRemove([1 3 5])] <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b" "ASCENDING")<with_stmt>pytest.raises(ValueError)<block_start>query._normalize_cursor(cursor query._orders)<block_end><block_end><def_stmt>test_basequery__normalize_cursor_w_array_union <block_start><import_from_stmt>google.cloud.firestore_v1 ArrayUnion<line_sep>cursor=([ArrayUnion([2 4 8])] <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b" "ASCENDING")<with_stmt>pytest.raises(ValueError)<block_start>query._normalize_cursor(cursor query._orders)<block_end><block_end><def_stmt>test_basequery__normalize_cursor_as_list_hit 
<block_start>cursor=([1] <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b" "ASCENDING")<assert_stmt>query._normalize_cursor(cursor query._orders)<eq>([1] <true>)<block_end><def_stmt>test_basequery__normalize_cursor_as_dict_hit <block_start>cursor=({"b":1} <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b" "ASCENDING")<assert_stmt>query._normalize_cursor(cursor query._orders)<eq>([1] <true>)<block_end><def_stmt>test_basequery__normalize_cursor_as_dict_with_dot_key_hit <block_start>cursor=({"b.a":1} <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b.a" "ASCENDING")<assert_stmt>query._normalize_cursor(cursor query._orders)<eq>([1] <true>)<block_end><def_stmt>test_basequery__normalize_cursor_as_dict_with_inner_data_hit <block_start>cursor=({"b":{"a":1}} <true>)<line_sep>query=_make_base_query(mock.sentinel.parent).order_by("b.a" "ASCENDING")<assert_stmt>query._normalize_cursor(cursor query._orders)<eq>([1] <true>)<block_end><def_stmt>test_basequery__normalize_cursor_as_snapshot_hit <block_start>values={"b":1}<line_sep>docref=_make_docref("here" "doc_id")<line_sep>snapshot=_make_snapshot(docref values)<line_sep>cursor=(snapshot <true>)<line_sep>collection=_make_collection("here")<line_sep>query=_make_base_query(collection).order_by("b" "ASCENDING")<assert_stmt>query._normalize_cursor(cursor query._orders)<eq>([1] <true>)<block_end><def_stmt>test_basequery__normalize_cursor_w___name___w_reference <block_start>db_string="projects/my-project/database/(default)"<line_sep>client=mock.Mock(spec=["_database_string"])<line_sep>client._database_string=db_string<line_sep>parent=mock.Mock(spec=["_path" "_client"])<line_sep>parent._client=client<line_sep>parent._path=["C"]<line_sep>query=_make_base_query(parent).order_by("__name__" "ASCENDING")<line_sep>docref=_make_docref("here" "doc_id")<line_sep>values={"a":7}<line_sep>snapshot=_make_snapshot(docref values)<line_sep>expected=docref<line_sep>cursor=(snapshot <true>)<assert_stmt>query._normalize_cursor(cursor query._orders)<eq>([expected] <true>)<block_end><def_stmt>test_basequery__normalize_cursor_w___name___wo_slash <block_start>db_string="projects/my-project/database/(default)"<line_sep>client=mock.Mock(spec=["_database_string"])<line_sep>client._database_string=db_string<line_sep>parent=mock.Mock(spec=["_path" "_client" "document"])<line_sep>parent._client=client<line_sep>parent._path=["C"]<line_sep>document=parent.document.return_value=mock.Mock(spec=[])<line_sep>query=_make_base_query(parent).order_by("__name__" "ASCENDING")<line_sep>cursor=(["b"] <true>)<line_sep>expected=document<assert_stmt>query._normalize_cursor(cursor query._orders)<eq>([expected] <true>)<line_sep>parent.document.assert_called_once_with("b")<block_end><def_stmt>test_basequery__to_protobuf_all_fields <block_start><import_from_stmt>google.protobuf wrappers_pb2<import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>parent=mock.Mock(id="cat" spec=["id"])<line_sep>query1=_make_base_query(parent)<line_sep>query2=query1.select(["X" "Y" "Z"])<line_sep>query3=query2.where("Y" ">" 
2.5)<line_sep>query4=query3.order_by("X")<line_sep>query5=query4.limit(17)<line_sep>query6=query5.offset(3)<line_sep>query7=query6.start_at({"X":10})<line_sep>query8=query7.end_at({"X":25})<line_sep>structured_query_pb=query8._to_protobuf()<line_sep>query_kwargs={"from_":[query.StructuredQuery.CollectionSelector(collection_id=parent.id)] "select":query.StructuredQuery.Projection(fields=[query.StructuredQuery.FieldReference(field_path=field_path)<for>field_path ["X" "Y" "Z"]]) "where":query.StructuredQuery.Filter(field_filter=query.StructuredQuery.FieldFilter(field=query.StructuredQuery.FieldReference(field_path="Y") op=StructuredQuery.FieldFilter.Operator.GREATER_THAN value=document.Value(double_value=2.5) )) "order_by":[_make_order_pb("X" StructuredQuery.Direction.ASCENDING)] "start_at":query.Cursor(values=[document.Value(integer_value=10)] before=<true>) "end_at":query.Cursor(values=[document.Value(integer_value=25)]) "offset":3 "limit":wrappers_pb2.Int32Value(value=17) }<line_sep>expected_pb=query.StructuredQuery(**query_kwargs)<assert_stmt>structured_query_pb<eq>expected_pb<block_end><def_stmt>test_basequery__to_protobuf_select_only <block_start><import_from_stmt>google.cloud.firestore_v1.types query<line_sep>parent=mock.Mock(id="cat" spec=["id"])<line_sep>query1=_make_base_query(parent)<line_sep>field_paths=["a.b" "a.c" "d"]<line_sep>query2=query1.select(field_paths)<line_sep>structured_query_pb=query2._to_protobuf()<line_sep>query_kwargs={"from_":[query.StructuredQuery.CollectionSelector(collection_id=parent.id)] "select":query.StructuredQuery.Projection(fields=[query.StructuredQuery.FieldReference(field_path=field_path)<for>field_path field_paths]) }<line_sep>expected_pb=query.StructuredQuery(**query_kwargs)<assert_stmt>structured_query_pb<eq>expected_pb<block_end><def_stmt>test_basequery__to_protobuf_where_only <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>parent=mock.Mock(id="dog" spec=["id"])<line_sep>query1=_make_base_query(parent)<line_sep>query2=query1.where("a" "==" u"b")<line_sep>structured_query_pb=query2._to_protobuf()<line_sep>query_kwargs={"from_":[query.StructuredQuery.CollectionSelector(collection_id=parent.id)] "where":query.StructuredQuery.Filter(field_filter=query.StructuredQuery.FieldFilter(field=query.StructuredQuery.FieldReference(field_path="a") op=StructuredQuery.FieldFilter.Operator.EQUAL value=document.Value(string_value=u"b") )) }<line_sep>expected_pb=query.StructuredQuery(**query_kwargs)<assert_stmt>structured_query_pb<eq>expected_pb<block_end><def_stmt>test_basequery__to_protobuf_order_by_only <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>parent=mock.Mock(id="fish" spec=["id"])<line_sep>query1=_make_base_query(parent)<line_sep>query2=query1.order_by("abc")<line_sep>structured_query_pb=query2._to_protobuf()<line_sep>query_kwargs={"from_":[query.StructuredQuery.CollectionSelector(collection_id=parent.id)] "order_by":[_make_order_pb("abc" StructuredQuery.Direction.ASCENDING)] }<line_sep>expected_pb=query.StructuredQuery(**query_kwargs)<assert_stmt>structured_query_pb<eq>expected_pb<block_end><def_stmt>test_basequery__to_protobuf_start_at_only # NOTE: "only" is wrong since we must have ``order_by`` as well. 
<block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>parent=mock.Mock(id="phish" spec=["id"])<line_sep>query_inst=(_make_base_query(parent).order_by("X.Y").start_after({"X":{"Y":u"Z"}}))<line_sep>structured_query_pb=query_inst._to_protobuf()<line_sep>query_kwargs={"from_":[StructuredQuery.CollectionSelector(collection_id=parent.id)] "order_by":[_make_order_pb("X.Y" StructuredQuery.Direction.ASCENDING)] "start_at":query.Cursor(values=[document.Value(string_value=u"Z")]) }<line_sep>expected_pb=StructuredQuery(**query_kwargs)<assert_stmt>structured_query_pb<eq>expected_pb<block_end><def_stmt>test_basequery__to_protobuf_end_at_only # NOTE: "only" is wrong since we must have ``order_by`` as well. <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>parent=mock.Mock(id="ghoti" spec=["id"])<line_sep>query_inst=_make_base_query(parent).order_by("a").end_at({"a":88})<line_sep>structured_query_pb=query_inst._to_protobuf()<line_sep>query_kwargs={"from_":[query.StructuredQuery.CollectionSelector(collection_id=parent.id)] "order_by":[_make_order_pb("a" StructuredQuery.Direction.ASCENDING)] "end_at":query.Cursor(values=[document.Value(integer_value=88)]) }<line_sep>expected_pb=query.StructuredQuery(**query_kwargs)<assert_stmt>structured_query_pb<eq>expected_pb<block_end><def_stmt>test_basequery__to_protobuf_offset_only <block_start><import_from_stmt>google.cloud.firestore_v1.types query<line_sep>parent=mock.Mock(id="cartt" spec=["id"])<line_sep>query1=_make_base_query(parent)<line_sep>offset=14<line_sep>query2=query1.offset(offset)<line_sep>structured_query_pb=query2._to_protobuf()<line_sep>query_kwargs={"from_":[query.StructuredQuery.CollectionSelector(collection_id=parent.id)] "offset":offset }<line_sep>expected_pb=query.StructuredQuery(**query_kwargs)<assert_stmt>structured_query_pb<eq>expected_pb<block_end><def_stmt>test_basequery__to_protobuf_limit_only <block_start><import_from_stmt>google.protobuf wrappers_pb2<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>parent=mock.Mock(id="donut" spec=["id"])<line_sep>query1=_make_base_query(parent)<line_sep>limit=31<line_sep>query2=query1.limit(limit)<line_sep>structured_query_pb=query2._to_protobuf()<line_sep>query_kwargs={"from_":[query.StructuredQuery.CollectionSelector(collection_id=parent.id)] "limit":wrappers_pb2.Int32Value(value=limit) }<line_sep>expected_pb=query.StructuredQuery(**query_kwargs)<assert_stmt>structured_query_pb<eq>expected_pb<block_end><def_stmt>test_basequery_comparator_no_ordering <block_start>query=_make_base_query(mock.sentinel.parent)<line_sep>query._orders=[]<line_sep>doc1=mock.Mock()<line_sep>doc1.reference._path=("col" "adocument1")<line_sep>doc2=mock.Mock()<line_sep>doc2.reference._path=("col" "adocument2")<line_sep>sort=query._comparator(doc1 doc2)<assert_stmt>sort<eq>-1<block_end><def_stmt>test_basequery_comparator_no_ordering_same_id <block_start>query=_make_base_query(mock.sentinel.parent)<line_sep>query._orders=[]<line_sep>doc1=mock.Mock()<line_sep>doc1.reference._path=("col" "adocument1")<line_sep>doc2=mock.Mock()<line_sep>doc2.reference._path=("col" "adocument1")<line_sep>sort=query._comparator(doc1 doc2)<assert_stmt>sort<eq>0<block_end><def_stmt>test_basequery_comparator_ordering 
<block_start>query=_make_base_query(mock.sentinel.parent)<line_sep>orderByMock=mock.Mock()<line_sep>orderByMock.field.field_path="last"<line_sep>orderByMock.direction=1# ascending query._orders=[orderByMock]<line_sep>doc1=mock.Mock()<line_sep>doc1.reference._path=("col" "adocument1")<line_sep>doc1._data={"first":{"stringValue":"Ada"} "last":{"stringValue":"secondlovelace"} }<line_sep>doc2=mock.Mock()<line_sep>doc2.reference._path=("col" "adocument2")<line_sep>doc2._data={"first":{"stringValue":"Ada"} "last":{"stringValue":"lovelace"} }<line_sep>sort=query._comparator(doc1 doc2)<assert_stmt>sort<eq>1<block_end><def_stmt>test_basequery_comparator_ordering_descending <block_start>query=_make_base_query(mock.sentinel.parent)<line_sep>orderByMock=mock.Mock()<line_sep>orderByMock.field.field_path="last"<line_sep>orderByMock.direction=-1# descending query._orders=[orderByMock]<line_sep>doc1=mock.Mock()<line_sep>doc1.reference._path=("col" "adocument1")<line_sep>doc1._data={"first":{"stringValue":"Ada"} "last":{"stringValue":"secondlovelace"} }<line_sep>doc2=mock.Mock()<line_sep>doc2.reference._path=("col" "adocument2")<line_sep>doc2._data={"first":{"stringValue":"Ada"} "last":{"stringValue":"lovelace"} }<line_sep>sort=query._comparator(doc1 doc2)<assert_stmt>sort<eq>-1<block_end><def_stmt>test_basequery_comparator_missing_order_by_field_in_data_raises <block_start>query=_make_base_query(mock.sentinel.parent)<line_sep>orderByMock=mock.Mock()<line_sep>orderByMock.field.field_path="last"<line_sep>orderByMock.direction=1# ascending query._orders=[orderByMock]<line_sep>doc1=mock.Mock()<line_sep>doc1.reference._path=("col" "adocument1")<line_sep>doc1._data={}<line_sep>doc2=mock.Mock()<line_sep>doc2.reference._path=("col" "adocument2")<line_sep>doc2._data={"first":{"stringValue":"Ada"} "last":{"stringValue":"lovelace"} }<with_stmt>pytest.raises(ValueError)<as>exc_info<block_start>query._comparator(doc1 doc2)<block_end>(message )=exc_info.value.args<assert_stmt>message.startswith("Can only compare fields ")<block_end><def_stmt>test_basequery_recursive_multiple <block_start><import_from_stmt>google.cloud.firestore_v1.collection CollectionReference<import_from_stmt>google.cloud.firestore_v1.base_query BaseQuery<class_stmt>DerivedQuery(BaseQuery)<block_start>@staticmethod<def_stmt>_get_collection_reference_class <block_start><return>CollectionReference<block_end><block_end>query=DerivedQuery(_make_client().collection("asdf"))<assert_stmt>isinstance(query.recursive().recursive() DerivedQuery)<block_end><def_stmt>_get_op_class <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<line_sep><return>StructuredQuery.FieldFilter.Operator<block_end><def_stmt>test__enum_from_op_string_lt <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string("<")<eq>op_class.LESS_THAN<block_end><def_stmt>test__enum_from_op_string_le <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string("<=")<eq>op_class.LESS_THAN_OR_EQUAL<block_end><def_stmt>test__enum_from_op_string_eq <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string("==")<eq>op_class.EQUAL<block_end><def_stmt>test__enum_from_op_string_ge <block_start><import_from_stmt>google.cloud.firestore_v1.base_query 
_enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string(">=")<eq>op_class.GREATER_THAN_OR_EQUAL<block_end><def_stmt>test__enum_from_op_string_gt <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string(">")<eq>op_class.GREATER_THAN<block_end><def_stmt>test__enum_from_op_string_array_contains <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string("array_contains")<eq>op_class.ARRAY_CONTAINS<block_end><def_stmt>test__enum_from_op_string_in <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string("in")<eq>op_class.IN<block_end><def_stmt>test__enum_from_op_string_array_contains_any <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string("array_contains_any")<eq>op_class.ARRAY_CONTAINS_ANY<block_end><def_stmt>test__enum_from_op_string_not_in <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string("not-in")<eq>op_class.NOT_IN<block_end><def_stmt>test__enum_from_op_string_not_eq <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<line_sep>op_class=_get_op_class()<assert_stmt>_enum_from_op_string("!=")<eq>op_class.NOT_EQUAL<block_end><def_stmt>test__enum_from_op_string_invalid <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_op_string<with_stmt>pytest.raises(ValueError)<block_start>_enum_from_op_string("?")<block_end><block_end><def_stmt>test__isnan_valid <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _isnan<assert_stmt>_isnan(float("nan"))<block_end><def_stmt>test__isnan_invalid <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _isnan<assert_stmt><not>_isnan(51.5)<assert_stmt><not>_isnan(<none>)<assert_stmt><not>_isnan("str")<assert_stmt><not>_isnan(int)<assert_stmt><not>_isnan(1.0+1.0j)<block_end><def_stmt>test__enum_from_direction_success <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_direction<import_from_stmt>google.cloud.firestore_v1.query Query<line_sep>dir_class=StructuredQuery.Direction<assert_stmt>_enum_from_direction(Query.ASCENDING)<eq>dir_class.ASCENDING<assert_stmt>_enum_from_direction(Query.DESCENDING)<eq>dir_class.DESCENDING<line_sep># Ints pass through <assert_stmt>_enum_from_direction(dir_class.ASCENDING)<eq>dir_class.ASCENDING<assert_stmt>_enum_from_direction(dir_class.DESCENDING)<eq>dir_class.DESCENDING<block_end><def_stmt>test__enum_from_direction_failure <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _enum_from_direction<with_stmt>pytest.raises(ValueError)<block_start>_enum_from_direction("neither-ASCENDING-nor-DESCENDING")<block_end><block_end><def_stmt>test__filter_pb_unary <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.base_query _filter_pb<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>unary_pb=query.StructuredQuery.UnaryFilter(field=query.StructuredQuery.FieldReference(field_path="a.b.c") op=StructuredQuery.UnaryFilter.Operator.IS_NULL 
)<line_sep>filter_pb=_filter_pb(unary_pb)<line_sep>expected_pb=query.StructuredQuery.Filter(unary_filter=unary_pb)<assert_stmt>filter_pb<eq>expected_pb<block_end><def_stmt>test__filter_pb_field <block_start><import_from_stmt>google.cloud.firestore_v1.types StructuredQuery<import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types query<import_from_stmt>google.cloud.firestore_v1.base_query _filter_pb<line_sep>field_filter_pb=query.StructuredQuery.FieldFilter(field=query.StructuredQuery.FieldReference(field_path="XYZ") op=StructuredQuery.FieldFilter.Operator.GREATER_THAN value=document.Value(double_value=90.75) )<line_sep>filter_pb=_filter_pb(field_filter_pb)<line_sep>expected_pb=query.StructuredQuery.Filter(field_filter=field_filter_pb)<assert_stmt>filter_pb<eq>expected_pb<block_end><def_stmt>test__filter_pb_bad_type <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _filter_pb<with_stmt>pytest.raises(ValueError)<block_start>_filter_pb(<none>)<block_end><block_end><def_stmt>test__cursor_pb_no_pair <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _cursor_pb<assert_stmt>_cursor_pb(<none>)<is><none><block_end><def_stmt>test__cursor_pb_success <block_start><import_from_stmt>google.cloud.firestore_v1.types query<import_from_stmt>google.cloud.firestore_v1 _helpers<import_from_stmt>google.cloud.firestore_v1.base_query _cursor_pb<line_sep>data=[1.5 10 <true>]<line_sep>cursor_pair=data <true><line_sep>cursor_pb=_cursor_pb(cursor_pair)<line_sep>expected_pb=query.Cursor(values=[_helpers.encode_value(value)<for>value data] before=<true>)<assert_stmt>cursor_pb<eq>expected_pb<block_end><def_stmt>test__query_response_to_snapshot_empty <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _query_response_to_snapshot<line_sep>response_pb=_make_query_response()<line_sep>snapshot=_query_response_to_snapshot(response_pb <none> <none>)<assert_stmt>snapshot<is><none><block_end><def_stmt>test__query_response_to_snapshot_after_offset <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _query_response_to_snapshot<line_sep>skipped_results=410<line_sep>response_pb=_make_query_response(skipped_results=skipped_results)<line_sep>snapshot=_query_response_to_snapshot(response_pb <none> <none>)<assert_stmt>snapshot<is><none><block_end><def_stmt>test__query_response_to_snapshot_response <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _query_response_to_snapshot<import_from_stmt>google.cloud.firestore_v1.document DocumentSnapshot<line_sep>client=_make_client()<line_sep>collection=client.collection("a" "b" "c")<line_sep>_,expected_prefix=collection._parent_info()<line_sep># Create name for the protobuf. 
doc_id="gigantic"<line_sep>name="{}/{}".format(expected_prefix doc_id)<line_sep>data={"a":901 "b":<true>}<line_sep>response_pb=_make_query_response(name=name data=data)<line_sep>snapshot=_query_response_to_snapshot(response_pb collection expected_prefix)<assert_stmt>isinstance(snapshot DocumentSnapshot)<line_sep>expected_path=collection._path+(doc_id )<assert_stmt>snapshot.reference._path<eq>expected_path<assert_stmt>snapshot.to_dict()<eq>data<assert_stmt>snapshot.exists<assert_stmt>snapshot.read_time<eq>response_pb.read_time<assert_stmt>snapshot.create_time<eq>response_pb.document.create_time<assert_stmt>snapshot.update_time<eq>response_pb.document.update_time<block_end><def_stmt>test__collection_group_query_response_to_snapshot_empty <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _collection_group_query_response_to_snapshot <line_sep>response_pb=_make_query_response()<line_sep>snapshot=_collection_group_query_response_to_snapshot(response_pb <none>)<assert_stmt>snapshot<is><none><block_end><def_stmt>test__collection_group_query_response_to_snapshot_after_offset <block_start><import_from_stmt>google.cloud.firestore_v1.base_query _collection_group_query_response_to_snapshot <line_sep>skipped_results=410<line_sep>response_pb=_make_query_response(skipped_results=skipped_results)<line_sep>snapshot=_collection_group_query_response_to_snapshot(response_pb <none>)<assert_stmt>snapshot<is><none><block_end><def_stmt>test__collection_group_query_response_to_snapshot_response <block_start><import_from_stmt>google.cloud.firestore_v1.document DocumentSnapshot<import_from_stmt>google.cloud.firestore_v1.base_query _collection_group_query_response_to_snapshot <line_sep>client=_make_client()<line_sep>collection=client.collection("a" "b" "c")<line_sep>other_collection=client.collection("a" "b" "d")<line_sep>to_match=other_collection.document("gigantic")<line_sep>data={"a":901 "b":<true>}<line_sep>response_pb=_make_query_response(name=to_match._document_path data=data)<line_sep>snapshot=_collection_group_query_response_to_snapshot(response_pb collection)<assert_stmt>isinstance(snapshot DocumentSnapshot)<assert_stmt>snapshot.reference._document_path<eq>to_match._document_path<assert_stmt>snapshot.to_dict()<eq>data<assert_stmt>snapshot.exists<assert_stmt>snapshot.read_time<eq>response_pb._pb.read_time<assert_stmt>snapshot.create_time<eq>response_pb._pb.document.create_time<assert_stmt>snapshot.update_time<eq>response_pb._pb.document.update_time<block_end><def_stmt>_make_credentials <block_start><import_stmt>google.auth.credentials<line_sep><return>mock.Mock(spec=google.auth.credentials.Credentials)<block_end><def_stmt>_make_client project="project-project"<block_start><import_from_stmt>google.cloud.firestore_v1.client Client<line_sep>credentials=_make_credentials()<line_sep><return>Client(project=project credentials=credentials)<block_end><def_stmt>_make_order_pb field_path direction<block_start><import_from_stmt>google.cloud.firestore_v1.types query<line_sep><return>query.StructuredQuery.Order(field=query.StructuredQuery.FieldReference(field_path=field_path) direction=direction )<block_end><def_stmt>_make_query_response **kwargs# kwargs supported are ``skipped_results``, ``name`` and ``data`` <block_start><import_from_stmt>google.cloud.firestore_v1.types document<import_from_stmt>google.cloud.firestore_v1.types firestore<import_from_stmt>google.cloud._helpers _datetime_to_pb_timestamp<import_from_stmt>google.cloud.firestore_v1 
_helpers<line_sep>now=datetime.datetime.utcnow()<line_sep>read_time=_datetime_to_pb_timestamp(now)<line_sep>kwargs["read_time"]=read_time<line_sep>name=kwargs.pop("name" <none>)<line_sep>data=kwargs.pop("data" <none>)<if_stmt>name<is><not><none><and>data<is><not><none><block_start>document_pb=document.Document(name=name fields=_helpers.encode_dict(data))<line_sep>delta=datetime.timedelta(seconds=100)<line_sep>update_time=_datetime_to_pb_timestamp(now-delta)<line_sep>create_time=_datetime_to_pb_timestamp(now-2<times>delta)<line_sep>document_pb._pb.update_time.CopyFrom(update_time)<line_sep>document_pb._pb.create_time.CopyFrom(create_time)<line_sep>kwargs["document"]=document_pb<block_end><return>firestore.RunQueryResponse(**kwargs)<block_end><def_stmt>_make_cursor_pb pair<block_start><import_from_stmt>google.cloud.firestore_v1 _helpers<import_from_stmt>google.cloud.firestore_v1.types query<line_sep>values,before=pair<line_sep>value_pbs=[_helpers.encode_value(value)<for>value values]<line_sep><return>query.Cursor(values=value_pbs before=before)<block_end><def_stmt>_make_query_partition *args **kwargs<block_start><import_from_stmt>google.cloud.firestore_v1.base_query QueryPartition<line_sep><return>QueryPartition(*args **kwargs)<block_end><def_stmt>test_constructor <block_start>partition=_make_query_partition(mock.sentinel.query "start" "end")<assert_stmt>partition._query<is>mock.sentinel.query<assert_stmt>partition.start_at<eq>"start"<assert_stmt>partition.end_at<eq>"end"<block_end><def_stmt>test_query_begin <block_start>partition=_make_query_partition(DummyQuery("PARENT") <none> "end")<line_sep>query=partition.query()<assert_stmt>query._parent<eq>"PARENT"<assert_stmt>query.all_descendants<eq>"YUP"<assert_stmt>query.orders<eq>"ORDER"<assert_stmt>query.start_at<is><none><assert_stmt>query.end_at<eq>(["end"] <true>)<block_end><def_stmt>test_query_middle <block_start>partition=_make_query_partition(DummyQuery("PARENT") "start" "end")<line_sep>query=partition.query()<assert_stmt>query._parent<eq>"PARENT"<assert_stmt>query.all_descendants<eq>"YUP"<assert_stmt>query.orders<eq>"ORDER"<assert_stmt>query.start_at<eq>(["start"] <true>)<assert_stmt>query.end_at<eq>(["end"] <true>)<block_end><def_stmt>test_query_end <block_start>partition=_make_query_partition(DummyQuery("PARENT") "start" <none>)<line_sep>query=partition.query()<assert_stmt>query._parent<eq>"PARENT"<assert_stmt>query.all_descendants<eq>"YUP"<assert_stmt>query.orders<eq>"ORDER"<assert_stmt>query.start_at<eq>(["start"] <true>)<assert_stmt>query.end_at<is><none><block_end><class_stmt>DummyQuery<block_start>_all_descendants="YUP"<line_sep>_PARTITION_QUERY_ORDER="ORDER"<def_stmt>__init__ self parent * all_descendants=<none> orders=<none> start_at=<none> end_at=<none><block_start>self._parent=parent<line_sep>self.all_descendants=all_descendants<line_sep>self.orders=orders<line_sep>self.start_at=start_at<line_sep>self.end_at=end_at<block_end><block_end><def_stmt>_make_projection_for_select field_paths<block_start><import_from_stmt>google.cloud.firestore_v1.types query<line_sep><return>query.StructuredQuery.Projection(fields=[query.StructuredQuery.FieldReference(field_path=field_path)<for>field_path field_paths])<block_end><def_stmt>_make_collection *path **kw<block_start><import_from_stmt>google.cloud.firestore_v1 collection<line_sep><return>collection.CollectionReference(*path **kw)<block_end><def_stmt>_make_docref *path **kw<block_start><import_from_stmt>google.cloud.firestore_v1 document<line_sep><return>document.DocumentReference(*path 
**kw)<block_end><def_stmt>_make_snapshot docref values<block_start><import_from_stmt>google.cloud.firestore_v1 document<line_sep><return>document.DocumentSnapshot(docref values <true> <none> <none> <none>)<block_end>
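# Illustrative sketch (not part of the original test module): how the factory helpers
# above compose to exercise BaseQuery._normalize_cursor with a document-snapshot cursor,
# mirroring test_basequery__normalize_cursor_as_snapshot_hit. It assumes the
# _make_base_query factory defined earlier in this module.
def _example_snapshot_cursor_normalization():
    collection = _make_collection("here")
    query = _make_base_query(collection).order_by("b", "ASCENDING")
    snapshot = _make_snapshot(_make_docref("here", "doc_id"), {"b": 1})
    # The snapshot cursor is reduced to the values of the ordered fields.
    assert query._normalize_cursor((snapshot, True), query._orders) == ([1], True)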
# SPDX-License-Identifier: Apache-2.0 """ Save pre-trained model. """<import_stmt>tensorflow<as>tf<import_stmt>numpy<as>np<line_sep># pylint: disable=redefined-outer-name,reimported,import-outside-toplevel <def_stmt>save_pretrained_model sess outputs feeds out_dir model_name="pretrained"<block_start>"""Save pretrained model and config"""<try_stmt><block_start><import_stmt>os<import_stmt>sys<import_stmt>tensorflow<as>tf<import_stmt>subprocess<line_sep>to_onnx_path="{}/to_onnx".format(out_dir)<if_stmt><not>os.path.isdir(to_onnx_path)<block_start>os.makedirs(to_onnx_path)<block_end>saved_model="{}/saved_model".format(to_onnx_path)<line_sep>inputs_path="{}/inputs.npy".format(to_onnx_path)<line_sep>pretrained_model_yaml_path="{}/pretrained.yaml".format(to_onnx_path)<line_sep>envars_path="{}/environment.txt".format(to_onnx_path)<line_sep>pip_requirement_path="{}/requirements.txt".format(to_onnx_path)<line_sep>print("===============Save Saved Model========================")<if_stmt>os.path.exists(saved_model)<block_start>print("{} already exists, SKIP".format(saved_model))<line_sep><return><block_end>print("Save tf version, python version and installed packages")<line_sep>tf_version=tf.__version__<line_sep>py_version=sys.version<line_sep>pip_packages=subprocess.check_output([sys.executable "-m" "pip" "freeze" "--all"])<line_sep>pip_packages=pip_packages.decode("UTF-8")<with_stmt>open(envars_path "w")<as>fp<block_start>fp.write(tf_version+os.linesep)<line_sep>fp.write(py_version)<block_end><with_stmt>open(pip_requirement_path "w")<as>fp<block_start>fp.write(pip_packages)<block_end>print("Save model for tf2onnx: {}".format(to_onnx_path))<line_sep># save inputs inputs={}<for_stmt>inp,value feeds.items()<block_start><if_stmt>isinstance(inp str)<block_start>inputs[inp]=value<block_end><else_stmt><block_start>inputs[inp.name]=value<block_end><block_end>np.save(inputs_path inputs)<line_sep>print("Saved inputs to {}".format(inputs_path))<line_sep># save graph and weights <import_from_stmt>tensorflow.saved_model simple_save<line_sep># pylint: disable=unnecessary-comprehension simple_save(sess saved_model {n:i<for>n,i zip(inputs.keys() feeds.keys())} {op.name:op<for>op outputs})<line_sep>print("Saved model to {}".format(saved_model))<line_sep># generate config pretrained_model_yaml=''' {}: model: ./saved_model model_type: saved_model input_get: get_ramp '''.format(model_name)<line_sep>pretrained_model_yaml<augadd>" inputs:\n"<for_stmt>inp,_ inputs.items()<block_start>pretrained_model_yaml<augadd>" \"{input}\": np.array(np.load(\"./inputs.npy\")[()][\"{input}\"])\n".format(input=inp)<block_end>outputs=[op.name<for>op outputs]<line_sep>pretrained_model_yaml<augadd>" outputs:\n"<for_stmt>out outputs<block_start>pretrained_model_yaml<augadd>" - {}\n".format(out)<block_end><with_stmt>open(pretrained_model_yaml_path "w")<as>f<block_start>f.write(pretrained_model_yaml)<block_end>print("Saved pretrained model yaml to {}".format(pretrained_model_yaml_path))<line_sep>print("=========================================================")<block_end><except_stmt>Exception<as>ex# pylint: disable=broad-except <block_start>print("Error: {}".format(ex))<block_end><block_end><def_stmt>test <block_start>"""Test sample."""<line_sep>x_val=np.random.rand(5 20).astype(np.float32)<line_sep>y_val=np.random.rand(20 10).astype(np.float32)<line_sep>x=tf.placeholder(tf.float32 x_val.shape name="x")<line_sep>y=tf.placeholder(tf.float32 y_val.shape name="y")<line_sep>z=tf.matmul(x y)<line_sep>w=tf.get_variable("weight" [5 10] 
dtype=tf.float32)<line_sep>init=tf.global_variables_initializer()<line_sep>outputs=[z+w]<line_sep>feeds={x:x_val y:y_val}<with_stmt>tf.Session()<as>sess<block_start>sess.run(init)<line_sep>sess.run(outputs feeds)<line_sep># NOTE: save_pretrained_model does NOT overwrite an existing saved model, so run the snippet below only after testing the BEST model # if you perform testing several times. save_pretrained_model(sess outputs feeds "./tests" model_name="test")<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>test()<block_end>
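# Illustrative sketch (not part of the original script): reloading the artifacts that
# save_pretrained_model writes under <out_dir>/to_onnx. The directory layout follows the
# code above; the TF1 loader call and the "serve" tag reflect tf.saved_model.simple_save
# defaults, and allow_pickle=True is needed because inputs.npy stores a dict.
def load_saved_artifacts(to_onnx_path="./tests/to_onnx"):
    import numpy as np
    import tensorflow as tf
    # inputs.npy was written with np.save on a {tensor_name: value} dict.
    inputs = np.load("{}/inputs.npy".format(to_onnx_path), allow_pickle=True)[()]
    with tf.Session(graph=tf.Graph()) as sess:
        tf.saved_model.loader.load(sess, ["serve"], "{}/saved_model".format(to_onnx_path))
        return sess.graph, inputs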
<def_stmt>test_malware_have_actors attck_fixture<block_start>""" All MITRE Enterprise ATT&CK Malware should have Actors Args: attck_fixture ([type]): our default MITRE Enterprise ATT&CK JSON fixture """<for_stmt>malware attck_fixture.enterprise.malwares<block_start><if_stmt>malware.actors<block_start><assert_stmt>getattr(malware 'actors')<block_end><block_end><block_end><def_stmt>test_malware_have_techniques attck_fixture<block_start>""" All MITRE Enterprise ATT&CK Malware should have techniques Args: attck_fixture ([type]): our default MITRE Enterprise ATT&CK JSON fixture """<for_stmt>malware attck_fixture.enterprise.malwares<block_start><if_stmt>malware.techniques<block_start><assert_stmt>getattr(malware 'techniques')<block_end><block_end><block_end>
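# Illustrative sketch (hypothetical, not part of the original test file): one way the
# attck_fixture referenced in the docstrings above could be provided. The Attck() entry
# point is an assumption about the pyattck package, not confirmed by this file.
import pytest

@pytest.fixture(scope="session")
def attck_fixture():
    from pyattck import Attck
    # Loads the default MITRE Enterprise ATT&CK JSON data once per test session.
    return Attck()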
# -*- coding: utf-8 -*- # adapted from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/transforms/augmentation.py <import_stmt>sys<import_stmt>inspect<import_stmt>random<import_stmt>numpy<as>np<import_stmt>pprint<import_from_stmt>abc ABCMeta abstractmethod<import_from_stmt>typing List Optional Tuple Union<import_from_stmt>PIL Image<import_from_stmt>.transform Transform TransformList BlendTransform CropTransform HFlipTransform NoOpTransform Transform VFlipTransform ExtentTransform ResizeTransform RotationTransform <line_sep>__all__=["Augmentation" "TransformGen" "apply_transform_gens" "AugInput" "StandardAugInput" "apply_augmentations" "RandomApply" "RandomBrightness" "RandomContrast" "RandomCrop" "RandomExtent" "RandomFlip" "RandomSaturation" "RandomLighting" "RandomRotation" "Resize" "ResizeShortestEdge" "RandomCropWithInstance" "PadAugmentation" ]<line_sep>""" Overview of the augmentation system: We have a design goal that aims at allowing: (1) Arbitrary structures of input data (e.g. list[list[boxes]], dict[str, boxes], multiple semantic segmentations for each image, etc) and arbitrary new data types (rotated boxes, 3D meshes, densepose, etc) (2) A list of augmentation to be applied sequentially `Augmentation` defines policies to create deterministic transforms from input data. An augmentation policy may need to access arbitrary input data, so it declares the input data needed, to be provided by users when calling its `get_transform` method. `Augmentation` is not able to apply transforms to data: data associated with one sample may be much more than what `Augmentation` gets. For example, most augmentation policies only need an image, but the actual input samples can be much more complicated. `AugInput` manages all inputs needed by `Augmentation` and implements the logic to apply a sequence of augmentation. It has to define how the inputs are transformed, because arguments needed by one `Augmentation` needs to be transformed to become arguments of the next `Augmentation` in the sequence. `AugInput` does not need to contain all input data, because most augmentation policies only need very few fields (e.g., most only need "image"). We provide `StandardAugInput` that only contains "images", "boxes", "sem_seg", that are enough to create transforms for most cases. In this way, users keep the responsibility to apply transforms to other potentially new data types and structures, e.g. keypoints, proposals boxes. To extend the system, one can do: 1. To add a new augmentation policy that only needs to use standard inputs ("image", "boxes", "sem_seg"), writing a subclass of `Augmentation` is sufficient. 2. To use new data types or custom data structures, `StandardAugInput` can still be used as long as the new data types or custom data structures are not needed by any augmentation policy. The new data types or data structures can be transformed using the transforms returned by `AugInput.apply_augmentations`. 3. To add new augmentation policies that need new data types or data structures, in addition to implementing new `Augmentation`, a new `AugInput` is needed as well. 
"""<def_stmt>_check_img_dtype img<block_start><assert_stmt>isinstance(img np.ndarray) "[Augmentation] Needs an numpy array, but got a {}!".format(type(img))<assert_stmt><not>isinstance(img.dtype np.integer)<or>(img.dtype<eq>np.uint8) "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format(img.dtype)<assert_stmt>img.ndim<in>[2 3] img.ndim<block_end><class_stmt>Augmentation(metaclass=ABCMeta)<block_start>""" Augmentation defines policies/strategies to generate :class:`Transform` from data. It is often used for pre-processing of input data. A policy typically contains randomness, but it can also choose to deterministically generate a :class:`Transform`. A "policy" that generates a :class:`Transform` may, in the most general case, need arbitrary information from input data in order to determine what transforms to apply. Therefore, each :class:`Augmentation` instance defines the arguments needed by its :meth:`get_transform` method with the :attr:`input_args` attribute. When called with the positional arguments defined by the :attr:`input_args`, the :meth:`get_transform` method executes the policy. Examples: :: # if a policy needs to know both image and semantic segmentation assert aug.input_args == ("image", "sem_seg") tfm: Transform = aug.get_transform(image, sem_seg) new_image = tfm.apply_image(image) To implement a custom :class:`Augmentation`, define its :attr:`input_args` and implement :meth:`get_transform`. Note that :class:`Augmentation` defines the policies to create a :class:`Transform`, but not how to apply the actual transform to those data. """<line_sep>input_args:Tuple[str]=("image" )<line_sep>""" Attribute of class instances that defines the argument(s) needed by :meth:`get_transform`. Default to only "image", because most policies only require knowing the image in order to determine the transform. Users can freely define arbitrary new args and their types in custom :class:`Augmentation`. In detectron2 we use the following convention: * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or floating point in range [0, 1] or [0, 255]. * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes of N instances. Each is in XYXY format in unit of absolute coordinates. * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel. We do not specify convention for other types and do not include builtin :class:`Augmentation` that uses other types in detectron2. """<def_stmt>_init self params=<none><block_start><if_stmt>params<block_start><for_stmt>k,v params.items()<block_start><if_stmt>k<ne>"self"<and><not>k.startswith("_")<block_start>setattr(self k v)<block_end><block_end><block_end><block_end># NOTE: in the future, can allow it to return list[Augmentation], # to delegate augmentation to others @abstractmethod<def_stmt>get_transform self *args<arrow>Transform<block_start>""" Execute the policy to use input data to create transform(s). Args: arguments must follow what's defined in :attr:`input_args`. Returns: Return a :class:`Transform` instance, which is the transform to apply to inputs. """<line_sep><pass><block_end><def_stmt>_rand_range self low=1.0 high=<none> size=<none><block_start>""" Uniform float random number between low and high. 
"""<if_stmt>high<is><none><block_start>low,high=0 low<block_end><if_stmt>size<is><none><block_start>size=[]<block_end><return>np.random.uniform(low high size)<block_end><def_stmt>__repr__ self<block_start>""" Produce something like: "MyAugmentation(field1={self.field1}, field2={self.field2})" """<try_stmt><block_start>sig=inspect.signature(self.__init__)<line_sep>classname=type(self).__name__<line_sep>argstr=[]<for_stmt>name,param sig.parameters.items()<block_start><assert_stmt>(param.kind<ne>param.VAR_POSITIONAL<and>param.kind<ne>param.VAR_KEYWORD) "The default __repr__ doesn't support *args or **kwargs"<assert_stmt>hasattr(self name) ("Attribute {} not found! "<concat>"Default __repr__ only works if attributes match the constructor.".format(name))<line_sep>attr=getattr(self name)<line_sep>default=param.default<if_stmt>default<is>attr<block_start><continue><block_end>argstr.append("{}={}".format(name pprint.pformat(attr)))<block_end><return>"{}({})".format(classname ", ".join(argstr))<block_end><except_stmt>AssertionError<block_start><return>super().__repr__()<block_end><block_end>__str__=__repr__<block_end>TransformGen=Augmentation<line_sep>""" Alias for Augmentation, since it is something that generates :class:`Transform`s """<class_stmt>AugInput<block_start>""" A base class for anything on which a list of :class:`Augmentation` can be applied. This class provides input arguments for :class:`Augmentation` to use, and defines how to apply transforms to these data. An instance of this class must satisfy the following: * :class:`Augmentation` declares some data it needs as arguments. A :class:`AugInput` must provide access to these data in the form of attribute access (``getattr``). For example, if a :class:`Augmentation` to be applied needs "image" and "sem_seg" arguments, this class must have the attribute "image" and "sem_seg" whose content is as required by the :class:`Augmentation`s. * This class must have a :meth:`transform(tfm: Transform) -> None` method which in-place transforms all attributes stored in the class. """<def_stmt>transform self tfm:Transform<arrow><none><block_start><raise>NotImplementedError<block_end><def_stmt>apply_augmentations self augmentations:List[Union[Augmentation Transform]]<arrow>TransformList<block_start>""" Apply a list of Transform/Augmentation in-place and returned the applied transform. Attributes of this class will be modified. Returns: TransformList: returns transformed inputs and the list of transforms applied. The TransformList can then be applied to other data associated with the inputs. """<line_sep>tfms=[]<for_stmt>aug augmentations<block_start><if_stmt>isinstance(aug Augmentation)<block_start>args=[]<for_stmt>f aug.input_args<block_start><try_stmt><block_start>args.append(getattr(self f))<block_end><except_stmt>AttributeError<block_start><raise>AttributeError(f"Augmentation {aug} needs '{f}', which is not an attribute of {self}!")<block_end><block_end>tfm=aug.get_transform(*args)<assert_stmt>isinstance(tfm Transform) (f"{type(aug)}.get_transform must return an instance of Transform! "<concat>"Got {type(tfm)} instead.")<block_end><else_stmt><block_start>tfm=aug<block_end>self.transform(tfm)<line_sep>tfms.append(tfm)<block_end><return>TransformList(tfms)<block_end><block_end><class_stmt>StandardAugInput(AugInput)<block_start>""" A standard implementation of :class:`AugInput` for the majority of use cases. This class provides the following standard attributes that are common to use by Augmentation (augmentation policies). 
These are chosen because most :class:`Augmentation` won't need anything more to define a augmentation policy. After applying augmentations to these special attributes, the returned transforms can then be used to transform other data structures that users have. Attributes: image (ndarray): image in HW or HWC format. The meaning of C is up to users boxes (ndarray or None): Nx4 boxes in XYXY_ABS mode sem_seg (ndarray or None): HxW semantic segmentation mask Examples: :: input = StandardAugInput(image, boxes=boxes) tfms = input.apply_augmentations(list_of_augmentations) transformed_image = input.image transformed_boxes = input.boxes transformed_other_data = tfms.apply_other(other_data) An extended project that works with new data types may require augmentation policies that need more inputs. An algorithm may need to transform inputs in a way different from the standard approach defined in this class. In those situations, users can implement new subclasses of :class:`AugInput` with differnt attributes and the :meth:`transform` method. """<def_stmt>__init__ self image:np.ndarray * boxes:Optional[np.ndarray]=<none> sem_seg:Optional[np.ndarray]=<none> <block_start>""" Args: image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or floating point in range [0, 1] or [0, 255]. boxes: (N,4) ndarray of float32. It represents the instance bounding boxes of N instances. Each is in XYXY format in unit of absolute coordinates. sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel. """<line_sep>_check_img_dtype(image)<line_sep>self.image=image<line_sep>self.boxes=boxes<line_sep>self.sem_seg=sem_seg<block_end><def_stmt>transform self tfm:Transform<arrow><none><block_start>""" In-place transform all attributes of this class. """<line_sep>self.image=tfm.apply_image(self.image)<if_stmt>self.boxes<is><not><none><block_start>self.boxes=tfm.apply_box(self.boxes)<block_end><if_stmt>self.sem_seg<is><not><none><block_start>self.sem_seg=tfm.apply_segmentation(self.sem_seg)<block_end><block_end><block_end><def_stmt>apply_augmentations augmentations:List[Union[Transform Augmentation]] inputs<block_start>""" Use :meth:`AugInput.apply_augmentations` instead. """<if_stmt>isinstance(inputs np.ndarray)# handle the common case of image-only Augmentation, also for backward compatibility <block_start>image_only=<true><line_sep>inputs=StandardAugInput(inputs)<block_end><else_stmt><block_start>image_only=<false><block_end>tfms=inputs.apply_augmentations(augmentations)<line_sep><return>inputs.image<if>image_only<else>inputs tfms<block_end>apply_transform_gens=apply_augmentations<line_sep>""" Alias for backward-compatibility. """<class_stmt>RandomApply(Augmentation)<block_start>""" Randomly apply the wrapper transformation with a given probability. """<def_stmt>__init__ self transform prob=0.5<block_start>""" Args: transform (Transform, Augmentation): the transform to be wrapped by the `RandomApply`. The `transform` can either be a `Transform` or `Augmentation` instance. prob (float): probability between 0.0 and 1.0 that the wrapper transformation is applied """<line_sep>super().__init__()<assert_stmt>isinstance(transform (Transform Augmentation)) (f"The given transform must either be a Transform or Augmentation instance. 
"<concat>f"Not {type(transform)}")<assert_stmt>0.0<le>prob<le>1.0 f"Probablity must be between 0.0 and 1.0 (given: {prob})"<line_sep>self.prob=prob<line_sep>self.transform=transform<if_stmt>isinstance(transform Augmentation)<block_start>self.input_args=transform.input_args<block_end><block_end><def_stmt>get_transform self img<block_start>do=self._rand_range()<l>self.prob<if_stmt>do<block_start><if_stmt>isinstance(self.transform Augmentation)<block_start><return>self.transform.get_transform(img)<block_end><else_stmt><block_start><return>self.transform<block_end><block_end><else_stmt><block_start><return>NoOpTransform()<block_end><block_end><block_end><class_stmt>RandomFlip(Augmentation)<block_start>""" Flip the image horizontally or vertically with the given probability. """<def_stmt>__init__ self prob=0.5 * horizontal=<true> vertical=<false><block_start>""" Args: prob (float): probability of flip. horizontal (boolean): whether to apply horizontal flipping vertical (boolean): whether to apply vertical flipping """<line_sep>super().__init__()<if_stmt>horizontal<and>vertical<block_start><raise>ValueError("Cannot do both horiz and vert. Please use two Flip instead.")<block_end><if_stmt><not>horizontal<and><not>vertical<block_start><raise>ValueError("At least one of horiz or vert has to be True!")<block_end>self._init(locals())<block_end><def_stmt>get_transform self img<block_start>h,w=img.shape[:2]<line_sep>do=self._rand_range()<l>self.prob<if_stmt>do<block_start><if_stmt>self.horizontal<block_start><return>HFlipTransform(w)<block_end><elif_stmt>self.vertical<block_start><return>VFlipTransform(h)<block_end><block_end><else_stmt><block_start><return>NoOpTransform()<block_end><block_end><block_end><class_stmt>Resize(Augmentation)<block_start>""" Resize image to a fixed target size"""<def_stmt>__init__ self shape interp=Image.BILINEAR<block_start>""" Args: shape: (h, w) tuple or a int interp: PIL interpolation method """<if_stmt>isinstance(shape int)<block_start>shape=(shape shape)<block_end>shape=tuple(shape)<line_sep>self._init(locals())<block_end><def_stmt>get_transform self img<block_start><return>ResizeTransform(img.shape[0] img.shape[1] self.shape[0] self.shape[1] self.interp)<block_end><block_end><class_stmt>ResizeShortestEdge(Augmentation)<block_start>""" Scale the shorter edge to the given size, with a limit of `max_size` on the longer edge. If `max_size` is reached, then downscale so that the longer edge does not exceed max_size. """<def_stmt>__init__ self short_edge_length max_size=sys.maxsize sample_style="range" interp=Image.BILINEAR<block_start>""" Args: short_edge_length (list[int]): If ``sample_style=="range"``, a [min, max] interval from which to sample the shortest edge length. If ``sample_style=="choice"``, a list of shortest edge lengths to sample from. max_size (int): maximum allowed longest edge length. sample_style (str): either "range" or "choice". 
"""<line_sep>super().__init__()<assert_stmt>sample_style<in>["range" "choice"] sample_style<line_sep>self.is_range=sample_style<eq>"range"<if_stmt>isinstance(short_edge_length int)<block_start>short_edge_length=(short_edge_length short_edge_length)<block_end>self._init(locals())<block_end><def_stmt>get_transform self img<block_start>h,w=img.shape[:2]<if_stmt>self.is_range<block_start>size=np.random.randint(self.short_edge_length[0] self.short_edge_length[1]+1)<block_end><else_stmt><block_start>size=np.random.choice(self.short_edge_length)<block_end><if_stmt>size<eq>0<block_start><return>NoOpTransform()<block_end>scale=size<times>1.0/min(h w)<if_stmt>h<l>w<block_start>newh,neww=size scale<times>w<block_end><else_stmt><block_start>newh,neww=scale<times>h size<block_end><if_stmt>max(newh neww)<g>self.max_size<block_start>scale=self.max_size<times>1.0/max(newh neww)<line_sep>newh=newh<times>scale<line_sep>neww=neww<times>scale<block_end>neww=int(neww+0.5)<line_sep>newh=int(newh+0.5)<line_sep><return>ResizeTransform(h w newh neww self.interp)<block_end><block_end><class_stmt>RandomRotation(Augmentation)<block_start>""" This method returns a copy of this image, rotated the given number of degrees counter clockwise around the given center. """<def_stmt>__init__ self angle expand=<true> center=<none> sample_style="range" interp=<none><block_start>""" Args: angle (list[float]): If ``sample_style=="range"``, a [min, max] interval from which to sample the angle (in degrees). If ``sample_style=="choice"``, a list of angles to sample from expand (bool): choose if the image should be resized to fit the whole rotated image (default), or simply cropped center (list[[float, float]]): If ``sample_style=="range"``, a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center, [0, 0] being the top left of the image and [1, 1] the bottom right. If ``sample_style=="choice"``, a list of centers to sample from Default: None, which means that the center of rotation is the center of the image center has no effect if expand=True because it only affects shifting """<line_sep>super().__init__()<assert_stmt>sample_style<in>["range" "choice"] sample_style<line_sep>self.is_range=sample_style<eq>"range"<if_stmt>isinstance(angle (float int))<block_start>angle=(angle angle)<block_end><if_stmt>center<is><not><none><and>isinstance(center[0] (float int))<block_start>center=(center center)<block_end>self._init(locals())<block_end><def_stmt>get_transform self img<block_start>h,w=img.shape[:2]<line_sep>center=<none><if_stmt>self.is_range<block_start>angle=np.random.uniform(self.angle[0] self.angle[1])<if_stmt>self.center<is><not><none><block_start>center=(np.random.uniform(self.center[0][0] self.center[1][0]) np.random.uniform(self.center[0][1] self.center[1][1]) )<block_end><block_end><else_stmt><block_start>angle=np.random.choice(self.angle)<if_stmt>self.center<is><not><none><block_start>center=np.random.choice(self.center)<block_end><block_end><if_stmt>center<is><not><none><block_start>center=(w<times>center[0] h<times>center[1])<block_end># Convert to absolute coordinates <if_stmt>angle%360<eq>0<block_start><return>NoOpTransform()<block_end><return>RotationTransform(h w angle expand=self.expand center=center interp=self.interp)<block_end><block_end><class_stmt>RandomCrop(Augmentation)<block_start>""" Randomly crop a subimage out of an image. """<def_stmt>__init__ self crop_type:str crop_size<block_start>""" Args: crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range". 
See `config/defaults.py` for explanation. crop_size (tuple[float]): the relative ratio or absolute pixels of height and width """<line_sep>super().__init__()<assert_stmt>crop_type<in>["relative_range" "relative" "absolute" "absolute_range"]<line_sep>self._init(locals())<block_end><def_stmt>get_transform self img<block_start>h,w=img.shape[:2]<line_sep>croph,cropw=self.get_crop_size((h w))<assert_stmt>h<ge>croph<and>w<ge>cropw "Shape computation in {} has bugs.".format(self)<line_sep>h0=np.random.randint(h-croph+1)<line_sep>w0=np.random.randint(w-cropw+1)<line_sep><return>CropTransform(w0 h0 cropw croph)<block_end><def_stmt>get_crop_size self image_size<block_start>""" Args: image_size (tuple): height, width Returns: crop_size (tuple): height, width in absolute pixels """<line_sep>h,w=image_size<if_stmt>self.crop_type<eq>"relative"<block_start>ch,cw=self.crop_size<line_sep><return>int(h<times>ch+0.5) int(w<times>cw+0.5)<block_end><elif_stmt>self.crop_type<eq>"relative_range"<block_start>crop_size=np.asarray(self.crop_size dtype=np.float32)<line_sep>ch,cw=crop_size+np.random.rand(2)<times>(1-crop_size)<line_sep><return>int(h<times>ch+0.5) int(w<times>cw+0.5)<block_end><elif_stmt>self.crop_type<eq>"absolute"<block_start><return>(min(self.crop_size[0] h) min(self.crop_size[1] w))<block_end><elif_stmt>self.crop_type<eq>"absolute_range"<block_start><assert_stmt>self.crop_size[0]<le>self.crop_size[1]<line_sep>ch=np.random.randint(min(h self.crop_size[0]) min(h self.crop_size[1])+1)<line_sep>cw=np.random.randint(min(w self.crop_size[0]) min(w self.crop_size[1])+1)<line_sep><return>ch cw<block_end><else_stmt><block_start><raise>NotImplementedError("Unknown crop type {}".format(self.crop_type))<block_end><block_end><block_end><class_stmt>RandomExtent(Augmentation)<block_start>""" Outputs an image by cropping a random "subrect" of the source image. The subrect can be parameterized to include pixels outside the source image, in which case they will be set to zeros (i.e. black). The size of the output image will vary with the size of the random subrect. """<def_stmt>__init__ self scale_range shift_range<block_start>""" Args: output_size (h, w): Dimensions of output image scale_range (l, h): Range of input-to-output size scaling factor shift_range (x, y): Range of shifts of the cropped subrect. The rect is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)], where (w, h) is the (width, height) of the input image. Set each component to zero to crop at the image's center. """<line_sep>super().__init__()<line_sep>self._init(locals())<block_end><def_stmt>get_transform self img<block_start>img_h,img_w=img.shape[:2]<line_sep># Initialize src_rect to fit the input image. src_rect=np.array([-0.5<times>img_w -0.5<times>img_h 0.5<times>img_w 0.5<times>img_h])<line_sep># Apply a random scaling to the src_rect. src_rect<augmul>np.random.uniform(self.scale_range[0] self.scale_range[1])<line_sep># Apply a random shift to the coordinates origin. src_rect[0::2]<augadd>self.shift_range[0]<times>img_w<times>(np.random.rand()-0.5)<line_sep>src_rect[1::2]<augadd>self.shift_range[1]<times>img_h<times>(np.random.rand()-0.5)<line_sep># Map src_rect coordinates into image coordinates (center at corner). 
src_rect[0::2]<augadd>0.5<times>img_w<line_sep>src_rect[1::2]<augadd>0.5<times>img_h<line_sep><return>ExtentTransform(src_rect=(src_rect[0] src_rect[1] src_rect[2] src_rect[3]) output_size=(int(src_rect[3]-src_rect[1]) int(src_rect[2]-src_rect[0])) )<block_end><block_end><class_stmt>RandomContrast(Augmentation)<block_start>""" Randomly transforms image contrast. Contrast intensity is uniformly sampled in (intensity_min, intensity_max). - intensity < 1 will reduce contrast - intensity = 1 will preserve the input image - intensity > 1 will increase contrast See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html """<def_stmt>__init__ self intensity_min intensity_max<block_start>""" Args: intensity_min (float): Minimum augmentation intensity_max (float): Maximum augmentation """<line_sep>super().__init__()<line_sep>self._init(locals())<block_end><def_stmt>get_transform self img<block_start>w=np.random.uniform(self.intensity_min self.intensity_max)<line_sep><return>BlendTransform(src_image=img.mean() src_weight=1-w dst_weight=w)<block_end><block_end><class_stmt>RandomBrightness(Augmentation)<block_start>""" Randomly transforms image brightness. Brightness intensity is uniformly sampled in (intensity_min, intensity_max). - intensity < 1 will reduce brightness - intensity = 1 will preserve the input image - intensity > 1 will increase brightness See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html """<def_stmt>__init__ self intensity_min intensity_max<block_start>""" Args: intensity_min (float): Minimum augmentation intensity_max (float): Maximum augmentation """<line_sep>super().__init__()<line_sep>self._init(locals())<block_end><def_stmt>get_transform self img<block_start>w=np.random.uniform(self.intensity_min self.intensity_max)<line_sep><return>BlendTransform(src_image=0 src_weight=1-w dst_weight=w)<block_end><block_end><class_stmt>RandomSaturation(Augmentation)<block_start>""" Randomly transforms saturation of an RGB image. Input images are assumed to have 'RGB' channel order. Saturation intensity is uniformly sampled in (intensity_min, intensity_max). - intensity < 1 will reduce saturation (make the image more grayscale) - intensity = 1 will preserve the input image - intensity > 1 will increase saturation See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html """<def_stmt>__init__ self intensity_min intensity_max<block_start>""" Args: intensity_min (float): Minimum augmentation (1 preserves input). intensity_max (float): Maximum augmentation (1 preserves input). """<line_sep>super().__init__()<line_sep>self._init(locals())<block_end><def_stmt>get_transform self img<block_start><assert_stmt>img.shape[-1]<eq>3 "RandomSaturation only works on RGB images"<line_sep>w=np.random.uniform(self.intensity_min self.intensity_max)<line_sep>grayscale=img.dot([0.299 0.587 0.114])[: : np.newaxis]<line_sep><return>BlendTransform(src_image=grayscale src_weight=1-w dst_weight=w)<block_end><block_end><class_stmt>RandomLighting(Augmentation)<block_start>""" The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet. Input images are assumed to have 'RGB' channel order. The degree of color jittering is randomly sampled via a normal distribution, with standard deviation given by the scale parameter. """<def_stmt>__init__ self scale<block_start>""" Args: scale (float): Standard deviation of principal component weighting. 
"""<line_sep>super().__init__()<line_sep>self._init(locals())<line_sep>self.eigen_vecs=np.array([[-0.5675 0.7192 0.4009] [-0.5808 -0.0045 -0.8140] [-0.5836 -0.6948 0.4203]])<line_sep>self.eigen_vals=np.array([0.2175 0.0188 0.0045])<block_end><def_stmt>get_transform self img<block_start><assert_stmt>img.shape[-1]<eq>3 "RandomLighting only works on RGB images"<line_sep>weights=np.random.normal(scale=self.scale size=3)<line_sep><return>BlendTransform(src_image=self.eigen_vecs.dot(weights<times>self.eigen_vals) src_weight=1.0 dst_weight=1.0)<block_end><block_end><def_stmt>_gen_crop_transform_with_instance crop_size image_size instances crop_box=<true><block_start>""" Generate a CropTransform so that the cropping region contains the center of the given instance. Args: crop_size (tuple): h, w in pixels image_size (tuple): h, w instance (dict): an annotation dict of one instance, in Detectron2's dataset format. """<line_sep>bbox=random.choice(instances)<line_sep>crop_size=np.asarray(crop_size dtype=np.int32)<line_sep>center_yx=(bbox[1]+bbox[3])<times>0.5 (bbox[0]+bbox[2])<times>0.5<assert_stmt>(image_size[0]<ge>center_yx[0]<and>image_size[1]<ge>center_yx[1]) "The annotation bounding box is outside of the image!"<assert_stmt>(image_size[0]<ge>crop_size[0]<and>image_size[1]<ge>crop_size[1]) "Crop size is larger than image size!"<line_sep>min_yx=np.maximum(np.floor(center_yx).astype(np.int32)-crop_size 0)<line_sep>max_yx=np.maximum(np.asarray(image_size dtype=np.int32)-crop_size 0)<line_sep>max_yx=np.minimum(max_yx np.ceil(center_yx).astype(np.int32))<line_sep>y0=np.random.randint(min_yx[0] max_yx[0]+1)<line_sep>x0=np.random.randint(min_yx[1] max_yx[1]+1)<line_sep># if some instance is cropped extend the box <if_stmt><not>crop_box<block_start>num_modifications=0<line_sep>modified=<true><line_sep># convert crop_size to float crop_size=crop_size.astype(np.float32)<while_stmt>modified<block_start>modified,x0,y0,crop_size=adjust_crop(x0 y0 crop_size instances)<line_sep>num_modifications<augadd>1<if_stmt>num_modifications<g>100<block_start><raise>ValueError("Cannot finished cropping adjustment within 100 tries (#instances {}).".format(len(instances)))<line_sep><return>CropTransform(0 0 image_size[1] image_size[0])<block_end><block_end><block_end><return>CropTransform(*map(int (x0 y0 crop_size[1] crop_size[0])))<block_end><def_stmt>adjust_crop x0 y0 crop_size instances eps=1e-3<block_start>modified=<false><line_sep>x1=x0+crop_size[1]<line_sep>y1=y0+crop_size[0]<for_stmt>bbox instances<block_start><if_stmt>bbox[0]<l>x0-eps<and>bbox[2]<g>x0+eps<block_start>crop_size[1]<augadd>x0-bbox[0]<line_sep>x0=bbox[0]<line_sep>modified=<true><block_end><if_stmt>bbox[0]<l>x1-eps<and>bbox[2]<g>x1+eps<block_start>crop_size[1]<augadd>bbox[2]-x1<line_sep>x1=bbox[2]<line_sep>modified=<true><block_end><if_stmt>bbox[1]<l>y0-eps<and>bbox[3]<g>y0+eps<block_start>crop_size[0]<augadd>y0-bbox[1]<line_sep>y0=bbox[1]<line_sep>modified=<true><block_end><if_stmt>bbox[1]<l>y1-eps<and>bbox[3]<g>y1+eps<block_start>crop_size[0]<augadd>bbox[3]-y1<line_sep>y1=bbox[3]<line_sep>modified=<true><block_end><block_end><return>modified x0 y0 crop_size<block_end><class_stmt>RandomCropWithInstance(RandomCrop)<block_start>""" Instance-aware cropping. 
"""<def_stmt>__init__ self crop_type crop_size crop_instance=<true><block_start>""" Args: crop_instance (bool): if False, extend cropping boxes to avoid cropping instances """<line_sep>super().__init__(crop_type crop_size)<line_sep>self.crop_instance=crop_instance<line_sep>self.input_args=("image" "boxes")<block_end><def_stmt>get_transform self img boxes<block_start>image_size=img.shape[:2]<line_sep>crop_size=self.get_crop_size(image_size)<line_sep><return>_gen_crop_transform_with_instance(crop_size image_size boxes crop_box=self.crop_instance)<block_end><block_end><class_stmt>PadAugmentation(Augmentation)<block_start><def_stmt>__init__ self crop_size<block_start>""" Args: crop_instance (bool): if False, extend cropping boxes to avoid cropping instances """<line_sep>super().__init__()<line_sep>self.crop_size=crop_size<block_end><def_stmt>get_crop_size self image_size<block_start>h,w=image_size<line_sep><return>(min(self.crop_size[0] h) min(self.crop_size[1] w))<block_end><def_stmt>get_transform self img<block_start>image_size=img.shape[:2]<line_sep>image_size=self.get_crop_size(image_size)<line_sep><return>_PadTransform(image_size[0] image_size[1] self.crop_size[1] self.crop_size[0])<block_end><block_end><class_stmt>_PadTransform(Transform)<block_start><def_stmt>__init__ self h:int w:int crop_h:int crop_w:int<block_start>super().__init__()<line_sep>self._set_attributes(locals())<block_end><def_stmt>apply_image self img:np.ndarray<arrow>np.ndarray<block_start>h,w=img.shape[:2]<assert_stmt>(self.h<eq>h<and>self.w<eq>w) "Input size mismatch h w {}:{} -> {}:{}".format(self.h self.w h w)<line_sep>padding=((0 self.crop_h-h) (0 self.crop_w-w) (0 0))<line_sep>img=np.pad(img pad_width=padding)<line_sep><return>img<block_end><def_stmt>apply_coords self coords:np.ndarray<arrow>np.ndarray<block_start><return>coords<block_end><block_end>
# -*- coding: utf-8 -*- """ Tencent is pleased to support the open source community by making 蓝鲸智云PaaS平台社区版 (BlueKing PaaS Community Edition) available. Copyright (C) 2017-2021 TH<NAME>, a Tencent company. All rights reserved. Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://opensource.org/licenses/MIT Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """<import_from_stmt>typing Dict List Optional Type<import_stmt>attr<import_from_stmt>backend.iam.permissions decorators<import_from_stmt>backend.iam.permissions.exceptions AttrValidationError<import_from_stmt>backend.iam.permissions.perm PermCtx Permission<import_from_stmt>backend.iam.permissions.request IAMResource ResourceRequest<import_from_stmt>backend.packages.blue_krill.data_types.enum EnumField StructuredEnum<import_from_stmt>backend.utils.basic md5_digest<import_from_stmt>.cluster ClusterPermission related_cluster_perm<import_from_stmt>.constants ResourceType<def_stmt>calc_iam_ns_id cluster_id:str name:str<arrow>Optional[str]<block_start>""" 计算(压缩)出注册到权限中心的命名空间 ID,具备唯一性. 当前的算法并不能完全避免冲突,但概率较低。 note: 权限中心对资源 ID 有长度限制,不超过32位。长度越长,处理性能越低 :param cluster_id: 集群 ID :param name: 命名空间名,k8s 限定最长63个字符 :return: iam_ns_id 是命名空间注册到权限中心的资源 ID,它是对结构`集群ID:命名空间name`的一个压缩, 如 `BCS-K8S-40000:default` 会被处理成 `40000:5f03d33dde`。其中,保留集群数字 ID 的目的是用于 NamespaceProvider 中的 fetch_instance_info 方法 """<line_sep>cluster_idx=cluster_id.split('-')[-1]<line_sep>iam_ns_id=f'{cluster_idx}:{md5_digest(name)[8:16]}{name[:2]}'<if_stmt>len(iam_ns_id)<g>32<block_start><raise>ValueError(f'iam_ns_id({iam_ns_id}) more than 32 characters')<block_end><return>iam_ns_id<block_end><class_stmt>NamespaceAction(str StructuredEnum)<block_start>CREATE=EnumField('namespace_create' label='namespace_create')<line_sep>VIEW=EnumField('namespace_view' label='namespace_view')<line_sep>UPDATE=EnumField('namespace_update' label='namespace_update')<line_sep>DELETE=EnumField('namespace_delete' label='namespace_delete')<line_sep>USE=EnumField('namespace_use' label='namespace_use')<block_end>@attr.dataclass<class_stmt>NamespacePermCtx(PermCtx)<block_start>project_id:str=''<line_sep>cluster_id:str=''<line_sep>name:Optional[str]=<none># 命名空间名 iam_ns_id:Optional[str]=<none># 注册到权限中心的命名空间ID <def_stmt>__attrs_post_init__ self<block_start>"""权限中心的 resource_id 长度限制为32位"""<if_stmt>self.name<block_start>self.iam_ns_id=calc_iam_ns_id(self.cluster_id self.name)<block_end><block_end>@property<def_stmt>resource_id self<arrow>str<block_start><return>self.iam_ns_id<block_end><def_stmt>validate self<block_start>super().validate()<if_stmt><not>self.project_id<block_start><raise>AttrValidationError('project_id must not be empty')<block_end><if_stmt><not>self.cluster_id<block_start><raise>AttrValidationError('cluster_id must not be empty')<block_end><block_end><block_end><class_stmt>NamespaceRequest(ResourceRequest)<block_start>resource_type:str=ResourceType.Namespace<line_sep>attr={'_bk_iam_path_':f'/project,{{project_id}}/cluster,{{cluster_id}}/'}<def_stmt>_make_attribute self res_id:str<arrow>Dict<block_start><return>{'_bk_iam_path_':self.attr['_bk_iam_path_'].format(project_id=self.attr_kwargs['project_id'] 
cluster_id=self.attr_kwargs['cluster_id'])}<block_end><def_stmt>_validate_attr_kwargs self<block_start><if_stmt><not>self.attr_kwargs.get('project_id')<block_start><raise>AttrValidationError('missing project_id or project_id is invalid')<block_end><if_stmt><not>self.attr_kwargs.get('cluster_id')<block_start><raise>AttrValidationError('missing cluster_id or cluster_id is invalid')<block_end><block_end><block_end><class_stmt>related_namespace_perm(decorators.RelatedPermission)<block_start>module_name:str=ResourceType.Namespace<def_stmt>_convert_perm_ctx self instance args kwargs<arrow>PermCtx<block_start>"""Only supported when the first positional argument is a PermCtx (subclass) instance"""<if_stmt>len(args)<le>0<block_start><raise>TypeError('missing NamespacePermCtx instance argument')<block_end><if_stmt>isinstance(args[0] PermCtx)<block_start><return>NamespacePermCtx(username=args[0].username project_id=args[0].project_id cluster_id=args[0].cluster_id name=args[0].name )<block_end><else_stmt><block_start><raise>TypeError('the first argument must be a PermCtx instance')<block_end><block_end><block_end><class_stmt>namespace_perm(decorators.Permission)<block_start>module_name:str=ResourceType.Namespace<block_end><class_stmt>NamespacePermission(Permission)<block_start>"""Namespace permissions"""<line_sep>resource_type:str=ResourceType.Namespace<line_sep>resource_request_cls:Type[ResourceRequest]=NamespaceRequest<line_sep>parent_res_perm=ClusterPermission()<line_sep>@related_cluster_perm(method_name='can_use')<def_stmt>can_create self perm_ctx:NamespacePermCtx raise_exception:bool=<true><arrow>bool<block_start><return>self.can_action(perm_ctx NamespaceAction.CREATE raise_exception)<block_end>@related_cluster_perm(method_name='can_view')<def_stmt>can_view self perm_ctx:NamespacePermCtx raise_exception:bool=<true><arrow>bool<block_start>perm_ctx.validate_resource_id()<line_sep><return>self.can_action(perm_ctx NamespaceAction.VIEW raise_exception)<block_end>@related_cluster_perm(method_name='can_use')<def_stmt>can_update self perm_ctx:NamespacePermCtx raise_exception:bool=<true><arrow>bool<block_start>perm_ctx.validate_resource_id()<line_sep><return>self.can_action_with_view(perm_ctx NamespaceAction.UPDATE NamespaceAction.VIEW raise_exception)<block_end>@related_cluster_perm(method_name='can_use')<def_stmt>can_delete self perm_ctx:NamespacePermCtx raise_exception:bool=<true><arrow>bool<block_start>perm_ctx.validate_resource_id()<line_sep><return>self.can_action_with_view(perm_ctx NamespaceAction.DELETE NamespaceAction.VIEW raise_exception)<block_end>@related_cluster_perm(method_name='can_use')<def_stmt>can_use self perm_ctx:NamespacePermCtx raise_exception:bool=<true><arrow>bool<block_start>perm_ctx.validate_resource_id()<line_sep><return>self.can_action_with_view(perm_ctx NamespaceAction.USE NamespaceAction.VIEW raise_exception)<block_end><def_stmt>make_res_request self res_id:str perm_ctx:NamespacePermCtx<arrow>ResourceRequest<block_start><return>self.resource_request_cls(res_id project_id=perm_ctx.project_id cluster_id=perm_ctx.cluster_id)<block_end><def_stmt>get_parent_chain self perm_ctx:NamespacePermCtx<arrow>List[IAMResource]<block_start><return>[IAMResource(ResourceType.Project perm_ctx.project_id) IAMResource(ResourceType.Cluster perm_ctx.cluster_id) ]<block_end><def_stmt>get_resource_id self perm_ctx:NamespacePermCtx<arrow>Optional[str]<block_start><return>perm_ctx.iam_ns_id<block_end><block_end>
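# Illustrative usage sketch (an assumption, not part of the original module): how the
# namespace-ID compression and the permission context fit together. The values 'admin',
# 'project-xyz' and 'BCS-K8S-40000' are made-up example identifiers.
iam_ns_id = calc_iam_ns_id('BCS-K8S-40000', 'default')
# -> '40000:' + 8 hex chars of md5_digest('default') + 'de', e.g. '40000:5f03d33dde'

perm_ctx = NamespacePermCtx(
    username='admin',
    project_id='project-xyz',
    cluster_id='BCS-K8S-40000',
    name='default',
)
# __attrs_post_init__ has already filled perm_ctx.iam_ns_id with the compressed ID;
# NamespacePermission().can_view(perm_ctx) would then ask IAM about that resource.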
# Generated by Django 3.0.7 on 2020-11-07 14:04 <import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('jobsapp' '0009_favorite') ]<line_sep>operations=[migrations.AddField(model_name='applicant' name='comment' field=models.TextField(blank=<true> null=<true>) ) migrations.AddField(model_name='applicant' name='status' field=models.SmallIntegerField(default=1) ) ]<block_end>
# Copyright (C) 2020. Huawei Technologies Co., Ltd. All rights reserved. # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. <import_stmt>itertools<import_stmt>random<import_stmt>numpy<as>np<import_stmt>psutil<import_from_stmt>.lidar_sensor_params SensorParams<import_from_stmt>.utils pybullet<import_from_stmt>.utils.math batches rotate_quat<import_from_stmt>.utils.pybullet bullet_client<as>bc<class_stmt>Lidar<block_start><def_stmt>__init__ self origin sensor_params:SensorParams bullet_client:bc.BulletClient<block_start>self._origin=origin<line_sep>self._sensor_params=sensor_params<line_sep>self._bullet_client=bullet_client<line_sep>self._n_threads=psutil.cpu_count(logical=<false>)<line_sep># As an optimization we compute a set of "base rays" once and shift translate # them to follow the user, and then trace for collisions. 
self._base_rays=<none><line_sep>self._static_lidar_noise=self._compute_static_lidar_noise()<block_end>@property<def_stmt>origin self<block_start><return>self._origin<block_end>@origin.setter<def_stmt>origin self value<block_start>self._origin=value<block_end><def_stmt>_compute_static_lidar_noise self<block_start>n_rays=int((self._sensor_params.end_angle-self._sensor_params.start_angle)/self._sensor_params.angle_resolution)<line_sep>n_points=n_rays<times>len(self._sensor_params.laser_angles)<line_sep>static_lidar_noise=[]<for_stmt>_ range(n_points)<block_start>static_lidar_noise.append(random.gauss(self._sensor_params.noise_mu self._sensor_params.noise_sigma))<block_end><return>np.array(static_lidar_noise dtype=np.float)<block_end><def_stmt>compute_point_cloud self<block_start>rays=self._compute_rays()<line_sep>point_cloud,hits=self._trace_rays(rays)<line_sep># point_cloud = self._apply_noise(point_cloud) <assert_stmt>(len(point_cloud)<eq>len(hits)<eq>len(rays)<eq>len(self._static_lidar_noise))<line_sep><return>point_cloud hits rays<block_end><def_stmt>_compute_rays self<block_start><if_stmt>self._base_rays<is><none><block_start>self._base_rays=[]<line_sep>n_rays=int((self._sensor_params.end_angle-self._sensor_params.start_angle)/self._sensor_params.angle_resolution)<line_sep>yaws=-self._sensor_params.laser_angles<line_sep>rolls=np.arange(n_rays)<times>self._sensor_params.angle_resolution<for_stmt>yaw,roll itertools.product(yaws rolls)<block_start>rot=pybullet.getQuaternionFromEuler((roll 0 yaw))<line_sep>origin=np.array([0 0 0])<line_sep>direction=rotate_quat(np.asarray(rot dtype=float) np.asarray((0 self._sensor_params.max_distance 0) dtype=float) )<line_sep>self._base_rays.append((origin direction))<block_end><block_end>rays=[(origin+self._origin direction+self._origin)<for>origin,direction self._base_rays]<line_sep><return>rays<block_end><def_stmt>_trace_rays self rays<block_start>results=[]<for_stmt>batched_rays batches(rays int(pybullet.MAX_RAY_INTERSECTION_BATCH_SIZE-1))<block_start>origins,directions=zip(*batched_rays)<line_sep>results.extend(self._bullet_client.rayTestBatch(origins directions self._n_threads))<block_end>hit_ids,_,_,positions,_=zip(*results)<line_sep>positions=list(positions)<line_sep>hits=[]<for_stmt>i,position enumerate(positions)<block_start>hit=hit_ids[i]<ne>-1<line_sep>hits.append(hit)<line_sep>positions[i]=(np.array(position)<if>hit<else>np.array([np.inf np.inf np.inf]))<block_end><return>positions hits<block_end><def_stmt>_apply_noise self point_cloud<block_start>dynamic_noise=np.random.normal(self._sensor_params.noise_mu self._sensor_params.noise_sigma size=len(point_cloud) )<line_sep>local_pc=point_cloud-self._origin<line_sep>noise=self._static_lidar_noise+dynamic_noise<line_sep><return>point_cloud+(local_pc/np.linalg.norm(local_pc axis=1)[: np.newaxis]<times>noise[: np.newaxis])<block_end><block_end>
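# Illustrative sketch (an assumption, not part of the original module): how the ray and
# point counts used above follow from the sensor parameters. The concrete values below are
# made up; only the arithmetic mirrors _compute_rays() and _compute_static_lidar_noise().
import numpy as np

start_angle, end_angle = 0.0, 2 * np.pi            # horizontal sweep (example values)
angle_resolution = np.pi / 180.0                   # one ray per degree (example value)
laser_angles = np.radians([-10.0, 0.0, 10.0])      # three vertical channels (example)

n_rays = int((end_angle - start_angle) / angle_resolution)   # 360 rays per channel
n_points = n_rays * len(laser_angles)                        # 1080 points per scan
# One static gaussian noise term is drawn per point, and compute_point_cloud()
# asserts that the traced point cloud has exactly n_points entries.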
# pylint: disable=missing-function-docstring, missing-module-docstring 
<import_from_stmt>mpi4py MPI<import_from_stmt>numpy zeros<import_from_stmt>numpy ones<if_stmt>__name__<eq>'__main__'<block_start>rank=-1<line_sep>comm=MPI.COMM_WORLD<line_sep>rank=comm.Get_rank()<line_sep># passing MPI datatypes explicitly 
<if_stmt>rank<eq>0<block_start>data=ones(5 'int')<line_sep>comm.Send([data MPI.INT] dest=1 tag=77)<block_end><elif_stmt>rank<eq>1<block_start>data=zeros(5 'int')<line_sep>comm.Recv([data MPI.INT] source=0 tag=77)<line_sep>print(data)<block_end># automatic MPI datatype discovery 
<if_stmt>rank<eq>0<block_start>data_=ones(5 'double')<line_sep>comm.Send(data_ dest=1 tag=13)<block_end><elif_stmt>rank<eq>1<block_start>data_=zeros(5 'double')<line_sep>comm.Recv(data_ source=0 tag=13)<line_sep>print(data_)<block_end><block_end>
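# Illustrative note (an assumption, not part of the original file): the script above only
# does something useful with at least two ranks, e.g. launched as
#   mpiexec -n 2 python <this_file>.py
# Rank 1 should then print the integer array [1 1 1 1 1] followed by the
# float array [1. 1. 1. 1. 1.].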
# !/usr/bin/env python # -- coding: utf-8 -- # @Time : 2020/10/28 16:41 # @Author : liumin # @File : ICNet.py <import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>torchvision<line_sep>__all__=["ICNet"]<def_stmt>Conv1x1BN in_channels out_channels<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=1 stride=1 bias=<false>) nn.BatchNorm2d(out_channels))<block_end><def_stmt>Conv1x1BNReLU in_channels out_channels<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=1 stride=1 bias=<false>) nn.BatchNorm2d(out_channels) nn.ReLU(inplace=<true>))<block_end><def_stmt>Conv3x3BN in_channels out_channels stride dilation=1<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=3 stride=stride padding=dilation dilation=dilation bias=<false>) nn.BatchNorm2d(out_channels))<block_end><def_stmt>Conv3x3BNReLU in_channels out_channels stride dilation=1<block_start><return>nn.Sequential(nn.Conv2d(in_channels=in_channels out_channels=out_channels kernel_size=3 stride=stride padding=dilation dilation=dilation bias=<false>) nn.BatchNorm2d(out_channels) nn.ReLU(inplace=<true>))<block_end><class_stmt>CascadeFeatureFusion(nn.Module)<block_start><def_stmt>__init__ self low_channels high_channels out_channels num_classes<block_start>super(CascadeFeatureFusion self).__init__()<line_sep>self.conv_low=Conv3x3BNReLU(low_channels out_channels 1 dilation=2)<line_sep>self.conv_high=Conv3x3BNReLU(high_channels out_channels 1 dilation=1)<line_sep>self.relu=nn.ReLU(inplace=<true>)<line_sep>self.conv_low_cls=nn.Conv2d(out_channels num_classes 1 bias=<false>)<block_end><def_stmt>forward self x_low x_high<block_start>x_low=F.interpolate(x_low size=x_high.size()[2:] mode='bilinear' align_corners=<true>)<line_sep>x_low=self.conv_low(x_low)<line_sep>x_high=self.conv_high(x_high)<line_sep>out=self.relu(x_low+x_high)<line_sep>x_low_cls=self.conv_low_cls(x_low)<line_sep><return>out x_low_cls<block_end><block_end><class_stmt>Backbone(nn.Module)<block_start><def_stmt>__init__ self pyramids=[1 2 3 6]<block_start>super(Backbone self).__init__()<line_sep>self.pretrained=torchvision.models.resnet50(pretrained=<true>)<block_end><def_stmt>forward self x<block_start>x=self.pretrained.conv1(x)<line_sep>x=self.pretrained.bn1(x)<line_sep>x=self.pretrained.relu(x)<line_sep>x=self.pretrained.maxpool(x)<line_sep>c1=self.pretrained.layer1(x)<line_sep>c2=self.pretrained.layer2(c1)<line_sep>c3=self.pretrained.layer3(c2)<line_sep>c4=self.pretrained.layer4(c3)<line_sep><return>c1 c2 c3 c4<block_end><block_end><class_stmt>PyramidPoolingModule(nn.Module)<block_start><def_stmt>__init__ self pyramids=[1 2 3 6]<block_start>super(PyramidPoolingModule self).__init__()<line_sep>self.pyramids=pyramids<block_end><def_stmt>forward self x<block_start>feat=x<line_sep>height,width=x.shape[2:]<for_stmt>bin_size self.pyramids<block_start>feat_x=F.adaptive_avg_pool2d(x output_size=bin_size)<line_sep>feat_x=F.interpolate(feat_x size=(height width) mode='bilinear' align_corners=<true>)<line_sep>feat=feat+feat_x<block_end><return>feat<block_end><block_end><class_stmt>ICNet(nn.Module)<block_start><def_stmt>__init__ self num_classes<block_start>super(ICNet self).__init__()<line_sep>self.conv_sub1=nn.Sequential(Conv3x3BNReLU(3 32 2) Conv3x3BNReLU(32 32 2) Conv3x3BNReLU(32 64 
2))<line_sep>self.backbone=Backbone()<line_sep>self.ppm=PyramidPoolingModule()<line_sep>self.cff_12=CascadeFeatureFusion(128 64 128 num_classes)<line_sep>self.cff_24=CascadeFeatureFusion(2048 512 128 num_classes)<line_sep>self.conv_cls=nn.Conv2d(128 num_classes 1 bias=<false>)<block_end><def_stmt>forward self x# sub 1 <block_start>x_sub1=self.conv_sub1(x)<line_sep># sub 2 x_sub2=F.interpolate(x scale_factor=0.5 mode='bilinear')<line_sep>_,x_sub2,_,_=self.backbone(x_sub2)<line_sep># sub 4 x_sub4=F.interpolate(x scale_factor=0.25 mode='bilinear')<line_sep>_,_,_,x_sub4=self.backbone(x_sub4)<line_sep># add PyramidPoolingModule x_sub4=self.ppm(x_sub4)<line_sep>outs=list()<line_sep>x_cff_24,x_24_cls=self.cff_24(x_sub4 x_sub2)<line_sep>outs.append(x_24_cls)<line_sep># x_cff_12, x_12_cls = self.cff_12(x_sub2, x_sub1) x_cff_12,x_12_cls=self.cff_12(x_cff_24 x_sub1)<line_sep>outs.append(x_12_cls)<line_sep>up_x2=F.interpolate(x_cff_12 scale_factor=2 mode='bilinear')<line_sep>up_x2=self.conv_cls(up_x2)<line_sep>outs.append(up_x2)<line_sep>up_x8=F.interpolate(up_x2 scale_factor=4 mode='bilinear')<line_sep>outs.append(up_x8)<line_sep># 1 -> 1/4 -> 1/8 -> 1/16 outs.reverse()<line_sep><return>outs<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>model=ICNet(num_classes=19)<line_sep>print(model)<line_sep>input=torch.randn(1 3 512 512)<line_sep>output=model(input)<line_sep>print(output[0].shape)<line_sep>print(output[1].shape)<line_sep>print(output[2].shape)<line_sep>print(output[3].shape)<block_end>
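# Sanity-check sketch (an assumption derived from the stride arithmetic above, not part of
# the original file): expected shapes of the four outputs for the 512x512 example input,
# largest scale first because of outs.reverse().
expected_shapes = [
    (1, 19, 512, 512),   # up_x8: full resolution
    (1, 19, 128, 128),   # up_x2: 1/4
    (1, 19, 64, 64),     # x_12_cls: 1/8
    (1, 19, 32, 32),     # x_24_cls: 1/16
]
for out, shape in zip(output, expected_shapes):
    assert tuple(out.shape) == shape, (tuple(out.shape), shape)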
# Copyright (c) 2009 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. {'variables':{'chromium_code':1 } 'targets':[{'target_name':'security_tests' 'type':'shared_library' 'sources':['../../../sandbox/win/tests/validation_tests/commands.cc' '../../../sandbox/win/tests/validation_tests/commands.h' 'ipc_security_tests.cc' 'ipc_security_tests.h' 'security_tests.cc' ] } ] }<line_sep>
<import_stmt>functools<import_from_stmt>typing Iterable List<line_sep>__all__=['DocGroundtruthPair']<if_stmt><false><block_start><import_from_stmt>. Document<block_end><class_stmt>DocGroundtruthPair<block_start>""" Helper class to expose a common interface to the traversal logic of the BaseExecutable Driver. It checks that `docs` and `groundtruths` share the same structure, so that while traversing the driver can be applied at a comparable level of granularity and adjacency. This does not prevent comparing, at the end, a document with 10 matches against a groundtruth with 20 matches. :param doc: Target `Document`. :param groundtruth: The :class:`Document` with desired state. """<def_stmt>__init__ self doc:'Document' groundtruth:'Document'<block_start>"""Set constructor method. :param doc: actual Document :param groundtruth: groundtruth Document """<line_sep>self.doc=doc<line_sep>self.groundtruth=groundtruth<block_end>@property<def_stmt>matches self<arrow>Iterable['DocGroundtruthPair']<block_start>"""Get the pairs between matches and Groundtruth. :yields: DocGroundtruthPair object """<assert_stmt>len(self.doc.matches)<eq>len(self.groundtruth.matches)<for_stmt>doc,groundtruth zip(self.doc.matches self.groundtruth.matches)<block_start><yield>DocGroundtruthPair(doc groundtruth)<block_end><block_end>@property<def_stmt>chunks self<arrow>Iterable['DocGroundtruthPair']<block_start>"""Get the pairs between chunks and Groundtruth. :yields: DocGroundtruthPair object """<assert_stmt>len(self.doc.chunks)<eq>len(self.groundtruth.chunks)<for_stmt>doc,groundtruth zip(self.doc.chunks self.groundtruth.chunks)<block_start><yield>DocGroundtruthPair(doc groundtruth)<block_end><block_end><block_end><class_stmt>VersionedMixin<block_start>""" Helper class to add versioning to an object. The version number is incremented each time an attribute is set. """<line_sep>version=0<line_sep>ON_GETATTR:List=[]<def_stmt>_increase_version self<block_start>super().__setattr__('version' self.version+1)<block_end><def_stmt>__setattr__ self attr value<block_start>super().__setattr__(attr value)<line_sep>self._increase_version()<block_end><def_stmt>__delattr__ self attr<block_start>super(VersionedMixin self).__delattr__(attr)<line_sep>self._increase_version()<block_end><block_end><def_stmt>versioned fn<block_start>""" Decorator function that increases the version number each time the decorated method is called. The class of the decorated method must be a subclass of :class:`VersionedMixin` :param fn: the method to decorate :return: decorated function """<line_sep>@functools.wraps(fn)<def_stmt>wrapper self *args **kwargs<block_start>self._increase_version()<line_sep><return>fn(self *args **kwargs)<block_end><return>wrapper<block_end>
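# Illustrative usage sketch (an assumption, not part of the original module): how
# VersionedMixin and @versioned interact. Counter is a hypothetical class used only here.
class Counter(VersionedMixin):
    def __init__(self):
        self.value = 0             # attribute assignment -> version becomes 1

    @versioned
    def reset(self):
        pass                       # the decorator alone bumps the version


c = Counter()
assert c.version == 1              # one __setattr__ call in __init__
c.value = 5                        # every assignment increments the version
assert c.version == 2
c.reset()                          # @versioned increments even without assignments
assert c.version == 3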
<import_from_stmt>flair.embeddings DocumentPoolEmbeddings WordEmbeddings<import_stmt>flair<import_stmt>torch<line_sep>flair.device=torch.device('cpu')<class_stmt>DocEmbeddings<block_start>__instance=<none><line_sep>@staticmethod<def_stmt>getInstance <block_start>""" Static access method. """<if_stmt>DocEmbeddings.__instance<is><none><block_start>DocEmbeddings()<block_end><return>DocEmbeddings.__instance<block_end><def_stmt>__init__ self<block_start>""" Virtually private constructor. """<if_stmt>DocEmbeddings.__instance<is><not><none><block_start><raise>Exception("This class is a singleton!")<block_end><else_stmt><block_start>doc_embeddings=DocumentPoolEmbeddings([WordEmbeddings("glove")])<line_sep>DocEmbeddings.__instance=doc_embeddings<block_end><block_end><block_end>
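# Illustrative usage sketch (an assumption, not part of the original module): the singleton
# hands back a DocumentPoolEmbeddings object, so the usual flair embed-a-Sentence flow applies.
from flair.data import Sentence

embedder = DocEmbeddings.getInstance()           # first call builds the GloVe pool on CPU
assert embedder is DocEmbeddings.getInstance()   # later calls reuse the same object

sentence = Sentence("hello world")
embedder.embed(sentence)                         # mean-pooled document embedding
print(sentence.embedding.shape)                  # e.g. torch.Size([100]) for GloVe vectors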