import ast

from flake8_plugin_utils import Visitor

from flake8_pytest_style.config import Config
from flake8_pytest_style.errors import AssertAlwaysFalse, FailWithoutMessage
from flake8_pytest_style.utils import (
    get_simple_call_args,
    is_empty_string,
    is_fail_call,
    is_falsy_constant,
)


class FailVisitor(Visitor[Config]):
    def _check_fail_call(self, node: ast.Call) -> None:
        """Checks for PT016."""
        args = get_simple_call_args(node)
        msg_argument = args.get_argument('msg', 0)
        if not msg_argument or is_empty_string(msg_argument):
            self.error_from_node(FailWithoutMessage, node)

    def visit_Assert(self, node: ast.Assert) -> None:
        """Checks for PT015."""
        if is_falsy_constant(node.test):
            self.error_from_node(AssertAlwaysFalse, node)

    def visit_Call(self, node: ast.Call) -> None:
        if is_fail_call(node):
            self._check_fail_call(node)
import FWCore.ParameterSet.Config as cms

isolationInputParameters = cms.PSet(
    barrelBasicCluster=cms.InputTag("islandBasicClusters", "islandBarrelBasicClusters"),
    endcapBasicCluster=cms.InputTag("islandBasicClusters", "islandEndcapBasicClusters"),
    horeco=cms.InputTag("horeco"),
    hfreco=cms.InputTag("hfreco"),
    hbhereco=cms.InputTag("hbhereco"),
    track=cms.InputTag("hiGeneralTracks"),
    photons=cms.InputTag("cleanPhotons"),
)
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

import django

django.setup()

from course.models import GradeChange

for gchange in GradeChange.objects.all():
    if gchange.flow_session is not None:
        gchange.attempt_id = "flow-session-%d" % gchange.flow_session.id
        gchange.save()
from typing import Optional, Dict

import jwt
import sentry_sdk
from fastapi import HTTPException
from starlette import status
from starlette.requests import Request

from auth.models import Role
from auth.models import User
from config import cfg


def get_user(request: Request) -> User:
    """
    Protect route from anonymous access, requiring and returning current authenticated user.

    :param request: web request
    :return: current user, otherwise raise an HTTPException (status=401)
    """
    return _check_and_extract_user(request)


def get_admin(request: Request) -> User:
    """
    Allow access only to an 'admin' account, returning current authenticated admin account data.

    :param request: web request
    :return: current admin user, otherwise raise an HTTPException (status=401)
    """
    user = _check_and_extract_user(request)
    if user.role != Role.ADMIN:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
    return user


def get_optional_user(request: Request) -> Optional[User]:
    """
    Return authenticated user or None if session is anonymous.

    :param request: web request
    :return: current user or None for anonymous sessions
    """
    try:
        return _check_and_extract_user(request)
    except HTTPException:
        if request.headers.get("Authorization"):
            raise


def extract_user_from_token(access_token: str, verify_exp: bool = True) -> User:
    """
    Extract User object from jwt token, with optional expiration check.

    :param access_token: encoded access token string
    :param verify_exp: whether to perform expiration verification or not
    :return: User object stored inside the jwt
    """
    return User(**jwt.decode(
        access_token,
        key=cfg.jwt_secret,
        algorithms=[cfg.jwt_algorithm],
        options={"verify_exp": verify_exp},
    )["user"])


def decode_jwt_refresh_token(encoded_refresh_token: str, verify_exp: bool = True) -> Dict:
    """
    Decode an encoded refresh token, with optional expiration check.

    :param encoded_refresh_token: encoded refresh token string
    :param verify_exp: whether to perform expiration verification or not
    :return: decoded jwt refresh token as dictionary
    """
    return jwt.decode(
        encoded_refresh_token,
        key=cfg.jwt_secret,
        algorithms=[cfg.jwt_algorithm],
        options={"verify_exp": verify_exp},
    )


def _check_and_extract_user(request: Request) -> User:
    authorization_header = request.headers.get("Authorization")
    if not authorization_header:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
    try:
        access_token = authorization_header.replace("Bearer ", "")
        user = extract_user_from_token(access_token)
        if cfg.sentry_dsn:
            sentry_sdk.set_user({
                "id": user.id,
                "username": user.username,
                "email": user.email,
                "ip_address": request.client.host,
            })
        return user
    except jwt.exceptions.ExpiredSignatureError:
        raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED)
<import_stmt>sublime sublime_plugin<import_stmt>os<import_from_stmt>..libs util<import_from_stmt>..libs NodeJS<import_from_stmt>..libs javaScriptEnhancements<import_from_stmt>..libs.global_vars *<class_stmt>JavascriptEnhancementsGetAstCommand(sublime_plugin.TextCommand)<block_start><def_stmt>run self edit **args<block_start>view=self.view<line_sep>flow_cli="flow"<line_sep>is_from_bin=<true><line_sep>chdir=""<line_sep>use_node=<true><line_sep>bin_path=""<line_sep>node=NodeJS(check_local=<true>)<line_sep>result=node.execute_check_output(flow_cli ['ast' '--from' 'sublime_text' '--pretty'] is_from_bin=is_from_bin use_fp_temp=<true> fp_temp_contents=view.substr(sublime.Region(0 view.size())) is_output_json=<false> chdir=chdir bin_path=bin_path use_node=use_node)<line_sep>print(result[1])<block_end><def_stmt>is_enabled self **args<block_start>view=self.view<if_stmt><not>util.selection_in_js_scope(view)<or><not>DEVELOPER_MODE<block_start><return><false><block_end><return><true><block_end><def_stmt>is_visible self **args<block_start>view=self.view<if_stmt><not>util.selection_in_js_scope(view)<or><not>DEVELOPER_MODE<block_start><return><false><block_end><return><true><block_end><block_end>
# Imports <import_stmt>os<import_stmt>random<import_from_stmt>collections Counter defaultdict<import_stmt>random<import_from_stmt>nltk.tag StanfordNERTagger<import_from_stmt>nltk.tokenize word_tokenize<import_from_stmt>nltk pos_tag<import_from_stmt>nltk.chunk conlltags2tree<import_from_stmt>nltk.tree Tree<import_stmt>pandas<as>pd<import_from_stmt>htrc_features FeatureReader<import_stmt>geocoder<import_stmt>folium<import_from_stmt>pprint pprint<import_from_stmt>tqdm tqdm<line_sep># Set environment variable # Geonames requires a username to access the API but we do not want to expose personal info in code # # Run this locally by adding USERNAME to environment variables, e.g. to .env, as follows: # > export USERNAME=<insert username here> USERNAME=os.getenv('USERNAME')<line_sep># Setup Stanford NER Tagger # Ignore deprecation warning for now; we'll deal with it when the time comes! st=StanfordNERTagger('/usr/local/share/stanford-ner/classifiers/english.all.3class.distsim.crf.ser.gz' '/usr/local/share/stanford-ner/stanford-ner.jar' encoding='utf-8')<line_sep># Functions for putting together with inside-outside-beginning (IOB) logic # Cf. https://stackoverflow.com/a/30666949 # # For more information on IOB tagging, see https://en.wikipedia.org/wiki/Inside–outside–beginning_(tagging) # Sample HathiTrust ID # This is the HTID for... # "Ancient Corinth: A guide to the excavations," <NAME>, <NAME>, and <NAME> htid="wu.89079728994"<line_sep># Get HTEF data for this ID; specifically tokenlist fr=FeatureReader(ids=[htid])<for_stmt>vol fr<block_start>tokens=vol.tokenlist()<block_end># Create pandas dataframe with relevant data temp=tokens.index.values.tolist()<line_sep>counts=pd.DataFrame.from_records(temp columns=['page' 'section' 'token' 'pos'])<line_sep>counts['count']=tokens['count'].tolist()<line_sep>counts[:10]<line_sep># Reconstruct text using tokens and counts text_data=list(zip(counts['token'].tolist() counts['count'].tolist()))<line_sep># Loop through and multiply words by counts text_list=[]<for_stmt>w,c text_data<block_start><for_stmt>i range(0 c)<block_start>text_list.append(w)<block_end><block_end>random.shuffle(text_list)# Necessary? text_reconstruction=" ".join(text_list)<line_sep>#page_words_extended = page_words+page_ner tokens=word_tokenize(text_reconstruction)<line_sep>tagged_tokens=st.tag(tokens)<line_sep>tagged_tokens=[item<for>item tagged_tokens<if>item[0]<ne>'']<line_sep>ne_tree=stanfordNE2tree(tagged_tokens)<line_sep>ne_in_sent=[]<for_stmt>subtree ne_tree<block_start><if_stmt>type(subtree)<eq>Tree# If subtree is a noun chunk, i.e. 
NE != "O" <block_start>ne_label=subtree.label()<line_sep>ne_string=" ".join([token<for>token,pos subtree.leaves()])<line_sep>ne_in_sent.append((ne_string ne_label))<block_end><block_end>locations=[tag[0].title()<for>tag ne_in_sent<if>tag[1]<eq>'LOCATION']<line_sep>print(locations)<line_sep>most_common_locations=Counter(locations).most_common(10)<line_sep>pprint(most_common_locations)<line_sep># Organize some data for map info places_list=[name<for>name,_ most_common_locations][:3]# Limit to top three most_common_locations=dict(most_common_locations)# Turn mcl into dictionary # Retrieve json from geonames API (for fun this time using geocoder) geocoder_results=[]<for_stmt>place places_list<block_start>results=geocoder.geonames(place maxRows=5 key=USERNAME)<line_sep>jsons=[]<for_stmt>result results<block_start>jsons.append(result.json)<block_end>geocoder_results.append(jsons)<block_end># Create a list of 'country' from the geonames json results countries=[]<for_stmt>results geocoder_results<block_start><for_stmt>item results<block_start><if_stmt>'country'<in>item.keys()<block_start>countries.append(item['country'])<block_end><block_end><block_end># Determine which country appears most often top_country=sorted(Counter(countries))[0]<line_sep>print(top_country)<line_sep># Iterate over geocoder_results and keep the first lat/long that matches the top country coordinates=[]<for_stmt>i,results enumerate(geocoder_results)<block_start><for_stmt>item results<block_start><if_stmt>item['country']<eq>top_country<block_start>coordinates.append((float(item['lat']) float(item['lng'])))<line_sep><break># Only get the first item for now <block_end><block_end><block_end>print(places_list)<line_sep>print(coordinates)<line_sep># Set up Folium and populate with weighted coordinates basemap=folium.Map(location=[37.97945 23.71622] zoom_start=8 tiles='cartodbpositron' width=960 height=512)<for_stmt>i,c enumerate(coordinates)<block_start>folium.CircleMarker([c[0] c[1]] radius=most_common_locations[places_list[i]]<times>.25 color='#3186cc' fill=<true> fill_opacity=0.5 fill_color='#3186cc' popup='{} ({}, {}) appears {} times in book.'.format(places_list[i] c[0] c[1] most_common_locations[places_list[i]])).add_to(basemap)<block_end>print('Map of relevant locations in Broneer et al.\'s "Ancient Corinth: A guide to the excavations," weighted by frequency.')<line_sep>basemap<line_sep>page=87<line_sep>test=counts[counts['page']<eq>page]['token'].tolist()<line_sep>print(test)<line_sep>print(len(test))<import_from_stmt>nltk.corpus stopwords<line_sep>stops=set(stopwords.words('english'))<line_sep>pns_list=[]<for_stmt>i range(1 max(counts['page'])+1)<block_start>tokens=counts[counts['page']<eq>i]['token'].tolist()<line_sep>tokens=[token<for>token tokens<if>token.lower()<not><in>stops<and>len(token)<g>2]<line_sep>pns=[token<for>token tokens<if>token[0].isupper()]<line_sep>combs=[f'{x} {y}'<for>x,y combinations(pns 2)]<line_sep>pns_list.extend(combs)<block_end>
# Copyright 2018 the V8 project authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. <import_stmt>os<import_stmt>re<import_from_stmt>testrunner.local testsuite<import_from_stmt>testrunner.objects testcase<line_sep>ANY_JS=".any.js"<line_sep>WPT_ROOT="/wasm/jsapi/"<line_sep>META_SCRIPT_REGEXP=re.compile(r"META:\s*script=(.*)")<class_stmt>TestSuite(testsuite.TestSuite)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super(TestSuite self).__init__(*args **kwargs)<line_sep>self.testroot=os.path.join(self.root "data" "test" "js-api")<line_sep>self.mjsunit_js=os.path.join(os.path.dirname(self.root) "mjsunit" "mjsunit.js")<block_end><def_stmt>ListTests self<block_start>tests=[]<for_stmt>dirname,dirs,files os.walk(self.testroot)<block_start><for_stmt>dotted [x<for>x dirs<if>x.startswith(".")]<block_start>dirs.remove(dotted)<block_end>dirs.sort()<line_sep>files.sort()<for_stmt>filename files<block_start><if_stmt>(filename.endswith(ANY_JS))<block_start>fullpath=os.path.join(dirname filename)<line_sep>relpath=fullpath[len(self.testroot)+1:-len(ANY_JS)]<line_sep>testname=relpath.replace(os.path.sep "/")<line_sep>test=self._create_test(testname)<line_sep>tests.append(test)<block_end><block_end><block_end><return>tests<block_end><def_stmt>_test_class self<block_start><return>TestCase<block_end><block_end><class_stmt>TestCase(testcase.D8TestCase)<block_start><def_stmt>_get_files_params self<block_start>files=[os.path.join(self.suite.mjsunit_js) os.path.join(self.suite.root "testharness.js")]<line_sep>source=self.get_source()<for_stmt>script META_SCRIPT_REGEXP.findall(source)<block_start><if_stmt>script.startswith(WPT_ROOT)# Matched an absolute path, strip the root and replace it with our # local root. <block_start>script=os.path.join(self.suite.testroot script[len(WPT_ROOT):])<block_end><elif_stmt><not>script.startswith("/")# Matched a relative path, prepend this test's directory. <block_start>thisdir=os.path.dirname(self._get_source_path())<line_sep>script=os.path.join(thisdir script)<block_end><else_stmt><block_start><raise>Exception("Unexpected absolute path for script: \"%s\""%script)<line_sep><block_end>files.append(script)<block_end>files.extend([self._get_source_path() os.path.join(self.suite.root "testharness-after.js")])<line_sep><return>files<block_end><def_stmt>_get_source_path self# All tests are named `path/name.any.js` <block_start><return>os.path.join(self.suite.testroot self.path+ANY_JS)<block_end><block_end><def_stmt>GetSuite *args **kwargs<block_start><return>TestSuite(*args **kwargs)<block_end>
# Copyright 2019 Huawei Technologies Co., Ltd # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================ <import_stmt>numpy<as>np<import_stmt>pytest<import_stmt>mindspore.context<as>context<import_stmt>mindspore.nn<as>nn<import_from_stmt>mindspore Tensor<import_from_stmt>mindspore.ops operations<as>P<import_from_stmt>mindspore.ops.operations _inner_ops<as>inner<class_stmt>NetRelu(nn.Cell)<block_start><def_stmt>__init__ self<block_start>super(NetRelu self).__init__()<line_sep>self.relu=P.ReLU()<block_end><def_stmt>construct self x<block_start><return>self.relu(x)<block_end><block_end><class_stmt>NetReluDynamic(nn.Cell)<block_start><def_stmt>__init__ self<block_start>super(NetReluDynamic self).__init__()<line_sep>self.conv=inner.GpuConvertToDynamicShape()<line_sep>self.relu=P.ReLU()<block_end><def_stmt>construct self x<block_start>x_conv=self.conv(x)<line_sep><return>self.relu(x_conv)<block_end><block_end>@[email protected][email protected]_onecard<def_stmt>test_relu_float32 <block_start>x=Tensor(np.array([[[[-1 1 10] [1 -1 1] [10 1 -1]]]]).astype(np.float32))<line_sep>expect=np.array([[[[0 1 10 ] [1 0 1 ] [10 1 0.]]]]).astype(np.float32)<line_sep>context.set_context(mode=context.PYNATIVE_MODE device_target="GPU")<line_sep>relu=NetRelu()<line_sep>output=relu(x)<assert_stmt>(output.asnumpy()<eq>expect).all()<line_sep>context.set_context(mode=context.GRAPH_MODE device_target="GPU")<line_sep>relu=NetRelu()<line_sep>output=relu(x)<assert_stmt>(output.asnumpy()<eq>expect).all()<block_end>@[email protected][email protected]_onecard<def_stmt>test_relu_int8 <block_start>x=Tensor(np.array([[[[-1 1 10] [1 -1 1] [10 1 -1]]]]).astype(np.int8))<line_sep>expect=np.array([[[[0 1 10 ] [1 0 1 ] [10 1 0.]]]]).astype(np.int8)<line_sep>context.set_context(mode=context.PYNATIVE_MODE device_target="GPU")<line_sep>relu=NetRelu()<line_sep>output=relu(x)<assert_stmt>(output.asnumpy()<eq>expect).all()<line_sep>context.set_context(mode=context.GRAPH_MODE device_target="GPU")<line_sep>relu=NetRelu()<line_sep>output=relu(x)<assert_stmt>(output.asnumpy()<eq>expect).all()<block_end>@[email protected][email protected]_onecard<def_stmt>test_relu_int32 <block_start>x=Tensor(np.array([[[[-1 1 10] [1 -1 1] [10 1 -1]]]]).astype(np.int32))<line_sep>expect=np.array([[[[0 1 10 ] [1 0 1 ] [10 1 0.]]]]).astype(np.int32)<line_sep>context.set_context(mode=context.PYNATIVE_MODE device_target="GPU")<line_sep>relu=NetRelu()<line_sep>output=relu(x)<assert_stmt>(output.asnumpy()<eq>expect).all()<line_sep>context.set_context(mode=context.GRAPH_MODE device_target="GPU")<line_sep>relu=NetRelu()<line_sep>output=relu(x)<assert_stmt>(output.asnumpy()<eq>expect).all()<block_end>@[email protected][email protected]_onecard<def_stmt>test_relu_int64 <block_start>x=Tensor(np.array([[[[-1 1 10] [1 -1 1] [10 1 -1]]]]).astype(np.int64))<line_sep>expect=np.array([[[[0 1 10 ] [1 0 1 ] [10 1 0.]]]]).astype(np.int64)<line_sep>context.set_context(mode=context.PYNATIVE_MODE 
device_target="GPU")<line_sep>relu=NetRelu()<line_sep>output=relu(x)<line_sep>print(output.asnumpy() expect)<assert_stmt>(output.asnumpy()<eq>expect).all()<line_sep>context.set_context(mode=context.GRAPH_MODE device_target="GPU")<line_sep>relu=NetRelu()<line_sep>output=relu(x)<assert_stmt>(output.asnumpy()<eq>expect).all()<block_end>@[email protected][email protected]_onecard<def_stmt>test_relu_int64_dynamic_shape <block_start>x=Tensor(np.array([[[[-1 1 10] [1 -1 1] [10 1 -1]]]]).astype(np.int64))<line_sep>expect=np.array([[[[0 1 10 ] [1 0 1 ] [10 1 0.]]]]).astype(np.int64)<line_sep>context.set_context(mode=context.GRAPH_MODE device_target="GPU")<line_sep>relu_dynamic=NetReluDynamic()<line_sep>output=relu_dynamic(x)<assert_stmt>(output.asnumpy()<eq>expect).all()<block_end>
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('wagtailmenus', '0009_auto_20160201_0859'),
    ]

    operations = [
        migrations.RenameField(
            model_name='mainmenuitem',
            old_name='add_subnav',
            new_name='allow_subnav',
        ),
    ]
# Zed Attack Proxy (ZAP) and its related class files. # # ZAP is an HTTP/HTTPS proxy for assessing web application security. # # Copyright 2017 the ZAP development team # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """ This file was automatically generated. """<import_stmt>six<class_stmt>users(object)<block_start><def_stmt>__init__ self zap<block_start>self.zap=zap<block_end><def_stmt>users_list self contextid=<none><block_start>""" Gets a list of users that belong to the context with the given ID, or all users if none provided. """<line_sep>params={}<if_stmt>contextid<is><not><none><block_start>params['contextId']=contextid<block_end><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/view/usersList/' params)))<block_end><def_stmt>get_user_by_id self contextid userid<block_start>""" Gets the data of the user with the given ID that belongs to the context with the given ID. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/view/getUserById/' {'contextId':contextid 'userId':userid})))<block_end><def_stmt>get_authentication_credentials_config_params self contextid<block_start>""" Gets the configuration parameters for the credentials of the context with the given ID. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/view/getAuthenticationCredentialsConfigParams/' {'contextId':contextid})))<block_end><def_stmt>get_authentication_credentials self contextid userid<block_start>""" Gets the authentication credentials of the user with given ID that belongs to the context with the given ID. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/view/getAuthenticationCredentials/' {'contextId':contextid 'userId':userid})))<block_end><def_stmt>get_authentication_state self contextid userid<block_start>""" Gets the authentication state information for the user identified by the Context and User Ids. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/view/getAuthenticationState/' {'contextId':contextid 'userId':userid})))<block_end><def_stmt>get_authentication_session self contextid userid<block_start>""" Gets the authentication session information for the user identified by the Context and User Ids, e.g. cookies and realm credentials. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/view/getAuthenticationSession/' {'contextId':contextid 'userId':userid})))<block_end><def_stmt>new_user self contextid name apikey=''<block_start>""" Creates a new user with the given name for the context with the given ID. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/newUser/' {'contextId':contextid 'name':name 'apikey':apikey})))<block_end><def_stmt>remove_user self contextid userid apikey=''<block_start>""" Removes the user with the given ID that belongs to the context with the given ID. 
"""<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/removeUser/' {'contextId':contextid 'userId':userid 'apikey':apikey})))<block_end><def_stmt>set_user_enabled self contextid userid enabled apikey=''<block_start>""" Sets whether or not the user, with the given ID that belongs to the context with the given ID, should be enabled. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/setUserEnabled/' {'contextId':contextid 'userId':userid 'enabled':enabled 'apikey':apikey})))<block_end><def_stmt>set_user_name self contextid userid name apikey=''<block_start>""" Renames the user with the given ID that belongs to the context with the given ID. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/setUserName/' {'contextId':contextid 'userId':userid 'name':name 'apikey':apikey})))<block_end><def_stmt>set_authentication_credentials self contextid userid authcredentialsconfigparams=<none> apikey=''<block_start>""" Sets the authentication credentials for the user with the given ID that belongs to the context with the given ID. """<line_sep>params={'contextId':contextid 'userId':userid 'apikey':apikey}<if_stmt>authcredentialsconfigparams<is><not><none><block_start>params['authCredentialsConfigParams']=authcredentialsconfigparams<block_end><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/setAuthenticationCredentials/' params)))<block_end><def_stmt>authenticate_as_user self contextid userid apikey=''<block_start>""" Tries to authenticate as the identified user, returning the authentication request and whether it appears to have succeeded. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/authenticateAsUser/' {'contextId':contextid 'userId':userid 'apikey':apikey})))<block_end><def_stmt>poll_as_user self contextid userid apikey=''<block_start>""" Tries to poll as the identified user, returning the authentication request and whether it appears to have succeeded. This will only work if the polling verification strategy has been configured. """<line_sep><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/pollAsUser/' {'contextId':contextid 'userId':userid 'apikey':apikey})))<block_end><def_stmt>set_authentication_state self contextid userid lastpollresult=<none> lastpolltimeinms=<none> requestssincelastpoll=<none> apikey=''<block_start>""" Sets fields in the authentication state for the user identified by the Context and User Ids. """<line_sep>params={'contextId':contextid 'userId':userid 'apikey':apikey}<if_stmt>lastpollresult<is><not><none><block_start>params['lastPollResult']=lastpollresult<block_end><if_stmt>lastpolltimeinms<is><not><none><block_start>params['lastPollTimeInMs']=lastpolltimeinms<block_end><if_stmt>requestssincelastpoll<is><not><none><block_start>params['requestsSinceLastPoll']=requestssincelastpoll<block_end><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/setAuthenticationState/' params)))<block_end><def_stmt>set_cookie self contextid userid domain name value path=<none> secure=<none> apikey=''<block_start>""" Sets the specified cookie for the user identified by the Context and User Ids. 
"""<line_sep>params={'contextId':contextid 'userId':userid 'domain':domain 'name':name 'value':value 'apikey':apikey}<if_stmt>path<is><not><none><block_start>params['path']=path<block_end><if_stmt>secure<is><not><none><block_start>params['secure']=secure<block_end><return>six.next(six.itervalues(self.zap._request(self.zap.base+'users/action/setCookie/' params)))<block_end><block_end>
from sofi.ui import Inserted


def test_basic():
    assert str(Inserted()) == "<ins></ins>"


def test_text():
    assert str(Inserted("text")) == "<ins>text</ins>"


def test_custom_class_ident_style_and_attrs():
    assert (str(Inserted("text", cl='abclass', ident='123', style="font-size:0.9em;",
                         attrs={"data-test": 'abc'}))
            == "<ins id=\"123\" class=\"abclass\" style=\"font-size:0.9em;\" data-test=\"abc\">text</ins>")
""" IsOpenStackCompute ================== The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks if 'nova-compute' process exist, if not raises ``SkipComponent`` so that the dependent component will not fire. Can be added as a dependency of a parser so that the parser only fires if the ``IsIsOpenStackCompute`` dependency is met. """<import_from_stmt>insights.core.plugins component<import_from_stmt>insights.parsers.ps PsAuxcww<import_from_stmt>insights.core.dr SkipComponent<line_sep>@component(PsAuxcww)<class_stmt>IsOpenStackCompute(object)<block_start>"""The ``IsOpenStackCompute`` component uses ``PsAuxcww`` parser to determine OpenStack Compute node. It checks if ``nova-compute`` process exist, if not raises ``SkipComponent``. Raises: SkipComponent: When ``nova-compute`` process does not exist. """<def_stmt>__init__ self ps<block_start><if_stmt>'nova-compute'<not><in>ps.running<block_start><raise>SkipComponent('Not OpenStack Compute node')<block_end><block_end><block_end>
class Pessoa:
    def __init__(self, n, s):
        self.n = n
        self.s = s

    def __hash__(self):
        return hash((self.n, self.s))


ll = Pessoa('Lugão', 'Ricardo')
lulu = Pessoa('Lugão', 'Ricardinho')

print(hash(ll))    # hash of the tuple ('Lugão', 'Ricardo')
print(hash(lulu))  # a different hash, since the second name differs
<import_stmt>logging<def_stmt>_exclude fields excluded<block_start><return>[field<for>field fields<if>field<not><in>excluded]<block_end><def_stmt>_combine_dicts *args<block_start>results={}<for_stmt>arg args<block_start>results.update(arg)<block_end><return>results<block_end><class_stmt>FeatureSetBase<block_start>""" Generic interface for feature sets """<def_stmt>__init__ self identifier_field target_field# fields to be filled out in derived class <block_start>self.logger=logging.getLogger(__name__)<line_sep>self.params=<none><line_sep>self.info=<none><line_sep>self.identifier_field=identifier_field<line_sep>self.target_field=target_field<block_end><def_stmt>fields_excluded_from_features self<block_start>id_target=[self.identifier_field self.target_field]<line_sep><return>id_target+self.params['extra_information_fields']<block_end><def_stmt>_exclude_non_features self fields<block_start><return>_exclude(fields self.fields_excluded_from_features())<block_end><def_stmt>base_feature_fields_numerical self<block_start>fields=self.params['base_fields_numerical']<line_sep><return>self._exclude_non_features(fields)<block_end><def_stmt>base_feature_fields_categorical self<block_start>fields=sorted(self.params['base_categorical_n_levels_dict'].keys())<line_sep><return>self._exclude_non_features(fields)<block_end><def_stmt>base_feature_fields self<block_start><return>self.base_feature_fields_numerical()+self.base_feature_fields_categorical()<block_end><def_stmt>derived_feature_fields_numerical self<block_start><return>self.params['derived_fields_numerical']<block_end><def_stmt>derived_feature_fields_categorical self<block_start><return>sorted(self.params['derived_categorical_n_levels_dict'].keys())<block_end><def_stmt>derived_feature_fields self<block_start><return>self.derived_feature_fields_numerical()+self.derived_feature_fields_categorical()<block_end><def_stmt>available_feature_fields_numerical self<block_start><return>self.base_feature_fields_numerical()+self.derived_feature_fields_numerical()<block_end><def_stmt>available_feature_fields_categorical self<block_start><return>self.base_feature_fields_categorical()+self.derived_feature_fields_categorical()<block_end><def_stmt>encoded_feature_fields_numerical self<block_start><return>_exclude(self.available_feature_fields_numerical() self.params['encoder_excluded_fields'])<block_end><def_stmt>encoded_feature_fields_categorical self<block_start><return>_exclude(self.available_feature_fields_categorical() self.params['encoder_excluded_fields'])<block_end><def_stmt>encoded_feature_fields self<block_start><return>self.encoded_feature_fields_numerical()+self.encoded_feature_fields_categorical()<block_end><def_stmt>omitted_feature_fields_for_input self<block_start>encoded=self.encoded_feature_fields()<line_sep><return>[field<for>field encoded<if>field<not><in>self.base_feature_fields()]<block_end># feature transformations <def_stmt>base_features_numerical self processed_row<block_start><return>{k:processed_row[k]<for>k self.base_feature_fields_numerical()}<block_end><def_stmt>base_features_categorical self processed_row<block_start><return>{k:processed_row[k]<for>k self.base_feature_fields_categorical()}<block_end><def_stmt>base_features self processed_row<block_start><return>{k:processed_row[k]<for>k self.base_feature_fields()}<block_end><def_stmt>derived_features_categorical self processed_row# TODO: override <block_start><assert_stmt>isinstance(processed_row dict)<line_sep><return>{}<block_end><def_stmt>derived_features_numerical self processed_row# 
TODO: override <block_start><assert_stmt>isinstance(processed_row dict)<line_sep><return>{}<block_end><def_stmt>derived_features self processed_row<block_start>num=self.derived_features_numerical(processed_row)<line_sep>cat=self.derived_features_categorical(processed_row)<line_sep><return>_combine_dicts(num cat)<block_end><def_stmt>features self processed_row<block_start>base=self.base_features(processed_row)<line_sep>derv=self.derived_features(processed_row)<line_sep><return>_combine_dicts(base derv)<block_end><def_stmt>ml_fields self<block_start>categorical_n_levels_dict=self.params['base_categorical_n_levels_dict'].copy()<line_sep>categorical_n_levels_dict.update(self.params['derived_categorical_n_levels_dict'])<line_sep>cat_encoded={k:v<for>k,v categorical_n_levels_dict.items()<if>k<in>self.encoded_feature_fields_categorical()}<line_sep>numeric_fields=self.encoded_feature_fields_numerical()<line_sep>intersection=set(cat_encoded.keys()).intersection(numeric_fields)<if_stmt>intersection<block_start>self.logger.info('categorical')<line_sep>self.logger.info(cat_encoded)<line_sep>self.logger.info('numerical')<line_sep>self.logger.info(numeric_fields)<line_sep>self.logger.info('intersection')<line_sep>self.logger.info(intersection)<line_sep><raise>ValueError('categorical and numeric overlap')<block_end><return>{'categorical':cat_encoded 'numerical':numeric_fields 'target_name':self.target_field}<block_end><block_end>
"""Utilities for asyncio-friendly file handling."""<import_from_stmt>.threadpool open<import_from_stmt>. tempfile<line_sep>__all__=["open" "tempfile"]<line_sep>
# Written by <NAME> and <NAME> <<EMAIL>> # # Copyright (c) 2016, Emlid Limited # All rights reserved. # # Redistribution and use in source and binary forms, # with or without modification, # are permitted provided that the following conditions are met: # # 1. Redistributions of source code must retain the above copyright notice, # this list of conditions and the following disclaimer. # # 2. Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # 3. Neither the name of the copyright holder nor the names of its contributors # may be used to endorse or promote products derived from this software # without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, # THE IMPLIED WARRANTIES OF MERCHANTABILITY AND # FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. # IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS # BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, # OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, # PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, # DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED # AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, # STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) # ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, # EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. <import_stmt>pytest<import_stmt>mock<import_from_stmt>wificontrol WiFiControl<line_sep>@pytest.fixture<def_stmt>ssid <block_start>network={'ssid':'Test'}<line_sep><return>network<block_end><class_stmt>FakeWiFiControl(WiFiControl)<block_start><def_stmt>__init__ self<block_start>self.wifi=mock.MagicMock()<line_sep>self.wpasupplicant=mock.MagicMock()<line_sep>self.hotspot=mock.MagicMock()<block_end><block_end><class_stmt>TestWiFiControl<block_start><def_stmt>setup_method self<block_start>self.manager=FakeWiFiControl()<block_end><def_stmt>test_host_mode self<block_start>self.manager.hotspot.started=mock.Mock(return_value=<false>)<line_sep>self.manager.start_host_mode()<assert_stmt>self.manager.wpasupplicant.stop.call_count<eq>1<assert_stmt>self.manager.hotspot.started.call_count<eq>1<assert_stmt>self.manager.hotspot.start.call_count<eq>1<block_end><def_stmt>test_client_mode self<block_start>self.manager.wpasupplicant.started=mock.Mock(return_value=<false>)<line_sep>self.manager.start_client_mode()<assert_stmt>self.manager.hotspot.stop.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.started.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.start.call_count<eq>1<block_end><def_stmt>test_wifi_turn_on self<block_start>self.manager.wpasupplicant.started=mock.Mock(return_value=<false>)<line_sep>self.manager.hotspot.started=mock.Mock(return_value=<false>)<line_sep>self.manager.turn_on_wifi()<assert_stmt>self.manager.wifi.unblock.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.started.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.start.call_count<eq>1<line_sep>self.manager.wpasupplicant.started.return_value=<true><assert_stmt>self.manager.get_wifi_turned_on()<is><true><block_end><def_stmt>test_wifi_turn_off 
self<block_start>self.manager.wpasupplicant.started=mock.Mock(return_value=<true>)<line_sep>self.manager.hotspot.started=mock.Mock(return_value=<false>)<line_sep>self.manager.turn_off_wifi()<assert_stmt>self.manager.wifi.block.call_count<eq>1<assert_stmt>self.manager.hotspot.stop.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.stop.call_count<eq>1<line_sep>self.manager.wpasupplicant.started.return_value=<false><assert_stmt>self.manager.get_wifi_turned_on()<is><false><block_end><def_stmt>test_wifi_turn_on_if_wifi_is_on self<block_start>self.manager.wpasupplicant.started=mock.Mock(return_value=<false>)<line_sep>self.manager.hotspot.started=mock.Mock(return_value=<true>)<line_sep>self.manager.turn_on_wifi()<assert_stmt>self.manager.wifi.unblock.call_count<eq>0<assert_stmt>self.manager.wpasupplicant.started.call_count<eq>1<assert_stmt>self.manager.hotspot.started.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.start.call_count<eq>0<assert_stmt>self.manager.hotspot.start.call_count<eq>0<block_end><def_stmt>test_network_add self ssid<block_start>self.manager.add_network(ssid)<assert_stmt>self.manager.wpasupplicant.add_network.is_called_once_with(ssid)<block_end><def_stmt>test_network_remove self ssid<block_start>self.manager.remove_network(ssid)<assert_stmt>self.manager.wpasupplicant.remove_network.is_called_once_with(ssid)<block_end><def_stmt>test_status_get self ssid<block_start>self.manager.wpasupplicant.started=mock.Mock(return_value=<false>)<line_sep>self.manager.hotspot.started=mock.Mock(return_value=<true>)<line_sep>state,status=self.manager.get_status()<assert_stmt>state<eq>self.manager.HOST_STATE<assert_stmt>status<is><none><line_sep>self.manager.wpasupplicant.started.return_value=<true><line_sep>self.manager.hotspot.started.return_value=<false><line_sep>self.manager.wpasupplicant.get_status=mock.Mock(return_value=ssid)<line_sep>state,status=self.manager.get_status()<assert_stmt>state<eq>self.manager.WPA_STATE<assert_stmt>status<eq>ssid<block_end><def_stmt>test_start_connection self ssid<block_start><def_stmt>start_connecting *args<block_start>self.manager.hotspot.started.return_value=<false><line_sep>self.manager.revert_on_connect_failure(result=<none>)<block_end>self.manager.wpasupplicant.started=mock.Mock(return_value=<false>)<line_sep>self.manager.wpasupplicant.start_connecting.side_effect=start_connecting<line_sep>self.manager.hotspot.started=mock.Mock(return_value=<true>)<line_sep>self.manager.start_connecting(ssid)<assert_stmt>self.manager.wpasupplicant.started.call_count<eq>1<assert_stmt>self.manager.hotspot.stop.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.start.call_count<eq>1<line_sep>args=(ssid self.manager.revert_on_connect_failure <none> 10)<assert_stmt>self.manager.wpasupplicant.start_connecting.is_called_once_with(args)<assert_stmt>self.manager.hotspot.started.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.stop.call_count<eq>1<assert_stmt>self.manager.hotspot.start.call_count<eq>1<block_end><def_stmt>test_reconnection self ssid<block_start><def_stmt>start_connecting result callback args timeout<block_start>self.manager.hotspot.started.return_value=<false><if_stmt>args<block_start>callback({} *args)<block_end><else_stmt><block_start>callback(result)<block_end><block_end>self.manager.wpasupplicant.started=mock.Mock(return_value=<false>)<line_sep>self.manager.wpasupplicant.start_connecting.side_effect=start_connecting<line_sep>self.manager.hotspot.started=mock.Mock(return_value=<true>)<line_sep>self.manager.start_connecting(ssid 
callback=self.manager.reconnect args=(ssid ))<assert_stmt>self.manager.wpasupplicant.start_connecting.call_count<eq>2<block_end><def_stmt>test_supplicant_functions self<block_start>self.manager.scan()<assert_stmt>self.manager.wpasupplicant.scan.call_count<eq>1<line_sep>self.manager.get_scan_results()<assert_stmt>self.manager.wpasupplicant.get_scan_results.call_count<eq>1<line_sep>self.manager.get_added_networks()<assert_stmt>self.manager.wpasupplicant.get_added_networks.call_count<eq>1<line_sep>self.manager.get_ip()<assert_stmt>self.manager.wifi.get_device_ip.call_count<eq>1<line_sep>self.manager.stop_connecting()<assert_stmt>self.manager.wpasupplicant.stop_connecting.call_count<eq>1<line_sep>self.manager.disconnect()<assert_stmt>self.manager.wpasupplicant.disconnect.call_count<eq>1<line_sep>self.manager.get_device_name()<assert_stmt>self.manager.hotspot.get_host_name.call_count<eq>1<line_sep>self.manager.get_hostap_name()<assert_stmt>self.manager.hotspot.get_hostap_name.call_count<eq>1<line_sep>name='test'<line_sep>self.manager.set_device_names(name)<assert_stmt>self.manager.wpasupplicant.set_p2p_name.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.set_p2p_name.is_called_once_with(name)<assert_stmt>self.manager.hotspot.set_hostap_name.call_count<eq>1<assert_stmt>self.manager.hotspot.set_hostap_name.is_called_once_with(name)<assert_stmt>self.manager.hotspot.set_host_name.call_count<eq>1<assert_stmt>self.manager.hotspot.set_host_name.is_called_once_with(name)<assert_stmt>self.manager.wifi.restart_dns.call_count<eq>1<line_sep>self.manager.set_hostap_password(name)<assert_stmt>self.manager.hotspot.set_hostap_password.is_called_once_with(name)<block_end><def_stmt>test_verify_names self<block_start>name='test'<line_sep>mac_addr='11:22:33:44:55:66'<line_sep>self.manager.hotspot.get_host_name.return_value=name<line_sep>self.manager.wpasupplicant.get_p2p_name.return_value=name<line_sep>self.manager.hotspot.get_hostap_name.return_value="{}{}".format(name mac_addr[-6:])<line_sep>self.manager.hotspot.get_device_mac.return_value=mac_addr[-6:]<assert_stmt>self.manager.verify_hostap_name(name)<assert_stmt>self.manager.verify_device_names(name)<assert_stmt>self.manager.hotspot.get_host_name.call_count<eq>1<assert_stmt>self.manager.wpasupplicant.get_p2p_name.call_count<eq>1<block_end><block_end>
from twitter_ads.campaign import Tweet
from twitter_ads.client import Client
from twitter_ads.creative import MediaLibrary, PollCard
from twitter_ads.enum import MEDIA_TYPE

CONSUMER_KEY = ''
CONSUMER_SECRET = ''
ACCESS_TOKEN = ''
ACCESS_TOKEN_SECRET = ''
ACCOUNT_ID = ''

# initialize the client
client = Client(CONSUMER_KEY, CONSUMER_SECRET, ACCESS_TOKEN, ACCESS_TOKEN_SECRET)

# load the advertiser account instance
account = client.accounts(ACCOUNT_ID)

# most recent Media Library video
ml = MediaLibrary(account).all(account, media_type=MEDIA_TYPE.VIDEO)
media_key = ml.first.media_key

# create Poll Card with video
pc = PollCard(account)
pc.duration_in_minutes = 10080  # one week
pc.first_choice = 'Northern'
pc.second_choice = 'Southern'
pc.name = ml.first.name + ' poll card from SDK'
pc.media_key = media_key
pc.save()

# create Tweet
Tweet.create(account, text='Which hemisphere do you prefer?', card_uri=pc.card_uri)

# https://twitter.com/apimctestface/status/973002610033610753
import sys
import logging

import click
import entrypoints


LOG_LEVEL_CODES = {
    "debug": logging.DEBUG,
    "info": logging.INFO,
    "warning": logging.WARNING,
    "error": logging.ERROR,
}


def merge_extensions(click_group):
    """
    Each extension is called with click group for
    ultimate agility while preserving cli context.
    """
    for extension in load_extensions():
        extension(click_group)
    return click_group


def load_extensions():
    """Return list of Kibitzr CLI extensions"""
    return [
        point.load()
        for point in entrypoints.get_group_all("kibitzr.cli")
    ]


@click.group()
@click.option("-l", "--log-level", default="info",
              type=click.Choice(LOG_LEVEL_CODES.keys()),
              help="Logging level")
@click.pass_context
def cli(ctx, log_level):
    """Run kibitzr COMMAND --help for detailed descriptions"""
    ctx.obj = {'log_level': LOG_LEVEL_CODES[log_level.lower()]}


@cli.command()
def version():
    """Print version"""
    from kibitzr import __version__ as kibitzr_version
    print(kibitzr_version)


@cli.command()
def firefox():
    """Launch Firefox with persistent profile"""
    from kibitzr.app import Application
    Application().run_firefox()


@cli.command()
@click.argument('name', nargs=-1)
@click.pass_context
def once(ctx, name):
    """Run kibitzr checks once and exit"""
    from kibitzr.app import Application
    app = Application()
    sys.exit(app.run(once=True, log_level=ctx.obj['log_level'], names=name))


@cli.command()
@click.argument('name', nargs=-1)
@click.pass_context
def run(ctx, name):
    """Run kibitzr in the foreground mode"""
    from kibitzr.app import Application
    app = Application()
    sys.exit(app.run(once=False, log_level=ctx.obj['log_level'], names=name))


@cli.command()
def init():
    """Create boilerplate configuration files"""
    from kibitzr.app import Application
    Application.bootstrap()


@cli.command()
def telegram_chat():
    """Return chat id for the last message sent to Telegram Bot"""
    # rename import to escape name clashing:
    from kibitzr.app import Application
    app = Application()
    app.telegram_chat()


@cli.command()
def clean():
    """Clean change history"""
    from kibitzr.storage import PageHistory
    PageHistory.clean()


@cli.command()
def stash():
    """Print stash contents"""
    from kibitzr.stash import Stash
    Stash.print_content()


extended_cli = merge_extensions(cli)


if __name__ == "__main__":
    extended_cli()
"""Trajectory Generator for in-place stepping motion for quadruped robot."""<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>math<import_stmt>numpy<as>np<line_sep>TWO_PI=2<times>math.pi<def_stmt>_get_actions_asymmetric_sine phase tg_params<block_start>"""Returns the leg extension given current phase of TG and parameters. Args: phase: a number in [0, 2pi) representing current leg phase tg_params: a dictionary of tg parameters: stance_lift_cutoff -- switches the TG between stance (phase < cutoff) and lift (phase > cutoff) phase amplitude_swing -- amplitude in swing phase amplitude_lift -- amplitude in lift phase center_extension -- center of leg extension """<line_sep>stance_lift_cutoff=tg_params['stance_lift_cutoff']<line_sep>a_prime=np.where(phase<l>stance_lift_cutoff tg_params['amplitude_stance'] tg_params['amplitude_lift'])<line_sep>scaled_phase=np.where(phase<g>stance_lift_cutoff np.pi+(phase-stance_lift_cutoff)/(TWO_PI-stance_lift_cutoff)<times>np.pi phase/stance_lift_cutoff<times>np.pi)<line_sep><return>tg_params['center_extension']+a_prime<times>np.sin(scaled_phase)<block_end><def_stmt>step current_phases leg_frequencies dt tg_params<block_start>"""Steps forward the in-place trajectory generator. Args: current_phases: phases of each leg. leg_frequencies: the frequency to proceed the phase of each leg. dt: amount of time (sec) between consecutive time steps. tg_params: a set of parameters for trajectory generator, see the docstring of "_get_actions_asymmetric_sine" for details. Returns: actions: leg swing/extensions as output by the trajectory generator. new_state: new swing/extension. """<line_sep>new_phases=np.fmod(current_phases+TWO_PI<times>leg_frequencies<times>dt TWO_PI)<line_sep>extensions=[]<for_stmt>leg_id range(4)<block_start>extensions.append(_get_actions_asymmetric_sine(new_phases[<ellipsis> leg_id] tg_params))<block_end><return>new_phases extensions<block_end><def_stmt>reset <block_start><return>np.array([0 np.pi<times>0.5 np.pi np.pi<times>1.5])<block_end>
""" semver package major release 3. A Python module for semantic versioning. Simplifies comparing versions. """<import_from_stmt>._deprecated bump_build bump_major bump_minor bump_patch bump_prerelease compare finalize_version format_version match max_ver min_ver parse parse_version_info replace cmd_bump cmd_compare cmd_nextver cmd_check createparser process main <import_from_stmt>.version Version VersionInfo<import_from_stmt>.__about__ __version__ __author__ __maintainer__ __author_email__ __description__ __maintainer_email__ SEMVER_SPEC_VERSION <line_sep>
# -*- encoding: utf-8 -*- # # Copyright © 2018–2021 Mergify SAS # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. <import_stmt>yaml<import_from_stmt>mergify_engine config<import_from_stmt>mergify_engine context<import_from_stmt>mergify_engine.tests.functional base<class_stmt>TestUpdateAction(base.FunctionalTestBase)<block_start><async_keyword><def_stmt>test_update_action self<block_start>rules={"pull_request_rules":[{"name":"update" "conditions":[f"base={self.main_branch_name}"] "actions":{"update":{}} } {"name":"merge" "conditions":[f"base={self.main_branch_name}" "label=merge"] "actions":{"merge":{}} } ]}<line_sep><await>self.setup_repo(yaml.dump(rules))<line_sep>p1,_=<await>self.create_pr()<line_sep>p2,_=<await>self.create_pr()<line_sep>commits=<await>self.get_commits(p2["number"])<assert_stmt>len(commits)<eq>1<line_sep><await>self.add_label(p1["number"] "merge")<line_sep><await>self.run_engine()<line_sep><await>self.wait_for("pull_request" {"action":"closed"})<line_sep>p1=<await>self.get_pull(p1["number"])<assert_stmt>p1["merged"]<line_sep><await>self.wait_for("push" {"ref":f"refs/heads/{self.main_branch_name}"})<line_sep><await>self.run_engine()<line_sep>commits=<await>self.get_commits(p2["number"])<assert_stmt>len(commits)<eq>2<assert_stmt>commits[-1]["commit"]["author"]["name"]<eq>config.BOT_USER_LOGIN<assert_stmt>commits[-1]["commit"]["message"].startswith("Merge branch")<block_end><async_keyword><def_stmt>test_update_action_on_closed_pr_deleted_branch self<block_start>rules={"pull_request_rules":[{"name":"update" "conditions":[f"base={self.main_branch_name}"] "actions":{"update":{}} } {"name":"merge" "conditions":[f"base={self.main_branch_name}" "label=merge"] "actions":{"merge":{} "delete_head_branch":{}} } ]}<line_sep><await>self.setup_repo(yaml.dump(rules))<line_sep>p1,_=<await>self.create_pr()<line_sep>p2,_=<await>self.create_pr()<line_sep>commits=<await>self.get_commits(p2["number"])<assert_stmt>len(commits)<eq>1<line_sep><await>self.add_label(p1["number"] "merge")<line_sep><await>self.run_engine()<line_sep>p1=<await>self.get_pull(p1["number"])<assert_stmt>p1["merged"]<line_sep><await>self.wait_for("push" {"ref":f"refs/heads/{self.main_branch_name}"})<line_sep><await>self.run_engine()<line_sep>commits=<await>self.get_commits(p2["number"])<assert_stmt>len(commits)<eq>2<assert_stmt>commits[-1]["commit"]["author"]["name"]<eq>config.BOT_USER_LOGIN<assert_stmt>commits[-1]["commit"]["message"].startswith("Merge branch")<line_sep># Now merge p2 so p1 is not up to date <await>self.add_label(p2["number"] "merge")<line_sep><await>self.run_engine()<line_sep>ctxt=<await>context.Context.create(self.repository_ctxt p1 [])<line_sep>checks=<await>ctxt.pull_engine_check_runs<for_stmt>check checks<block_start><assert_stmt>check["conclusion"]<eq>"success" check<block_end><block_end><block_end>
<import_from_stmt>itertools chain<import_from_stmt>.common EWSAccountService create_attachment_ids_element<import_from_stmt>..util create_element add_xml_child set_xml_value DummyResponse StreamingBase64Parser StreamingContentHandler ElementNotFound MNS<line_sep># https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/bodytype BODY_TYPE_CHOICES=('Best' 'HTML' 'Text')<class_stmt>GetAttachment(EWSAccountService)<block_start>"""MSDN: https://docs.microsoft.com/en-us/exchange/client-developer/web-service-reference/getattachment-operation"""<line_sep>SERVICE_NAME='GetAttachment'<line_sep>element_container_name='{%s}Attachments'%MNS<def_stmt>call self items include_mime_content body_type filter_html_content additional_fields<block_start><if_stmt>body_type<and>body_type<not><in>BODY_TYPE_CHOICES<block_start><raise>ValueError("'body_type' %s must be one of %s"%(body_type BODY_TYPE_CHOICES))<block_end><return>self._elems_to_objs(self._chunked_get_elements(self.get_payload items=items include_mime_content=include_mime_content body_type=body_type filter_html_content=filter_html_content additional_fields=additional_fields ))<block_end><def_stmt>_elems_to_objs self elems<block_start><import_from_stmt>..attachments FileAttachment ItemAttachment<line_sep>cls_map={cls.response_tag():cls<for>cls (FileAttachment ItemAttachment)}<for_stmt>elem elems<block_start><if_stmt>isinstance(elem Exception)<block_start><yield>elem<line_sep><continue><block_end><yield>cls_map[elem.tag].from_xml(elem=elem account=self.account)<block_end><block_end><def_stmt>get_payload self items include_mime_content body_type filter_html_content additional_fields<block_start>payload=create_element('m:%s'%self.SERVICE_NAME)<line_sep>shape_elem=create_element('m:AttachmentShape')<if_stmt>include_mime_content<block_start>add_xml_child(shape_elem 't:IncludeMimeContent' 'true')<block_end><if_stmt>body_type<block_start>add_xml_child(shape_elem 't:BodyType' body_type)<block_end><if_stmt>filter_html_content<is><not><none><block_start>add_xml_child(shape_elem 't:FilterHtmlContent' 'true'<if>filter_html_content<else>'false')<block_end><if_stmt>additional_fields<block_start>additional_properties=create_element('t:AdditionalProperties')<line_sep>expanded_fields=chain(*(f.expand(version=self.account.version)<for>f additional_fields))<line_sep>set_xml_value(additional_properties sorted(expanded_fields key=<lambda>f:(getattr(f.field 'field_uri' '') f.path)) version=self.account.version)<line_sep>shape_elem.append(additional_properties)<block_end><if_stmt>len(shape_elem)<block_start>payload.append(shape_elem)<block_end>attachment_ids=create_attachment_ids_element(items=items version=self.account.version)<line_sep>payload.append(attachment_ids)<line_sep><return>payload<block_end><def_stmt>_update_api_version self api_version header **parse_opts<block_start><if_stmt><not>parse_opts.get('stream_file_content' <false>)<block_start>super()._update_api_version(api_version header **parse_opts)<block_end># TODO: We're skipping this part in streaming mode because StreamingBase64Parser cannot parse the SOAP header <block_end>@classmethod<def_stmt>_get_soap_parts cls response **parse_opts<block_start><if_stmt><not>parse_opts.get('stream_file_content' <false>)<block_start><return>super()._get_soap_parts(response **parse_opts)<block_end># Pass the response unaltered. 
We want to use our custom streaming parser <return><none> response<block_end><def_stmt>_get_soap_messages self body **parse_opts<block_start><if_stmt><not>parse_opts.get('stream_file_content' <false>)<block_start><return>super()._get_soap_messages(body **parse_opts)<block_end><import_from_stmt>..attachments FileAttachment<line_sep># 'body' is actually the raw response passed on by '_get_soap_parts' r=body<line_sep>parser=StreamingBase64Parser()<line_sep>field=FileAttachment.get_field_by_fieldname('_content')<line_sep>handler=StreamingContentHandler(parser=parser ns=field.namespace element_name=field.field_uri)<line_sep>parser.setContentHandler(handler)<line_sep><return>parser.parse(r)<block_end><def_stmt>stream_file_content self attachment_id# The streaming XML parser can only stream content of one attachment <block_start>payload=self.get_payload(items=[attachment_id] include_mime_content=<false> body_type=<none> filter_html_content=<none> additional_fields=<none> )<line_sep>self.streaming=<true><try_stmt><block_start><yield><from>self._get_response_xml(payload=payload stream_file_content=<true>)<block_end><except_stmt>ElementNotFound<as>enf# When the returned XML does not contain a Content element, ElementNotFound is thrown by parser.parse(). # Let the non-streaming SOAP parser parse the response and hook into the normal exception handling. # Wrap in DummyResponse because _get_soap_parts() expects an iter_content() method. <block_start>response=DummyResponse(url=<none> headers=<none> request_headers=<none> content=enf.data)<line_sep>_,body=super()._get_soap_parts(response=response)<line_sep>res=super()._get_soap_messages(body=body)<for_stmt>e self._get_elements_in_response(response=res)<block_start><if_stmt>isinstance(e Exception)<block_start><raise>e<block_end><block_end># The returned content did not contain any EWS exceptions. Give up and re-raise the original exception. <raise>enf<block_end><finally_stmt><block_start>self.streaming=<false><line_sep>self.stop_streaming()<block_end><block_end><block_end>
<import_from_stmt>typing List<class_stmt>Solution<block_start><def_stmt>generateMatrix self n:int<arrow>List[List[int]]<block_start><if_stmt><not>n<block_start><return>[]<block_end>A,lo=[[n<times>n]] n<times>n# build the spiral backwards: prepend each new run of numbers, then rotate the partial matrix clockwise <while_stmt>lo<g>1<block_start>lo,hi=lo-len(A) lo<line_sep>A=[[i<for>i range(lo hi)]]+[list(j)<for>j zip(*A[::-1])]<block_end><return>A<block_end><block_end>
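As a quick sanity check of the rotate-and-prepend construction above, this is the expected spiral for n = 3 (a small illustrative snippet, not part of the original solution):

m = Solution().generateMatrix(3)
# each iteration prepends the next run of numbers and rotates the block clockwise
assert m == [[1, 2, 3],
             [8, 9, 4],
             [7, 6, 5]]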
# Copyright © 2020 Arm Ltd and Contributors. All rights reserved. # SPDX-License-Identifier: MIT <import_stmt>os<import_stmt>pytest<import_stmt>pyarmnn<as>ann<import_stmt>numpy<as>np<line_sep>@pytest.fixture()<def_stmt>parser shared_data_folder<block_start>""" Parse and setup the test network to be used for the tests below """<line_sep>parser=ann.IDeserializer()<line_sep>parser.CreateNetworkFromBinary(os.path.join(shared_data_folder 'mock_model.armnn'))<line_sep><yield>parser<block_end><def_stmt>test_deserializer_swig_destroy <block_start><assert_stmt>ann.IDeserializer.__swig_destroy__ "There is a swig python destructor defined"<assert_stmt>ann.IDeserializer.__swig_destroy__.__name__<eq>"delete_IDeserializer"<block_end><def_stmt>test_check_deserializer_swig_ownership parser# Check to see that SWIG has ownership for parser. This instructs SWIG to take # ownership of the return value. This allows the value to be automatically # garbage-collected when it is no longer in use <block_start><assert_stmt>parser.thisown<block_end><def_stmt>test_deserializer_get_network_input_binding_info parser# use 0 as a dummy value for layer_id, which is unused in the actual implementation <block_start>layer_id=0<line_sep>input_name='input_1'<line_sep>input_binding_info=parser.GetNetworkInputBindingInfo(layer_id input_name)<line_sep>tensor=input_binding_info[1]<assert_stmt>tensor.GetDataType()<eq>2<assert_stmt>tensor.GetNumDimensions()<eq>4<assert_stmt>tensor.GetNumElements()<eq>784<assert_stmt>tensor.GetQuantizationOffset()<eq>128<assert_stmt>tensor.GetQuantizationScale()<eq>0.007843137718737125<block_end><def_stmt>test_deserializer_get_network_output_binding_info parser# use 0 as a dummy value for layer_id, which is unused in the actual implementation <block_start>layer_id=0<line_sep>output_name="dense/Softmax"<line_sep>output_binding_info1=parser.GetNetworkOutputBindingInfo(layer_id output_name)<line_sep># Check the tensor info retrieved from GetNetworkOutputBindingInfo tensor1=output_binding_info1[1]<assert_stmt>tensor1.GetDataType()<eq>2<assert_stmt>tensor1.GetNumDimensions()<eq>2<assert_stmt>tensor1.GetNumElements()<eq>10<assert_stmt>tensor1.GetQuantizationOffset()<eq>0<assert_stmt>tensor1.GetQuantizationScale()<eq>0.00390625<block_end><def_stmt>test_deserializer_filenotfound_exception shared_data_folder<block_start>parser=ann.IDeserializer()<with_stmt>pytest.raises(RuntimeError)<as>err<block_start>parser.CreateNetworkFromBinary(os.path.join(shared_data_folder 'some_unknown_network.armnn'))<block_end># Only check for part of the exception since the exception returns # absolute path which will change on different machines. 
<assert_stmt>'Cannot read the file'<in>str(err.value)<block_end><def_stmt>test_deserializer_end_to_end shared_data_folder<block_start>parser=ann.IDeserializer()<line_sep>network=parser.CreateNetworkFromBinary(os.path.join(shared_data_folder "mock_model.armnn"))<line_sep># use 0 as a dummy value for layer_id, which is unused in the actual implementation layer_id=0<line_sep>input_name='input_1'<line_sep>output_name='dense/Softmax'<line_sep>input_binding_info=parser.GetNetworkInputBindingInfo(layer_id input_name)<line_sep>preferred_backends=[ann.BackendId('CpuAcc') ann.BackendId('CpuRef')]<line_sep>options=ann.CreationOptions()<line_sep>runtime=ann.IRuntime(options)<line_sep>opt_network,messages=ann.Optimize(network preferred_backends runtime.GetDeviceSpec() ann.OptimizerOptions())<assert_stmt>0<eq>len(messages)<line_sep>net_id,messages=runtime.LoadNetwork(opt_network)<assert_stmt>""<eq>messages<line_sep># Load test image data stored in input_lite.npy input_tensor_data=np.load(os.path.join(shared_data_folder 'deserializer/input_lite.npy'))<line_sep>input_tensors=ann.make_input_tensors([input_binding_info] [input_tensor_data])<line_sep>output_tensors=[]<line_sep>out_bind_info=parser.GetNetworkOutputBindingInfo(layer_id output_name)<line_sep>out_tensor_info=out_bind_info[1]<line_sep>out_tensor_id=out_bind_info[0]<line_sep>output_tensors.append((out_tensor_id ann.Tensor(out_tensor_info)))<line_sep>runtime.EnqueueWorkload(net_id input_tensors output_tensors)<line_sep>output_vectors=[]<for_stmt>index,out_tensor enumerate(output_tensors)<block_start>output_vectors.append(out_tensor[1].get_memory_area())<block_end># Load golden output file for result comparison. expected_outputs=np.load(os.path.join(shared_data_folder 'deserializer/golden_output_lite.npy'))<line_sep># Check that output matches golden output <assert_stmt>(expected_outputs<eq>output_vectors[0]).all()<block_end>
<import_from_stmt>cloudbio.galaxy.tools _install_application<def_stmt>install_tool options<block_start>version=options.get("galaxy_tool_version")<line_sep>name=options.get("galaxy_tool_name")<line_sep>install_dir=options.get("galaxy_tool_dir" <none>)<line_sep>_install_application(name version tool_install_dir=install_dir)<block_end>configure_actions={"install_galaxy_tool":install_tool }<line_sep>
"""Tools for exporting and importing FNSS data structures (topologies, event schedules and traffic matrices) to/from other simulators or emulators """<import_from_stmt>fnss.adapters.autonetkit *<import_from_stmt>fnss.adapters.mn *<import_from_stmt>fnss.adapters.ns2 *<import_from_stmt>fnss.adapters.omnetpp *<import_from_stmt>fnss.adapters.jfed *<line_sep>
<import_from_stmt>django.conf.urls url<import_from_stmt>django.http HttpResponse<import_from_stmt>tastypie.authentication ApiKeyAuthentication<import_from_stmt>tastypie.authorization Authorization<import_from_stmt>tastypie.http HttpForbidden<import_from_stmt>tastypie.resources ModelResource<import_from_stmt>news.models News<class_stmt>NewsResource(ModelResource)<block_start>""" List news items and mark individual items as read. The listing is read-only (GET) and excludes items the authenticated user has already read; marking an item as read goes through the per-item /read/ route registered in prepend_urls. """<class_stmt>Meta<block_start>authentication=ApiKeyAuthentication()<line_sep>authorization=Authorization()<line_sep>list_allowed_methods=['get']<line_sep>detail_allowed_methods=['patch']<line_sep>always_return_data=<true><line_sep>include_resource_uri=<false><line_sep>queryset=News.objects.all()<line_sep>fields=['id' 'title' 'content' 'news_date']<block_end><def_stmt>prepend_urls self<block_start><return>[url(r"^(?P<resource_name>%s)/(?P<pk>.*?)/read/$"%self._meta.resource_name self.wrap_view('mark_news_read') name="api_mark_news_read") ]<block_end><def_stmt>get_object_list self request<block_start><return>super(NewsResource self).get_object_list(request).exclude(user=request.user)<block_end><def_stmt>mark_news_read self request **kwargs<block_start>""" Mark the news item identified by `pk` as read by the currently authenticated user. :param request: :param kwargs: :return: """<line_sep>self.method_check(request allowed=['patch'])<line_sep>self.is_authenticated(request)<line_sep>user=getattr(request 'user' <none>)<if_stmt><not>user<or>user.is_anonymous()<block_start><return>HttpForbidden()<block_end>News.objects.get(pk=int(kwargs['pk'])).user.add(user)<line_sep><return>HttpResponse(status=200)<block_end><block_end>
<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>shutil<import_stmt>sys<import_stmt>tempfile<import_from_stmt>observations.r.sparrows sparrows<def_stmt>test_sparrows <block_start>"""Test module sparrows.py by downloading sparrows.csv and testing shape of extracted data has 116 rows and 3 columns """<line_sep>test_path=tempfile.mkdtemp()<line_sep>x_train,metadata=sparrows(test_path)<try_stmt><block_start><assert_stmt>x_train.shape<eq>(116 3)<block_end><except_stmt><block_start>shutil.rmtree(test_path)<line_sep><raise><block_end><block_end>
# Copyright (c) 2020, Apple Inc. All rights reserved. # # Use of this source code is governed by a BSD-3-clause license that can be # found in the LICENSE.txt file or at https://opensource.org/licenses/BSD-3-Clause <import_from_stmt>coremltools.converters.mil.mil.types.symbolic any_symbolic<import_from_stmt>coremltools.converters.mil.mil get_new_symbol get_new_variadic_symbol<import_from_stmt>._op_reqs *<line_sep>""" Random Op Superclass """<class_stmt>RandomDistribution(Operation)<block_start>input_spec=InputSpec(shape=IntTensorInputType() )<def_stmt>__init__ self **kwargs<block_start>super(RandomDistribution self).__init__(**kwargs)<block_end><def_stmt>type_inference self<block_start><if_stmt>any_symbolic(self.shape.shape)# We can't infer any shape if shape has variable length. <block_start><return>types.tensor(types.fp32 (get_new_variadic_symbol() ))<block_end># shape has fixed length here. <if_stmt>self.shape.sym_val<is><none><block_start>shape=tuple([get_new_symbol()<for>_ range(self.shape.shape[0])])<line_sep><return>types.tensor(types.fp32 shape)<block_end><return>types.tensor(types.fp32 tuple(self.shape.sym_val.tolist()))<block_end><block_end>""" Random Op Implementation(s) """<line_sep>@register_op(doc_str=r""" Returns a tensor with specified shape with random values from a Bernoulli distribution. .. math:: f(k) = \begin{cases}1-p &\text{if } k = 0\\ p &\text{if } k = 1\end{cases} for :math:`k` in :math:`\{0, 1\}`. Parameters ---------- shape: <K, i32>, required Target output tensor shape. K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1. prob: const<f32>, optional The probability of sampling 1. Defaults to 0.5. seed: const<i32>, optional Seed to create a reproducible sequence of values across multiple invokes. Returns ------- <*, T>, a tensor of given target output shape filled with random values. See Also -------- random_categorical, random_normal, random_uniform """)<class_stmt>random_bernoulli(RandomDistribution)<block_start>input_spec=(InputSpec(shape=IntTensorInputType() prob=FloatInputType(const=<true> default=0.5) seed=IntInputType(const=<true> default=-1) )+RandomDistribution.input_spec)<def_stmt>__init__ self **kwargs<block_start>super(random_bernoulli self).__init__(**kwargs)<block_end><block_end>@register_op(doc_str=r""" Returns random values from a categorical distribution. Parameters ---------- shape: <*D_in, T> N-dimensional tensor, one of logits (event log-probabilities) or probs (event probabilities). The first N - 1 dimensions specifies distributions, the last dimension represents a vector of probabilities. mode: const<str>, optional One of ['logits', 'probs']. Defaults to 'logits'. size: const<i32>, optional Number of samples to draw. Defaults to 1. seed: const<i32>, optional Seed to create a reproducible sequence of values across multiple invokes. Returns ------- <*D_in[:-1] + [size], T>, a tensor of given target output shape filled with random values. 
See Also -------- random_bernoulli, random_normal, random_uniform """)<class_stmt>random_categorical(Operation)<block_start>input_spec=InputSpec(x=TensorInputType() mode=StringInputType(const=<true> default="logits") size=IntInputType(const=<true> default=1) seed=IntInputType(const=<true> default=-1) )<def_stmt>__init__ self **kwargs<block_start>super(random_categorical self).__init__(**kwargs)<block_end><def_stmt>type_inference self<block_start>output_shape=self.x.shape[:-1]+(self.size.val )<line_sep><return>types.tensor(types.fp32 output_shape)<block_end><block_end>@register_op(doc_str=r""" Returns a tensor with specified shape with random values from a normal distribution. .. math:: f(x) = \frac{\exp(-x^2/2)}{\sqrt{2\pi}} for a real number :math:`x`. Parameters ---------- shape: <K, i32>, required Target output tensor shape. K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1. mean: const<f32>, optional The mean (center) of the normal distribution. Defaults to 0.0. stddev: const<f32>, optional The standard deviation (width) of the normal distribution. Defaults to 1.0. seed: const<i32>, optional Seed to create a reproducible sequence of values across multiple invokes. Returns ------- <*, T>, a tensor of given target output shape filled with random values. See Also -------- random_categorical, random_bernoulli, random_uniform """)<class_stmt>random_normal(RandomDistribution)<block_start>input_spec=(InputSpec(shape=IntTensorInputType() mean=FloatInputType(const=<true> default=0.0) stddev=FloatInputType(const=<true> default=1.0) seed=IntInputType(const=<true> default=-1) )+RandomDistribution.input_spec)<def_stmt>__init__ self **kwargs<block_start>super(random_normal self).__init__(**kwargs)<block_end><block_end>@register_op(doc_str=r""" Returns a tensor with specified shape with random values from a normal distribution. .. math:: p(x) = \frac{1}{high - low} for a real number :math:`x`. Parameters ---------- shape: <K, i32>, required Target output tensor shape. K is the rank of the output tensor. shape[k] > 0 for k = 0,..., K-1. low: const<f32>, optional Lower boundary of the output interval (inclusive). Defaults to 0.0. high: const<f32>, optional Upper boundary of the output interval (exclusive). Defaults to 1.0. seed: const<i32>, optional Seed to create a reproducible sequence of values across multiple invokes. Returns ------- <*, T>, a tensor of given target output shape filled with random values. See Also -------- random_categorical, random_bernoulli, random_normal """)<class_stmt>random_uniform(RandomDistribution)<block_start>input_spec=(InputSpec(shape=IntTensorInputType() low=FloatInputType(const=<true> default=0.0) high=FloatInputType(const=<true> default=1.0) seed=IntInputType(const=<true> default=-1) )+RandomDistribution.input_spec)<def_stmt>__init__ self **kwargs<block_start>super(random_uniform self).__init__(**kwargs)<block_end><block_end>
<import_from_stmt>django forms<import_from_stmt>django.utils.safestring mark_safe<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>django.contrib.auth get_user_model<import_from_stmt>crispy_forms bootstrap helper layout<import_from_stmt>mptt.forms TreeNodeChoiceField<import_from_stmt>myproject.apps.categories1.models Category<import_from_stmt>.models Idea RATING_CHOICES<import_from_stmt>..core.form_fields MultipleChoiceTreeField<line_sep>User=get_user_model()<class_stmt>IdeaForm(forms.ModelForm)<block_start>categories=MultipleChoiceTreeField(label=_("Categories") required=<false> queryset=Category.objects.all() )<class_stmt>Meta<block_start>model=Idea<line_sep>exclude=["author"]<block_end><def_stmt>__init__ self request *args **kwargs<block_start>self.request=request<line_sep>super().__init__(*args **kwargs)<line_sep>title_field=layout.Field("title")<line_sep>content_field=layout.Field("content" rows="3")<line_sep>main_fieldset=layout.Fieldset(_("Main data") title_field content_field)<line_sep>picture_field=layout.Field("picture")<line_sep>format_html=layout.HTML("""{% include "ideas1/includes/picture_guidelines.html" %}""")<line_sep>picture_fieldset=layout.Fieldset(_("Picture") picture_field format_html title=_("Image upload") css_id="picture_fieldset" )<line_sep>categories_field=layout.Field("categories" template="core/includes/checkboxselectmultiple_tree.html")<line_sep>categories_fieldset=layout.Fieldset(_("Categories") categories_field css_id="categories_fieldset")<line_sep>submit_button=layout.Submit("save" _("Save"))<line_sep>actions=bootstrap.FormActions(submit_button css_class="my-4")<line_sep>self.helper=helper.FormHelper()<line_sep>self.helper.form_action=self.request.path<line_sep>self.helper.form_method="POST"<line_sep>self.helper.layout=layout.Layout(main_fieldset picture_fieldset categories_fieldset actions )<block_end><def_stmt>save self commit=<true><block_start>instance=super().save(commit=<false>)<line_sep>instance.author=self.request.user<if_stmt>commit<block_start>instance.save()<line_sep>self.save_m2m()<block_end><return>instance<block_end><block_end><class_stmt>IdeaFilterForm(forms.Form)<block_start>author=forms.ModelChoiceField(label=_("Author") required=<false> queryset=User.objects.all() )<line_sep>category=TreeNodeChoiceField(label=_("Category") required=<false> queryset=Category.objects.all() level_indicator=mark_safe("&nbsp;&nbsp;&nbsp;&nbsp;"))<line_sep>rating=forms.ChoiceField(label=_("Rating") required=<false> choices=RATING_CHOICES)<def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<line_sep>author_field=layout.Field("author")<line_sep>category_field=layout.Field("category")<line_sep>rating_field=layout.Field("rating")<line_sep>submit_button=layout.Submit("filter" _("Filter"))<line_sep>actions=bootstrap.FormActions(submit_button)<line_sep>main_fieldset=layout.Fieldset(_("Filter") author_field category_field rating_field actions )<line_sep>self.helper=helper.FormHelper()<line_sep>self.helper.form_method="GET"<line_sep>self.helper.layout=layout.Layout(main_fieldset)<block_end><block_end>
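A minimal view sketch showing how the form above is meant to be constructed, with the request as its first argument; the view name, URL name, and template path here are illustrative assumptions, not part of the original module:

from django.shortcuts import redirect, render

def add_idea(request):
    # IdeaForm takes the request first so save() can attach request.user as the author
    form = IdeaForm(request, data=request.POST or None, files=request.FILES or None)
    if request.method == "POST" and form.is_valid():
        idea = form.save()
        return redirect("ideas:idea_detail", pk=idea.pk)  # placeholder URL name
    return render(request, "ideas1/idea_form.html", {"form": form})  # placeholder template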
<import_from_stmt>asn1crypto core<import_from_stmt>minikerberos.protocol.asn1_structs krb5int32 APOptions Ticket EncryptedData AP_REQ<line_sep>UNIVERSAL=0<line_sep>APPLICATION=1<line_sep>CONTEXT=2<line_sep>TAG='explicit'<class_stmt>MechType(core.ObjectIdentifier)<block_start>_map={#'': 'SNMPv2-SMI::enterprises.311.2.2.30', '1.3.6.1.4.1.311.2.2.10':'NTLMSSP - Microsoft NTLM Security Support Provider' '1.2.840.48018.1.2.2':'MS KRB5 - Microsoft Kerberos 5' '1.2.840.113554.1.2.2':'KRB5 - Kerberos 5' '1.2.840.113554.1.2.2.3':'KRB5 - Kerberos 5 - User to User' '1.3.6.1.4.1.311.2.2.30':'NEGOEX - SPNEGO Extended Negotiation Security Mechanism' }<block_end><class_stmt>InitialContextToken(core.Sequence)<block_start>class_=1<line_sep>tag=0<line_sep>_fields=[('thisMech' MechType {'optional':<false>}) ('unk_bool' core.Boolean {'optional':<false>}) ('innerContextToken' core.Any {'optional':<false>}) ]<line_sep>_oid_pair=('thisMech' 'innerContextToken')<line_sep>_oid_specs={'KRB5 - Kerberos 5':AP_REQ }<block_end>
# Copyright 2019-2020 Spotify AB # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>datetime<import_stmt>logging<import_stmt>time<import_stmt>emoji<import_from_stmt>googleapiclient discovery<line_sep>JOB_STATE_MAP={"cancel":"JOB_STATE_CANCELLED" "drain":"JOB_STATE_DRAINED"}<class_stmt>StopJob(object)<block_start><def_stmt>__init__ self api_version=<none><block_start>self._set_dataflow_client(api_version)<block_end><def_stmt>_set_dataflow_client self api_version<block_start><if_stmt><not>api_version<block_start>api_version="v1b3"<block_end>self._client=discovery.build("dataflow" api_version)<block_end><def_stmt>_check_job_running self job_name project region<block_start>request=(self._client.projects().locations().jobs().list(projectId=project location=region filter="ACTIVE" ))<try_stmt><block_start>response=request.execute()<block_end><except_stmt>Exception<as>e<block_start>logging.warning("Could not find running job '{}' in project '{}': {}".format(job_name project e))<line_sep>logging.warning("Continuing to attempt deploying '{}'".format(job_name))<line_sep><return><block_end>job_results=response.get("jobs" [])<if_stmt>job_results<block_start><for_stmt>result job_results<block_start><if_stmt>result["name"]<eq>job_name<block_start><return>result<block_end><block_end><block_end><block_end><def_stmt>_update_job_state self job req_state=<none> retries=<none><block_start><if_stmt>retries<is><none><block_start>retries=0<block_end>_req_state=JOB_STATE_MAP.get(req_state JOB_STATE_MAP["cancel"])<if_stmt>job.get("requestedState")<is><not>_req_state<block_start>job["requestedState"]=_req_state<block_end>request=(self._client.projects().locations().jobs().update(jobId=job["id"] projectId=job["projectId"] location=job["location"] body=job ))<try_stmt><block_start>request.execute()<block_end><except_stmt>Exception<as>e# generic catch if 4xx error - probably shouldn't retry <block_start><if_stmt>getattr(e "resp" <none>)<block_start><if_stmt>e.resp.status<l>500<block_start>msg="Failed to {} job '{}': {}".format(req_state job["name"] e)<line_sep>logging.error(msg)<line_sep><raise>SystemExit(1)<block_end><block_end><if_stmt>retries<g>2<block_start>msg="Max retries reached: could not {} job '{}': {}".format(req_state job["name"] e)<line_sep>logging.error(msg)<line_sep><raise>SystemExit(1)<block_end>logging.info("Failed to {} job '{}'. Trying again after 30s...".format(req_state job["name"]))<line_sep>retries<augadd>1<line_sep>time.sleep(30)<line_sep>self._update_job_state(job req_state retries)<block_end><block_end><def_stmt>_watch_job_state self job timeout=600<block_start>timeout=datetime.datetime.now()+datetime.timedelta(seconds=timeout)<line_sep>request=(self._client.projects().locations().jobs().get(jobId=job["id"] projectId=job["projectId"] location=job["location"] ))<while_stmt>datetime.datetime.now()<l>timeout<block_start><try_stmt><block_start>resp=request.execute()<block_end><except_stmt>Exception<as>e<block_start>msg=("Failed to get current status for job '{}'. 
Error: {}.\n"<concat>"Trying again after 5s...".format(job["name"] e))<line_sep>logging.info(msg)<line_sep>time.sleep(5)<line_sep><continue><block_end><if_stmt>resp["currentState"]<in>JOB_STATE_MAP.values()<block_start><return><block_end><else_stmt><block_start>msg="Waiting for job '{}' to reach terminal state...".format(job["name"])<line_sep>logging.info(msg)<line_sep>time.sleep(5)<block_end><block_end>msg="Job '{}' did not reach terminal state after '{}' secs.".format(job["name"] timeout)<line_sep>logging.error(msg)<line_sep><raise>SystemExit(1)<block_end><def_stmt>stop self job_name project region strategy api_version=<none><block_start>self._set_dataflow_client(api_version)<line_sep>current_running_job=self._check_job_running(job_name project region)<if_stmt><not>current_running_job<block_start><return><block_end>self._update_job_state(current_running_job req_state=strategy)<line_sep>self._watch_job_state(current_running_job)<line_sep>verb="cancelled"<if>strategy<eq>"cancel"<else>"drained"<line_sep>msg="Successfully {} job '{}' :smile_cat:".format(verb job_name)<line_sep>logging.info(emoji.emojize(msg use_aliases=<true>))<block_end><block_end>
# -*- coding: utf-8 -*- """ transistor.persistence ~~~~~~~~~~~~ This module implements classes and methods to aid persistence, including database, spreadsheet export, write to file. :copyright: Copyright (C) 2018 by BOM Quote Limited :license: The MIT License, see LICENSE for more details. ~~~~~~~~~~~~ """<import_from_stmt>.exporters PprintItemExporter PickleItemExporter PythonItemExporter CsvItemExporter MarshalItemExporter BaseItemExporter <import_from_stmt>.containers SplashScraperItems<import_from_stmt>.item Item Field<import_from_stmt>.newt_db.newt_crud get_job_results delete_job<line_sep>__all__=['delete_job' 'Field' 'get_job_results' 'Item' 'PprintItemExporter' 'PickleItemExporter' 'PythonItemExporter' 'CsvItemExporter' 'MarshalItemExporter' 'BaseItemExporter' 'SplashScraperItems']<line_sep>
"""Coordinate frames definitions. """<import_from_stmt>enum Enum<class_stmt>Planes(Enum)<block_start>EARTH_EQUATOR="Earth mean Equator and Equinox of epoch (J2000.0)"<line_sep>EARTH_ECLIPTIC="Earth mean Ecliptic and Equinox of epoch (J2000.0)"<line_sep>BODY_FIXED="Rotating body mean Equator and node of date"<block_end>
# Copyright 2021, Google LLC. All rights reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Run script for distributed mean estimation."""<import_stmt>os<import_stmt>pprint<import_from_stmt>absl app<import_from_stmt>absl flags<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<import_stmt>scipy.stats<import_stmt>tensorflow<as>tf<import_stmt>tensorflow_privacy<as>tfp<import_from_stmt>distributed_dp accounting_utils<import_from_stmt>distributed_dp ddpquery_utils<import_from_stmt>distributed_dp dme_utils<line_sep>flags.DEFINE_boolean('show_plot' <false> 'Whether to plot the results.')<line_sep>flags.DEFINE_boolean('print_output' <false> 'Whether to print the outputs.')<line_sep>flags.DEFINE_integer('run_id' 1 'ID of the run, useful for identifying '<concat>'the run when parallelizing this script.')<line_sep>flags.DEFINE_integer('repeat' 5 'Number of times to repeat (sequentially).')<line_sep>flags.DEFINE_string('output_dir' '/tmp/ddp_dme_outputs' 'Output directory.')<line_sep>flags.DEFINE_string('tag' '' 'Extra subfolder for the output result files.')<line_sep>flags.DEFINE_enum('mechanism' 'ddgauss' ['ddgauss'] 'DDP mechanism to use.')<line_sep>flags.DEFINE_float('norm' 10.0 'Norm of the randomly generated vectors.')<line_sep>flags.DEFINE_integer('k_stddevs' 2 'Number of standard deviations of the '<concat>'noised, quantized, aggregated siginal to bound.')<line_sep>flags.DEFINE_boolean('sqrtn_norm_growth' <false> 'Whether to assume the bound '<concat>'norm(sum_i x_i) <= sqrt(n) * c.')<line_sep>FLAGS=flags.FLAGS<def_stmt>experiment bits clip beta client_data epsilons delta mechanism k_stddevs=2 sqrtn_norm_growth=<false><block_start>"""Run a distributed mean estimation experiment. Args: bits: A list of compression bits to use. clip: The initial L2 norm clip. beta: A hyperparameter controlling the concentration inequality for the probabilistic norm bound after randomized rounding. client_data: A Python list of `n` np.array vectors, each with shape (d,). epsilons: A list of target epsilon values for comparison (serve as x-axis). delta: The delta for approximate DP. mechanism: A string specifying the mechanism to compare against Gaussian. k_stddevs: The number of standard deviations to keep for modular clipping. Defaults to 2. sqrtn_norm_growth: Whether to assume the norm of the sum of the vectors grow at a rate of `sqrt(n)` (i.e. norm(sum_i x_i) <= sqrt(n) * c). If `False`, we use the upper bound `norm(sum_i x_i) <= n * c`. Returns: Experiment results as lists of MSE. """<def_stmt>mse a b<block_start><assert_stmt>a.shape<eq>b.shape<line_sep><return>np.square(a-b).mean()<block_end># Initial fixed params. num_clients=len(client_data)<line_sep>d=len(client_data[0])<line_sep>padded_dim=np.math.pow(2 np.ceil(np.log2(d)))<line_sep>client_template=tf.zeros_like(client_data[0])<line_sep># `client_data` has shape (n, d). true_avg_vector=np.mean(client_data axis=0)<line_sep># 1. Baseline: central continuous Gaussian. gauss_mse_list=[]<for_stmt>eps epsilons# Analytic Gaussian. 
<block_start>gauss_stddev=accounting_utils.analytic_gauss_stddev(eps delta clip)<line_sep>gauss_query=tfp.GaussianSumQuery(l2_norm_clip=clip stddev=gauss_stddev)<line_sep>gauss_avg_vector=dme_utils.compute_dp_average(client_data gauss_query is_compressed=<false> bits=<none>)<line_sep>gauss_mse_list.append(mse(gauss_avg_vector true_avg_vector))<block_end># 2. Distributed DP: try each `b` separately. ddp_mse_list_per_bit=[]<for_stmt>bit bits<block_start>discrete_mse_list=[]<for_stmt>eps epsilons<block_start><if_stmt>mechanism<eq>'ddgauss'<block_start>gamma,local_stddev=accounting_utils.ddgauss_params(q=1 epsilon=eps l2_clip_norm=clip bits=bit num_clients=num_clients dim=padded_dim delta=delta beta=beta steps=1 k=k_stddevs sqrtn_norm_growth=sqrtn_norm_growth)<line_sep>scale=1.0/gamma<block_end><else_stmt><block_start><raise>ValueError(f'Unsupported mechanism: {mechanism}')<block_end>ddp_query=ddpquery_utils.build_ddp_query(mechanism local_stddev l2_norm_bound=clip beta=beta padded_dim=padded_dim scale=scale client_template=client_template)<line_sep>distributed_avg_vector=dme_utils.compute_dp_average(client_data ddp_query is_compressed=<true> bits=bit)<line_sep>discrete_mse_list.append(mse(distributed_avg_vector true_avg_vector))<block_end>ddp_mse_list_per_bit.append(discrete_mse_list)<block_end># Convert to np arrays and do some checks gauss_mse_list=np.array(gauss_mse_list)<line_sep>ddp_mse_list_per_bit=np.array(ddp_mse_list_per_bit)<assert_stmt>gauss_mse_list.shape<eq>(len(epsilons) )<assert_stmt>ddp_mse_list_per_bit.shape<eq>(len(bits) len(epsilons))<line_sep><return>gauss_mse_list ddp_mse_list_per_bit<block_end><def_stmt>experiment_repeated bits clip beta client_data_list repeat epsilons delta mechanism k_stddevs=2 sqrtn_norm_growth=<false><block_start>"""Sequentially repeat the experiment (see `experiment()` for parameters)."""<assert_stmt>len(client_data_list)<eq>repeat<line_sep>n,d=len(client_data_list[0]) len(client_data_list[0][0])<line_sep>print(f'Sequentially repeating the experiment {len(client_data_list)} times '<concat>f'for n={n}, d={d}, mechanism={mechanism}, c={clip}, bits={bits}, beta='<concat>f'{beta:.3f}, eps={epsilons}, k={k_stddevs}, sng={sqrtn_norm_growth}')<line_sep>repeat_results=[]<for_stmt>client_data client_data_list<block_start>repeat_results.append(experiment(bits=bits clip=clip beta=beta client_data=client_data epsilons=epsilons delta=delta mechanism=mechanism k_stddevs=k_stddevs sqrtn_norm_growth=sqrtn_norm_growth))<block_end>repeat_gauss_mse_list,repeat_ddp_mse_list_per_bit=zip(*repeat_results)<line_sep>repeat_gauss_mse_list=np.array(repeat_gauss_mse_list)<line_sep>repeat_ddp_mse_list_per_bit=np.array(repeat_ddp_mse_list_per_bit)<assert_stmt>len(repeat_results)<eq>repeat<assert_stmt>repeat_gauss_mse_list.shape<eq>(repeat len(epsilons))<assert_stmt>(repeat_ddp_mse_list_per_bit.shape<eq>(repeat len(bits) len(epsilons)))<line_sep><return>repeat_gauss_mse_list repeat_ddp_mse_list_per_bit<block_end><def_stmt>mean_confidence_interval data confidence=0.95# `data` should have shape (repeat, len(x-axis)). <block_start>n=len(data)<line_sep>m,se=np.mean(data axis=0) scipy.stats.sem(data axis=0)<line_sep>h=se<times>scipy.stats.t.ppf((1+confidence)/2. 
n-1)<line_sep><return>m m-h m+h<block_end><def_stmt>plot_curve subplot epsilons data label<block_start><assert_stmt>len(data.shape)<eq>2 'data should be (repeat, len(x-axis))'<line_sep>means,lower,upper=mean_confidence_interval(data)<line_sep>subplot.plot(epsilons means label=label)<line_sep>subplot.fill_between(epsilons lower upper alpha=0.2 edgecolor='face')<block_end><def_stmt>main _<block_start>"""Run distributed mean estimation experiments."""<line_sep>clip=FLAGS.norm<line_sep>delta=1e-5<line_sep>use_log=<true># Whether to use log-scale for y-axis. k_stddevs=FLAGS.k_stddevs<line_sep>sqrtn_norm_growth=FLAGS.sqrtn_norm_growth<line_sep>repeat=FLAGS.repeat<line_sep># Parallel subplots for different n=num_clients and d=dimension. nd_zip=[(100 250) (1000 250)]<line_sep># nd_zip = [(10000, 2000)] # Curves within a subplot. bits=[10 12 14 16]<line_sep># bits = [14, 16, 18, 20] # X-axis: epsilons. epsilons=[0.75]+list(np.arange(1 6.5 0.5))<line_sep>_,ax=plt.subplots(1 max(2 len(nd_zip)) figsize=(20 5))<line_sep>results=[]<for_stmt>j,(n d) enumerate(nd_zip)<block_start>client_data_list=[dme_utils.generate_client_data(d n l2_norm=clip)<for>_ range(repeat)]<line_sep>beta=np.exp(-0.5)<line_sep># Run experiment with repetition. rep_gauss_mse_list,rep_ddp_mse_list_per_bit=experiment_repeated(bits clip beta client_data_list repeat epsilons delta mechanism=FLAGS.mechanism k_stddevs=k_stddevs sqrtn_norm_growth=sqrtn_norm_growth)<line_sep># Generate some basic plots here. Use the saved results to generate plots # with custom style if needed. <if_stmt>FLAGS.show_plot<block_start>subplot=ax[j]<line_sep># Continuous Gaussian. plot_curve(subplot epsilons rep_gauss_mse_list label='Continuous Gaussian')<line_sep># Distributed DP. <for_stmt>index,bit enumerate(bits)<block_start>plot_curve(subplot epsilons rep_ddp_mse_list_per_bit[: index] label=f'{FLAGS.mechanism} (B = {bit})')<block_end>subplot.set(xlabel='Epsilon' ylabel='MSE')<line_sep>subplot.set_title(f'(n={n}, d={d}, k={k_stddevs})')<line_sep>subplot.set_yscale('log'<if>use_log<else>'linear')<line_sep>subplot.legend()<block_end>result_dic={'n':n 'd':d 'rep':repeat 'c':clip 'bits':bits 'k_stddevs':k_stddevs 'epsilons':epsilons 'mechanism':FLAGS.mechanism 'sqrtn_norm_growth':sqrtn_norm_growth 'gauss':rep_gauss_mse_list FLAGS.mechanism:rep_ddp_mse_list_per_bit }<line_sep>results.append(result_dic)<if_stmt>FLAGS.print_output<block_start>print(f'n={n}, d={d}:')<line_sep>pprint.pprint(result_dic)<block_end><block_end># Save to file. fname=f'rp={repeat},rid={FLAGS.run_id}.txt'<line_sep>fname=fname.replace(' ' '')<line_sep>result_str=pprint.pformat(results)<line_sep>dirname=os.path.join(FLAGS.output_dir FLAGS.tag)<if_stmt><not>os.path.exists(dirname)<block_start>os.makedirs(dirname)<block_end>out_path=os.path.join(dirname fname)<with_stmt>open(out_path 'w')<as>f<block_start>f.write(result_str)<block_end>print('Results saved to' out_path)<if_stmt>FLAGS.print_output<block_start>print('*'<times>80)<line_sep>print(fname)<line_sep>print('*'<times>10+'Results (copy and `eval()` in Python):')<line_sep>print(result_str)<line_sep>print('*'<times>80)<line_sep>print('Copy the above results and `eval()` them as a string in Python.')<block_end><if_stmt>FLAGS.show_plot<block_start>plt.show()<block_end>print(f'Run {FLAGS.run_id} done.')<block_end><if_stmt>__name__<eq>'__main__'<block_start>app.run(main)<block_end>
<import_from_stmt>typing List<class_stmt>Solution<block_start><def_stmt>smallerNumbersThanCurrent self nums:List[int]<arrow>List[int]<block_start>ans=[]# brute force: for every element, count how many other elements are strictly smaller <for_stmt>i range(0 len(nums))<block_start>soln=0<for_stmt>j range(0 len(nums))<block_start><if_stmt>(nums[j]<l>nums[i]<and>j<ne>i)<block_start>soln<augadd>1<block_end><block_end>ans.append(soln)<block_end><return>ans<block_end><block_end>
<class_stmt>DataSet<block_start><def_stmt>__init__ self *datas<block_start>self.datas=list(datas)<block_end><def_stmt>__len__ self<block_start><return>len(self.datas[0])<block_end><def_stmt>__getitem__ self item<block_start>ret_list=[]<for_stmt>data self.datas<block_start>ret_list.append(data[item])<block_end><return>ret_list<block_end><block_end>
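A brief example of how the wrapper above fans indexing out over every wrapped sequence:

features = [[0.1, 0.2], [0.3, 0.4], [0.5, 0.6]]
labels = [0, 1, 0]

ds = DataSet(features, labels)
assert len(ds) == 3                                    # length of the first sequence
assert ds[1] == [[0.3, 0.4], 1]                        # one item from each wrapped sequence
assert ds[0:2] == [[[0.1, 0.2], [0.3, 0.4]], [0, 1]]   # slices are forwarded too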
# Copyright (c) OpenMMLab. All rights reserved. <import_from_stmt>.dist_utils DistOptimizerHook all_reduce_dict allreduce_grads reduce_mean <import_from_stmt>.misc center_of_mass flip_tensor generate_coordinate mask2ndarray multi_apply unmap <line_sep>__all__=['allreduce_grads' 'DistOptimizerHook' 'reduce_mean' 'multi_apply' 'unmap' 'mask2ndarray' 'flip_tensor' 'all_reduce_dict' 'center_of_mass' 'generate_coordinate']<line_sep>
<import_from_stmt>seldon_core.version __version__<import_from_stmt>.storage Storage<line_sep>
# -*- coding: utf-8 -*- # Define your item pipelines here # # Don't forget to add your pipeline to the ITEM_PIPELINES setting # See: http://doc.scrapy.org/en/latest/topics/item-pipeline.html <import_stmt>pymongo<import_from_stmt>scrapy.conf settings<class_stmt>BaiduPipeline(object)<block_start><def_stmt>__init__ self<block_start>host=settings['MONGODB_HOST']<line_sep>port=settings['MONGODB_PORT']<line_sep>db_name=settings['MONGODB_DBNAME']<line_sep>client=pymongo.MongoClient(host=host port=port)<line_sep>db=client[db_name]<line_sep>self.post=db[settings['MONGODB_DOCNAME']]<block_end><def_stmt>process_item self item spider<block_start>person_info=dict(item)<line_sep>self.post.insert(person_info)<line_sep><return>item<block_end><block_end>
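The pipeline above only works if the MongoDB keys it reads are present in the Scrapy settings; a minimal sketch follows (the module path and all values are placeholders):

# settings.py (illustrative values)
ITEM_PIPELINES = {"baidu.pipelines.BaiduPipeline": 300}

MONGODB_HOST = "127.0.0.1"
MONGODB_PORT = 27017
MONGODB_DBNAME = "baidu"
MONGODB_DOCNAME = "person_info"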
<import_stmt>importlib<import_stmt>json<import_stmt>logging<import_stmt>os<import_stmt>subprocess<import_from_stmt>typing Any Dict NamedTuple Tuple<import_from_stmt>django.apps AppConfig<import_from_stmt>django.conf settings<import_from_stmt>. definitions_registry extract_views_from_urlpatterns global_types template_registry type_registry value_registry <import_from_stmt>.serialization create_schema<line_sep>logger=logging.getLogger("django.server")<def_stmt>get_urls_schema <arrow>Dict[str Any]<block_start>urlconf=importlib.import_module(settings.ROOT_URLCONF)<line_sep>urlpatterns=urlconf.urlpatterns# type: ignore[attr-defined] <import_from_stmt>django.urls converters<import_from_stmt>django.urls.resolvers RoutePattern<line_sep>converter_mapping={converters.IntConverter:"number" converters.StringConverter:"string" converters.UUIDConverter:"string" converters.SlugConverter:"string" converters.PathConverter:"string" }<line_sep>urls=extract_views_from_urlpatterns(urlpatterns)# type: ignore[no-untyped-call] reverse={}<for_stmt>_,regex,name,pattern urls<block_start><if_stmt><not>isinstance(pattern RoutePattern)<block_start><continue><block_end>reverse[name<or>regex]={"route":f"/{regex}" "args":{arg_name:converter_mapping.get(arg_converter.__class__ "string")<for>arg_name,arg_converter pattern.converters.items()} }<block_end><return>reverse<block_end><def_stmt>get_types_schema <arrow>Any<block_start>""" The package json-schema-to-typescript does expose a way to automatically export any interface it sees. However, this can bloat our generated files. Instead, while creating the schema, we occasionally run into types that we want available globally but are not directly referenced by templates. These aren't exported by `json-schema-to-typescript` because they're referenced using `tsType`, so the library is unaware of their usage. So we register them in `globals` and force `json-schema-to-typescript` to expose them. We can't just add these types to the `type_registry` because that's only parsed once when generating the parent tuple. We could explore doing two passes in the future. 
See `unreachableDefinitions` in json-schema-to-typescript """<line_sep>type_registry["globals"]=Any# type: ignore[assignment] context_processors=[]<import_from_stmt>.serialization.context_processors create_context_processor_type<for_stmt>engine settings.TEMPLATES<block_start><if_stmt>engine["BACKEND"]<eq>"reactivated.backend.JSX"<block_start>context_processors.extend(engine["OPTIONS"]["context_processors"])<block_end><block_end># type: ignore[index] type_registry["Context"]=create_context_processor_type(context_processors)<line_sep>ParentTuple=NamedTuple("ParentTuple" type_registry.items())# type: ignore[misc] parent_schema,definitions=create_schema(ParentTuple definitions_registry)<line_sep>definitions_registry.update(definitions)<line_sep><return>{"definitions":definitions **{**definitions["reactivated.apps.ParentTuple"] "properties":{**definitions["reactivated.apps.ParentTuple"]["properties"] "globals":{"type":"object" "additionalProperties":<false> "required":list(global_types.keys()) "properties":global_types } } } }<block_end><def_stmt>get_templates <arrow>Dict[str Tuple[Any]]<block_start><return>template_registry<block_end><def_stmt>get_values <arrow>Dict[str Any]<block_start><return>value_registry<block_end><def_stmt>get_schema <arrow>str<block_start>schema={"urls":get_urls_schema() "templates":get_templates() "types":get_types_schema() "values":get_values() }<line_sep><return>json.dumps(schema indent=4)<block_end><class_stmt>ReactivatedConfig(AppConfig)<block_start>name="reactivated"<def_stmt>ready self<arrow><none><block_start>""" Django's dev server actually starts twice. So we prevent generation on the first start. TODO: handle noreload. """<line_sep>schema=get_schema()<if_stmt>(os.environ.get("WERKZEUG_RUN_MAIN")<eq>"true"<or>os.environ.get("RUN_MAIN")<eq>"true")# Triggers for the subprocess of the dev server after restarts or initial start. <block_start><pass><block_end>is_server_started="DJANGO_SEVER_STARTING"<in>os.environ<if_stmt>is_server_started<is><false><block_start>os.environ["DJANGO_SEVER_STARTING"]="true"<line_sep><return><block_end>generate_schema(schema)<block_end><block_end><def_stmt>generate_schema schema:str skip_cache:bool=<false><arrow><none><block_start>""" For development usage only, this requires Node and Python installed You can use this function for your E2E test prep. """<line_sep>logger.info("Generating interfaces and client side code")<line_sep>encoded_schema=schema.encode()<import_stmt>hashlib<line_sep>digest=hashlib.sha1(encoded_schema).hexdigest().encode()<if_stmt>skip_cache<is><false><and>os.path.exists("client/generated/index.tsx")<block_start><with_stmt>open("client/generated/index.tsx" "r+b")<as>existing<block_start>already_generated=existing.read()<if_stmt>digest<in>already_generated<block_start>logger.info("Skipping generation as nothing has changed")<line_sep><return><block_end><block_end><block_end>#: Note that we don't pass the file object to stdout, because otherwise # webpack gets confused with the half-written file when we make updates. # Maybe there's a way to force it to be a single atomic write? I tried # open('w+b', buffering=0) but no luck. 
process=subprocess.Popen(["node" "./node_modules/reactivated/generator.js"] stdout=subprocess.PIPE stdin=subprocess.PIPE )<line_sep>out,error=process.communicate(encoded_schema)<line_sep>os.makedirs("client/generated" exist_ok=<true>)<with_stmt>open("client/generated/index.tsx" "w+b")<as>output<block_start>output.write(b"// Digest: %s\n"%digest)<line_sep>output.write(out)<line_sep>logger.info("Finished generating.")<block_end><block_end>
<import_from_stmt>typing Any Callable Optional Sequence Set Tuple<def_stmt>foo a:Any b:Callable[[] Tuple[int int str]] c:Set[str] d:Optional[Sequence[int]]=<none> e:Any=<none> <arrow><none><block_start><pass><block_end>print("Hello world")<line_sep>foo(a=1 b=<lambda>:(1 2 "hoge") c=set() d=<none> e=<none>)<line_sep>
<import_from_stmt>.corner_pool CornerPool<line_sep>__all__=['CornerPool']<line_sep>
<import_from_stmt>.coverage_reader CoverageReader<line_sep>
<import_stmt>torch<import_stmt>torch.autograd<as>autograd<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<import_stmt>numpy<as>np<import_from_stmt>.encoder BiLstmEncoder<import_from_stmt>.classifier AttClassifier<import_from_stmt>torch.autograd Variable<import_from_stmt>torch.nn functional init<class_stmt>MGLattice_model(nn.Module)<block_start><def_stmt>__init__ self data<block_start>super(MGLattice_model self).__init__()<line_sep># MG-Lattice encoder self.encoder=BiLstmEncoder(data)<line_sep># Attentive classifier self.classifier=AttClassifier(data)<block_end><def_stmt>forward self gaz_list word_inputs biword_inputs word_seq_lengths char_inputs char_seq_lengths char_seq_recover pos1_inputs pos2_inputs ins_label scope# ins_num * seq_len * hidden_dim <block_start>hidden_out=self.encoder.get_seq_features(gaz_list word_inputs biword_inputs word_seq_lengths char_inputs char_seq_lengths char_seq_recover pos1_inputs pos2_inputs)<line_sep># batch_size * num_classes logit=self.classifier.get_logit(hidden_out ins_label scope)<line_sep><return>logit<block_end><block_end>
<import_from_stmt>uuid uuid4<import_stmt>sqlite3<import_stmt>json<import_stmt>pandas<as>pd<import_from_stmt>sklearn_evaluation.table Table<class_stmt>SQLiteTracker<block_start>"""A simple experiment tracker using SQLite :doc:`Click here <../user_guide/SQLiteTracker>` to see the user guide. Parameters ---------- path Database location """<def_stmt>__init__ self path:str<block_start>self.conn=sqlite3.connect(path)<line_sep>cur=self.conn.cursor()<line_sep>cur.execute(""" CREATE TABLE IF NOT EXISTS experiments ( uuid TEXT NOT NULL UNIQUE, created TIMESTAMP default current_timestamp, parameters TEXT, comment TEXT ) """)<line_sep>cur.close()<block_end><def_stmt>__getitem__ self uuid<block_start>"""Get experiment with a given uuid """<line_sep># TODO: make it work for a list of uuids <return>pd.read_sql('SELECT * FROM experiments WHERE uuid = ?' self.conn params=[uuid] index_col='uuid')<block_end><def_stmt>recent self n=5 normalize=<false><block_start>"""Get most recent experiments as a pandas.DataFrame """<line_sep>query=""" SELECT uuid, created, parameters, comment FROM experiments ORDER BY created DESC LIMIT ? """<line_sep>df=pd.read_sql(query self.conn params=[n] index_col='uuid')<if_stmt>normalize# parse and normalize json <block_start>parameters=pd.json_normalize(df.pop('parameters').apply(<lambda>s:json.loads(s))).set_index(df.index)<line_sep>df=df.join(parameters)<line_sep># re order columns to show "comment" at the end comment=df.pop('comment')<line_sep>df.insert(len(df.columns) 'comment' comment)<block_end><return>df<block_end><def_stmt>query self code<block_start>"""Query the database, returns a pandas.DataFrame Examples -------- >>> from sklearn_evaluation import SQLiteTracker >>> tracker = SQLiteTracker(':memory:') # example in-memory db >>> tracker.insert('my_uuid', {'a': 1}) >>> df = tracker.query( ... "SELECT uuid, json_extract(parameters, '$.a') FROM experiments") """<line_sep>df=pd.read_sql(code self.conn)<if_stmt>'uuid'<in>df<block_start>df=df.set_index('uuid')<block_end><return>df<block_end><def_stmt>new self<block_start>"""Create a new experiment, returns a uuid """<line_sep>uuid=uuid4().hex<line_sep>cur=self.conn.cursor()<line_sep>cur.execute(""" INSERT INTO experiments (uuid) VALUES(?) """ [uuid])<line_sep>cur.close()<line_sep>self.conn.commit()<line_sep><return>uuid<block_end><def_stmt>update self uuid parameters<block_start>"""Update the parameters of an empty experiment given its uuid """<line_sep>self._can_update(uuid)<line_sep>cur=self.conn.cursor()<line_sep>cur.execute(""" UPDATE experiments SET parameters = ? WHERE uuid = ? """ [json.dumps(parameters) uuid])<line_sep>cur.close()<line_sep>self.conn.commit()<block_end><def_stmt>insert self uuid parameters<block_start>"""Insert a new experiment """<line_sep>cur=self.conn.cursor()<line_sep>cur.execute(""" INSERT INTO experiments (uuid, parameters) VALUES(?, ?) """ [uuid json.dumps(parameters)])<line_sep>cur.close()<line_sep>self.conn.commit()<block_end><def_stmt>comment self uuid comment<block_start>"""Add a comment to an experiment given its uuid """<line_sep># TODO: add overwrite (false by default) and append options cur=self.conn.cursor()<line_sep>cur.execute(""" UPDATE experiments SET comment = ? WHERE uuid = ? 
""" [comment uuid])<line_sep>cur.close()<line_sep>self.conn.commit()<block_end><def_stmt>_recent self n=5 fmt='html'<block_start><if_stmt>fmt<not><in>{'html' 'plain'}<block_start><raise>ValueError('fmt must be one "html" or "plain"')<block_end>cur=self.conn.cursor()<line_sep>cur.execute(""" SELECT uuid, created, parameters, comment FROM experiments ORDER BY created DESC LIMIT ? """ [n])<line_sep>res=cur.fetchall()<line_sep>table=Table(res header=['uuid' 'created' 'parameters' 'comment'])<line_sep>title_template='<h4> {} </h4>'<if>fmt<eq>'html'<else>'{}\n'<line_sep>title=title_template.format(type(self).__name__)<if_stmt><not>len(table)<block_start>title<augadd>'(No experiments saved yet)'<if_stmt>fmt<eq>'plain'<block_start>title<augadd>'\n'<block_end><block_end><if_stmt>len(table)<block_start>footer=(('<br>'<if>fmt<eq>'html'<else>'\n')+'(Most recent experiments)')<block_end><else_stmt><block_start>footer=''<block_end><return>(title+(table.to_html()<if>fmt<eq>'html'<else>str(table))+footer)<block_end><def_stmt>_can_update self uuid<block_start>"""Check if an experiment with a given uuid can be updated """<line_sep>cur=self.conn.cursor()<line_sep>cur.execute(""" SELECT parameters FROM experiments WHERE uuid = ? """ [uuid])<line_sep>row=cur.fetchone()<line_sep>exists=row<is><not><none><if_stmt>exists<block_start>empty=row[0]<is><none><if_stmt><not>empty<block_start><raise>ValueError('Cannot update non-empty experiment with '<concat>'uuid "{}"'.format(uuid))<block_end><block_end><else_stmt><block_start><raise>ValueError('Cannot update experiment with '<concat>'uuid "{}" because it does '<concat>'not exist'.format(uuid))<block_end><block_end><def_stmt>__repr__ self<block_start><return>self._recent(fmt='plain')<block_end><def_stmt>_repr_html_ self<block_start><return>self._recent(fmt='html')<block_end><def_stmt>__del__ self<block_start>self.conn.close()<block_end><block_end>
<import_stmt>torch<import_stmt>numpy<as>np<import_stmt>scipy.io<import_stmt>h5py<import_stmt>sklearn.metrics<import_from_stmt>torch_geometric.data Data<import_stmt>torch.nn<as>nn<import_from_stmt>scipy.ndimage gaussian_filter<line_sep>################################################# # # Utilities # ################################################# device=torch.device('cuda'<if>torch.cuda.is_available()<else>'cpu')<line_sep># reading data <class_stmt>MatReader(object)<block_start><def_stmt>__init__ self file_path to_torch=<true> to_cuda=<false> to_float=<true><block_start>super(MatReader self).__init__()<line_sep>self.to_torch=to_torch<line_sep>self.to_cuda=to_cuda<line_sep>self.to_float=to_float<line_sep>self.file_path=file_path<line_sep>self.data=<none><line_sep>self.old_mat=<none><line_sep>self._load_file()<block_end><def_stmt>_load_file self<block_start><try_stmt><block_start>self.data=scipy.io.loadmat(self.file_path)<line_sep>self.old_mat=<true><block_end><except_stmt><block_start>self.data=h5py.File(self.file_path)<line_sep>self.old_mat=<false><block_end><block_end><def_stmt>load_file self file_path<block_start>self.file_path=file_path<line_sep>self._load_file()<block_end><def_stmt>read_field self field<block_start>x=self.data[field]<if_stmt><not>self.old_mat<block_start>x=x[()]<line_sep>x=np.transpose(x axes=range(len(x.shape)-1 -1 -1))<block_end><if_stmt>self.to_float<block_start>x=x.astype(np.float32)<block_end><if_stmt>self.to_torch<block_start>x=torch.from_numpy(x)<if_stmt>self.to_cuda<block_start>x=x.cuda()<block_end><block_end><return>x<block_end><def_stmt>set_cuda self to_cuda<block_start>self.to_cuda=to_cuda<block_end><def_stmt>set_torch self to_torch<block_start>self.to_torch=to_torch<block_end><def_stmt>set_float self to_float<block_start>self.to_float=to_float<block_end><block_end># normalization, pointwise gaussian <class_stmt>UnitGaussianNormalizer(object)<block_start><def_stmt>__init__ self x eps=0.00001<block_start>super(UnitGaussianNormalizer self).__init__()<line_sep># x could be in shape of ntrain*n or ntrain*T*n or ntrain*n*T self.mean=torch.mean(x 0)<line_sep>self.std=torch.std(x 0)<line_sep>self.eps=eps<block_end><def_stmt>encode self x<block_start>x=(x-self.mean)/(self.std+self.eps)<line_sep><return>x<block_end><def_stmt>decode self x sample_idx=<none><block_start><if_stmt>sample_idx<is><none><block_start>std=self.std+self.eps# n mean=self.mean<block_end><else_stmt><block_start><if_stmt>len(self.mean.shape)<eq>len(sample_idx[0].shape)<block_start>std=self.std[sample_idx]+self.eps# batch*n mean=self.mean[sample_idx]<block_end><if_stmt>len(self.mean.shape)<g>len(sample_idx[0].shape)<block_start>std=self.std[: sample_idx]+self.eps# T*batch*n mean=self.mean[: sample_idx]<block_end><block_end># x is in shape of batch*n or T*batch*n x=(x<times>std)+mean<line_sep><return>x<block_end><def_stmt>cuda self<block_start>self.mean=self.mean.cuda()<line_sep>self.std=self.std.cuda()<block_end><def_stmt>cpu self<block_start>self.mean=self.mean.cpu()<line_sep>self.std=self.std.cpu()<block_end><block_end># normalization, Gaussian <class_stmt>GaussianNormalizer(object)<block_start><def_stmt>__init__ self x eps=0.00001<block_start>super(GaussianNormalizer self).__init__()<line_sep>self.mean=torch.mean(x)<line_sep>self.std=torch.std(x)<line_sep>self.eps=eps<block_end><def_stmt>encode self x<block_start>x=(x-self.mean)/(self.std+self.eps)<line_sep><return>x<block_end><def_stmt>decode self x 
sample_idx=<none><block_start>x=(x<times>(self.std+self.eps))+self.mean<line_sep><return>x<block_end><def_stmt>cuda self<block_start>self.mean=self.mean.cuda()<line_sep>self.std=self.std.cuda()<block_end><def_stmt>cpu self<block_start>self.mean=self.mean.cpu()<line_sep>self.std=self.std.cpu()<block_end><block_end># normalization, scaling by range <class_stmt>RangeNormalizer(object)<block_start><def_stmt>__init__ self x low=0.0 high=1.0<block_start>super(RangeNormalizer self).__init__()<line_sep>mymin=torch.min(x 0)[0].view(-1)<line_sep>mymax=torch.max(x 0)[0].view(-1)<line_sep>self.a=(high-low)/(mymax-mymin)<line_sep>self.b=-self.a<times>mymax+high<block_end><def_stmt>encode self x<block_start>s=x.size()<line_sep>x=x.view(s[0] -1)<line_sep>x=self.a<times>x+self.b<line_sep>x=x.view(s)<line_sep><return>x<block_end><def_stmt>decode self x<block_start>s=x.size()<line_sep>x=x.view(s[0] -1)<line_sep>x=(x-self.b)/self.a<line_sep>x=x.view(s)<line_sep><return>x<block_end><block_end>#loss function with rel/abs Lp loss <class_stmt>LpLoss(object)<block_start><def_stmt>__init__ self d=2 p=2 size_average=<true> reduction=<true><block_start>super(LpLoss self).__init__()<line_sep>#Dimension and Lp-norm type are postive <assert_stmt>d<g>0<and>p<g>0<line_sep>self.d=d<line_sep>self.p=p<line_sep>self.reduction=reduction<line_sep>self.size_average=size_average<block_end><def_stmt>abs self x y<block_start>num_examples=x.size()[0]<line_sep>#Assume uniform mesh h=1.0/(x.size()[1]-1.0)<line_sep>all_norms=(h<power>(self.d/self.p))<times>torch.norm(x.view(num_examples -1)-y.view(num_examples -1) self.p 1)<if_stmt>self.reduction<block_start><if_stmt>self.size_average<block_start><return>torch.mean(all_norms)<block_end><else_stmt><block_start><return>torch.sum(all_norms)<block_end><block_end><return>all_norms<block_end><def_stmt>rel self x y<block_start>num_examples=x.size()[0]<line_sep>diff_norms=torch.norm(x.reshape(num_examples -1)-y.reshape(num_examples -1) self.p 1)<line_sep>y_norms=torch.norm(y.reshape(num_examples -1) self.p 1)<if_stmt>self.reduction<block_start><if_stmt>self.size_average<block_start><return>torch.mean(diff_norms/y_norms)<block_end><else_stmt><block_start><return>torch.sum(diff_norms/y_norms)<block_end><block_end><return>diff_norms/y_norms<block_end><def_stmt>__call__ self x y<block_start><return>self.rel(x y)<block_end><block_end># A simple feedforward neural network <class_stmt>DenseNet(torch.nn.Module)<block_start><def_stmt>__init__ self layers nonlinearity out_nonlinearity=<none> normalize=<false><block_start>super(DenseNet self).__init__()<line_sep>self.n_layers=len(layers)-1<assert_stmt>self.n_layers<ge>1<line_sep>self.layers=nn.ModuleList()<for_stmt>j range(self.n_layers)<block_start>self.layers.append(nn.Linear(layers[j] layers[j+1]))<if_stmt>j<ne>self.n_layers-1<block_start><if_stmt>normalize<block_start>self.layers.append(nn.BatchNorm1d(layers[j+1]))<block_end>self.layers.append(nonlinearity())<block_end><block_end><if_stmt>out_nonlinearity<is><not><none><block_start>self.layers.append(out_nonlinearity())<block_end><block_end><def_stmt>forward self x<block_start><for_stmt>_,l enumerate(self.layers)<block_start>x=l(x)<block_end><return>x<block_end><block_end><class_stmt>DenseNet_sin(torch.nn.Module)<block_start><def_stmt>__init__ self layers nonlinearity out_nonlinearity=<none> normalize=<false><block_start>super(DenseNet_sin self).__init__()<line_sep>self.n_layers=len(layers)-1<assert_stmt>self.n_layers<ge>1<line_sep>self.layers=nn.ModuleList()<for_stmt>j 
range(self.n_layers)<block_start>self.layers.append(nn.Linear(layers[j] layers[j+1]))<block_end><block_end><def_stmt>forward self x<block_start><for_stmt>j,l enumerate(self.layers)<block_start>x=l(x)<if_stmt>j<ne>self.n_layers-1<block_start>x=torch.sin(x)<block_end><block_end><return>x<block_end><block_end># generate graphs on square domain <class_stmt>SquareMeshGenerator(object)<block_start><def_stmt>__init__ self real_space mesh_size<block_start>super(SquareMeshGenerator self).__init__()<line_sep>self.d=len(real_space)<line_sep>self.s=mesh_size[0]<assert_stmt>len(mesh_size)<eq>self.d<if_stmt>self.d<eq>1<block_start>self.n=mesh_size[0]<line_sep>self.grid=np.linspace(real_space[0][0] real_space[0][1] self.n).reshape((self.n 1))<block_end><else_stmt><block_start>self.n=1<line_sep>grids=[]<for_stmt>j range(self.d)<block_start>grids.append(np.linspace(real_space[j][0] real_space[j][1] mesh_size[j]))<line_sep>self.n<augmul>mesh_size[j]<block_end>self.grid=np.vstack([xx.ravel()<for>xx np.meshgrid(*grids)]).T<block_end><block_end><def_stmt>ball_connectivity self r<block_start>pwd=sklearn.metrics.pairwise_distances(self.grid)<line_sep>self.edge_index=np.vstack(np.where(pwd<le>r))<line_sep>self.n_edges=self.edge_index.shape[1]<line_sep><return>torch.tensor(self.edge_index dtype=torch.long)<block_end><def_stmt>gaussian_connectivity self sigma<block_start>pwd=sklearn.metrics.pairwise_distances(self.grid)<line_sep>rbf=np.exp(-pwd<power>2/sigma<power>2)<line_sep>sample=np.random.binomial(1 rbf)<line_sep>self.edge_index=np.vstack(np.where(sample))<line_sep>self.n_edges=self.edge_index.shape[1]<line_sep><return>torch.tensor(self.edge_index dtype=torch.long)<block_end><def_stmt>get_grid self<block_start><return>torch.tensor(self.grid dtype=torch.float)<block_end><def_stmt>attributes self f=<none> theta=<none><block_start><if_stmt>f<is><none><block_start><if_stmt>theta<is><none><block_start>edge_attr=self.grid[self.edge_index.T].reshape((self.n_edges -1))<block_end><else_stmt><block_start>edge_attr=np.zeros((self.n_edges 2<times>self.d+2))<line_sep>edge_attr[: 0:2<times>self.d]=self.grid[self.edge_index.T].reshape((self.n_edges -1))<line_sep>edge_attr[: 2<times>self.d]=theta[self.edge_index[0]]<line_sep>edge_attr[: 2<times>self.d+1]=theta[self.edge_index[1]]<block_end><block_end><else_stmt><block_start>xy=self.grid[self.edge_index.T].reshape((self.n_edges -1))<if_stmt>theta<is><none><block_start>edge_attr=f(xy[: 0:self.d] xy[: self.d:])<block_end><else_stmt><block_start>edge_attr=f(xy[: 0:self.d] xy[: self.d:] theta[self.edge_index[0]] theta[self.edge_index[1]])<block_end><block_end><return>torch.tensor(edge_attr dtype=torch.float)<block_end><def_stmt>get_boundary self<block_start>s=self.s<line_sep>n=self.n<line_sep>boundary1=np.array(range(0 s))<line_sep>boundary2=np.array(range(n-s n))<line_sep>boundary3=np.array(range(s n s))<line_sep>boundary4=np.array(range(2<times>s-1 n s))<line_sep>self.boundary=np.concatenate([boundary1 boundary2 boundary3 boundary4])<block_end><def_stmt>boundary_connectivity2d self stride=1<block_start>boundary=self.boundary[::stride]<line_sep>boundary_size=len(boundary)<line_sep>vertice1=np.array(range(self.n))<line_sep>vertice1=np.repeat(vertice1 boundary_size)<line_sep>vertice2=np.tile(boundary self.n)<line_sep>self.edge_index_boundary=np.stack([vertice2 vertice1] axis=0)<line_sep>self.n_edges_boundary=self.edge_index_boundary.shape[1]<line_sep><return>torch.tensor(self.edge_index_boundary dtype=torch.long)<block_end><def_stmt>attributes_boundary self f=<none> theta=<none># if 
self.edge_index_boundary == None: # self.boundary_connectivity2d() <block_start><if_stmt>f<is><none><block_start><if_stmt>theta<is><none><block_start>edge_attr_boundary=self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary -1))<block_end><else_stmt><block_start>edge_attr_boundary=np.zeros((self.n_edges_boundary 2<times>self.d+2))<line_sep>edge_attr_boundary[: 0:2<times>self.d]=self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary -1))<line_sep>edge_attr_boundary[: 2<times>self.d]=theta[self.edge_index_boundary[0]]<line_sep>edge_attr_boundary[: 2<times>self.d+1]=theta[self.edge_index_boundary[1]]<block_end><block_end><else_stmt><block_start>xy=self.grid[self.edge_index_boundary.T].reshape((self.n_edges_boundary -1))<if_stmt>theta<is><none><block_start>edge_attr_boundary=f(xy[: 0:self.d] xy[: self.d:])<block_end><else_stmt><block_start>edge_attr_boundary=f(xy[: 0:self.d] xy[: self.d:] theta[self.edge_index_boundary[0]] theta[self.edge_index_boundary[1]])<block_end><block_end><return>torch.tensor(edge_attr_boundary dtype=torch.float)<block_end><block_end># generate graphs with sampling <class_stmt>RandomMeshGenerator(object)<block_start><def_stmt>__init__ self real_space mesh_size sample_size attr_features=1<block_start>super(RandomMeshGenerator self).__init__()<line_sep>self.d=len(real_space)<line_sep>self.m=sample_size<line_sep>self.attr_features=attr_features<assert_stmt>len(mesh_size)<eq>self.d<if_stmt>self.d<eq>1<block_start>self.n=mesh_size[0]<line_sep>self.grid=np.linspace(real_space[0][0] real_space[0][1] self.n).reshape((self.n 1))<block_end><else_stmt><block_start>self.n=1<line_sep>grids=[]<for_stmt>j range(self.d)<block_start>grids.append(np.linspace(real_space[j][0] real_space[j][1] mesh_size[j]))<line_sep>self.n<augmul>mesh_size[j]<block_end>self.grid=np.vstack([xx.ravel()<for>xx np.meshgrid(*grids)]).T<block_end><if_stmt>self.m<g>self.n<block_start>self.m=self.n<block_end>self.idx=np.array(range(self.n))<line_sep>self.grid_sample=self.grid<block_end><def_stmt>sample self<block_start>perm=torch.randperm(self.n)<line_sep>self.idx=perm[:self.m]<line_sep>self.grid_sample=self.grid[self.idx]<line_sep><return>self.idx<block_end><def_stmt>get_grid self<block_start><return>torch.tensor(self.grid_sample dtype=torch.float)<block_end><def_stmt>ball_connectivity self r is_forward=<false><block_start>pwd=sklearn.metrics.pairwise_distances(self.grid_sample)<line_sep>self.edge_index=np.vstack(np.where(pwd<le>r))<line_sep>self.n_edges=self.edge_index.shape[1]<if_stmt>is_forward<block_start>print(self.edge_index.shape)<line_sep>self.edge_index=self.edge_index[: self.edge_index[0]<ge>self.edge_index[1]]<line_sep>print(self.edge_index.shape)<line_sep>self.n_edges=self.edge_index.shape[1]<block_end><return>torch.tensor(self.edge_index dtype=torch.long)<block_end><def_stmt>torus1d_connectivity self r<block_start>grid=self.grid_sample<line_sep>pwd0=sklearn.metrics.pairwise_distances(grid grid)<line_sep>grid1=grid<line_sep>grid1[: 0]=grid[: 0]+1<line_sep>pwd1=sklearn.metrics.pairwise_distances(grid grid1)<line_sep>PWD=np.stack([pwd0 pwd1] axis=2)<line_sep>pwd=np.min(PWD axis=2)<line_sep>self.edge_index=np.vstack(np.where(pwd<le>r))<line_sep>self.n_edges=self.edge_index.shape[1]<line_sep><return>torch.tensor(self.edge_index dtype=torch.long)<block_end><def_stmt>gaussian_connectivity self sigma<block_start>pwd=sklearn.metrics.pairwise_distances(self.grid_sample)<line_sep>rbf=np.exp(-pwd<power>2/sigma<power>2)<line_sep>sample=np.random.binomial(1 
rbf)<line_sep>self.edge_index=np.vstack(np.where(sample))<line_sep>self.n_edges=self.edge_index.shape[1]<line_sep><return>torch.tensor(self.edge_index dtype=torch.long)<block_end><def_stmt>attributes self f=<none> theta=<none><block_start><if_stmt>f<is><none><block_start><if_stmt>theta<is><none><block_start>edge_attr=self.grid[self.edge_index.T].reshape((self.n_edges -1))<block_end><else_stmt><block_start>theta=theta[self.idx]<line_sep>edge_attr=np.zeros((self.n_edges 2<times>self.d+2<times>self.attr_features))<line_sep>edge_attr[: 0:2<times>self.d]=self.grid_sample[self.edge_index.T].reshape((self.n_edges -1))<line_sep>edge_attr[: 2<times>self.d:2<times>self.d+self.attr_features]=theta[self.edge_index[0]].view(-1 self.attr_features)<line_sep>edge_attr[: 2<times>self.d+self.attr_features:2<times>self.d+2<times>self.attr_features]=theta[self.edge_index[1]].view(-1 self.attr_features)<block_end><block_end><else_stmt><block_start>xy=self.grid_sample[self.edge_index.T].reshape((self.n_edges -1))<if_stmt>theta<is><none><block_start>edge_attr=f(xy[: 0:self.d] xy[: self.d:])<block_end><else_stmt><block_start>theta=theta[self.idx]<line_sep>edge_attr=f(xy[: 0:self.d] xy[: self.d:] theta[self.edge_index[0]] theta[self.edge_index[1]])<block_end><block_end><return>torch.tensor(edge_attr dtype=torch.float)<block_end><block_end># # generate two-level graph <class_stmt>RandomTwoMeshGenerator(object)<block_start><def_stmt>__init__ self real_space mesh_size sample_size induced_point<block_start>super(RandomTwoMeshGenerator self).__init__()<line_sep>self.d=len(real_space)<line_sep>self.m=sample_size<line_sep>self.m_i=induced_point<assert_stmt>len(mesh_size)<eq>self.d<if_stmt>self.d<eq>1<block_start>self.n=mesh_size[0]<line_sep>self.grid=np.linspace(real_space[0][0] real_space[0][1] self.n).reshape((self.n 1))<block_end><else_stmt><block_start>self.n=1<line_sep>grids=[]<for_stmt>j range(self.d)<block_start>grids.append(np.linspace(real_space[j][0] real_space[j][1] mesh_size[j]))<line_sep>self.n<augmul>mesh_size[j]<block_end>self.grid=np.vstack([xx.ravel()<for>xx np.meshgrid(*grids)]).T<block_end><if_stmt>self.m<g>self.n<block_start>self.m=self.n<block_end>self.idx=np.array(range(self.n))<line_sep>self.idx_i=self.idx<line_sep>self.idx_both=self.idx<line_sep>self.grid_sample=self.grid<line_sep>self.grid_sample_i=self.grid<line_sep>self.grid_sample_both=self.grid<block_end><def_stmt>sample self<block_start>perm=torch.randperm(self.n)<line_sep>self.idx=perm[:self.m]<line_sep>self.idx_i=perm[self.m:self.m+self.m_i]<line_sep>self.idx_both=perm[:self.m+self.m_i]<line_sep>self.grid_sample=self.grid[self.idx]<line_sep>self.grid_sample_i=self.grid[self.idx_i]<line_sep>self.grid_sample_both=self.grid[self.idx_both]<line_sep><return>self.idx self.idx_i self.idx_both<block_end><def_stmt>get_grid self<block_start><return>torch.tensor(self.grid_sample dtype=torch.float) torch.tensor(self.grid_sample_i dtype=torch.float) torch.tensor(self.grid_sample_both dtype=torch.float)<block_end><def_stmt>ball_connectivity self r11 r12 r22<block_start>pwd=sklearn.metrics.pairwise_distances(self.grid_sample)<line_sep>pwd12=sklearn.metrics.pairwise_distances(self.grid_sample self.grid_sample_i)<line_sep>pwd22=sklearn.metrics.pairwise_distances(self.grid_sample_i)<line_sep>self.edge_index=np.vstack(np.where(pwd<le>r11))<line_sep>self.edge_index_12=np.vstack(np.where(pwd12<le>r12))<line_sep>self.edge_index_12[1 :]=self.edge_index_12[1 :]+self.m<line_sep>self.edge_index_21=self.edge_index_12[[1 0] 
:]<line_sep>self.edge_index_22=np.vstack(np.where(pwd22<le>r22))+self.m<line_sep>self.n_edges=self.edge_index.shape[1]<line_sep>self.n_edges_12=self.edge_index_12.shape[1]<line_sep>self.n_edges_22=self.edge_index_22.shape[1]<line_sep><return>torch.tensor(self.edge_index dtype=torch.long) torch.tensor(self.edge_index_12 dtype=torch.long) torch.tensor(self.edge_index_21 dtype=torch.long) torch.tensor(self.edge_index_22 dtype=torch.long)<block_end><def_stmt>attributes self theta=<none><block_start><if_stmt>theta<is><none><block_start>edge_attr=self.grid_sample_both[self.edge_index.T].reshape((self.n_edges -1))<line_sep>edge_attr_12=self.grid_sample_both[self.edge_index_12.T].reshape((self.n_edges_12 -1))<line_sep>edge_attr_21=self.grid_sample_both[self.edge_index_21.T].reshape((self.n_edges_12 -1))<line_sep>edge_attr_22=self.grid_sample_both[self.edge_index_22.T].reshape((self.n_edges_22 -1))<block_end><else_stmt><block_start>theta=theta[self.idx_both]<line_sep>edge_attr=np.zeros((self.n_edges 3<times>self.d))<line_sep>edge_attr[: 0:2<times>self.d]=self.grid_sample_both[self.edge_index.T].reshape((self.n_edges -1))<line_sep>edge_attr[: 2<times>self.d]=theta[self.edge_index[0]]<line_sep>edge_attr[: 2<times>self.d+1]=theta[self.edge_index[1]]<line_sep>edge_attr_12=np.zeros((self.n_edges_12 3<times>self.d))<line_sep>edge_attr_12[: 0:2<times>self.d]=self.grid_sample_both[self.edge_index_12.T].reshape((self.n_edges_12 -1))<line_sep>edge_attr_12[: 2<times>self.d]=theta[self.edge_index_12[0]]<line_sep>edge_attr_12[: 2<times>self.d+1]=theta[self.edge_index_12[1]]<line_sep>edge_attr_21=np.zeros((self.n_edges_12 3<times>self.d))<line_sep>edge_attr_21[: 0:2<times>self.d]=self.grid_sample_both[self.edge_index_21.T].reshape((self.n_edges_12 -1))<line_sep>edge_attr_21[: 2<times>self.d]=theta[self.edge_index_21[0]]<line_sep>edge_attr_21[: 2<times>self.d+1]=theta[self.edge_index_21[1]]<line_sep>edge_attr_22=np.zeros((self.n_edges_22 3<times>self.d))<line_sep>edge_attr_22[: 0:2<times>self.d]=self.grid_sample_both[self.edge_index_22.T].reshape((self.n_edges_22 -1))<line_sep>edge_attr_22[: 2<times>self.d]=theta[self.edge_index_22[0]]<line_sep>edge_attr_22[: 2<times>self.d+1]=theta[self.edge_index_22[1]]<block_end><return>torch.tensor(edge_attr dtype=torch.float) torch.tensor(edge_attr_12 dtype=torch.float) torch.tensor(edge_attr_21 dtype=torch.float) torch.tensor(edge_attr_22 dtype=torch.float)<block_end><block_end># generate multi-level graph <class_stmt>RandomMultiMeshGenerator(object)<block_start><def_stmt>__init__ self real_space mesh_size level sample_sizes<block_start>super(RandomMultiMeshGenerator self).__init__()<line_sep>self.d=len(real_space)<line_sep>self.m=sample_sizes<line_sep>self.level=level<assert_stmt>len(sample_sizes)<eq>level<assert_stmt>len(mesh_size)<eq>self.d<if_stmt>self.d<eq>1<block_start>self.n=mesh_size[0]<line_sep>self.grid=np.linspace(real_space[0][0] real_space[0][1] self.n).reshape((self.n 1))<block_end><else_stmt><block_start>self.n=1<line_sep>grids=[]<for_stmt>j range(self.d)<block_start>grids.append(np.linspace(real_space[j][0] real_space[j][1] mesh_size[j]))<line_sep>self.n<augmul>mesh_size[j]<block_end>self.grid=np.vstack([xx.ravel()<for>xx 
np.meshgrid(*grids)]).T<block_end>self.idx=[]<line_sep>self.idx_all=<none><line_sep>self.grid_sample=[]<line_sep>self.grid_sample_all=<none><line_sep>self.edge_index=[]<line_sep>self.edge_index_down=[]<line_sep>self.edge_index_up=[]<line_sep>self.edge_attr=[]<line_sep>self.edge_attr_down=[]<line_sep>self.edge_attr_up=[]<line_sep>self.n_edges_inner=[]<line_sep>self.n_edges_inter=[]<block_end><def_stmt>sample self<block_start>self.idx=[]<line_sep>self.grid_sample=[]<line_sep>perm=torch.randperm(self.n)<line_sep>index=0<for_stmt>l range(self.level)<block_start>self.idx.append(perm[index:index+self.m[l]])<line_sep>self.grid_sample.append(self.grid[self.idx[l]])<line_sep>index=index+self.m[l]<block_end>self.idx_all=perm[:index]<line_sep>self.grid_sample_all=self.grid[self.idx_all]<line_sep><return>self.idx self.idx_all<block_end><def_stmt>get_grid self<block_start>grid_out=[]<for_stmt>grid self.grid_sample<block_start>grid_out.append(torch.tensor(grid dtype=torch.float))<block_end><return>grid_out torch.tensor(self.grid_sample_all dtype=torch.float)<block_end><def_stmt>ball_connectivity self radius_inner radius_inter<block_start><assert_stmt>len(radius_inner)<eq>self.level<assert_stmt>len(radius_inter)<eq>self.level-1<line_sep>self.edge_index=[]<line_sep>self.edge_index_down=[]<line_sep>self.edge_index_up=[]<line_sep>self.n_edges_inner=[]<line_sep>self.n_edges_inter=[]<line_sep>edge_index_out=[]<line_sep>edge_index_down_out=[]<line_sep>edge_index_up_out=[]<line_sep>index=0<for_stmt>l range(self.level)<block_start>pwd=sklearn.metrics.pairwise_distances(self.grid_sample[l])<line_sep>edge_index=np.vstack(np.where(pwd<le>radius_inner[l]))+index<line_sep>self.edge_index.append(edge_index)<line_sep>edge_index_out.append(torch.tensor(edge_index dtype=torch.long))<line_sep>self.n_edges_inner.append(edge_index.shape[1])<line_sep>index=index+self.grid_sample[l].shape[0]<block_end>index=0<for_stmt>l range(self.level-1)<block_start>pwd=sklearn.metrics.pairwise_distances(self.grid_sample[l] self.grid_sample[l+1])<line_sep>edge_index=np.vstack(np.where(pwd<le>radius_inter[l]))+index<line_sep>edge_index[1 :]=edge_index[1 :]+self.grid_sample[l].shape[0]<line_sep>self.edge_index_down.append(edge_index)<line_sep>edge_index_down_out.append(torch.tensor(edge_index dtype=torch.long))<line_sep>self.edge_index_up.append(edge_index[[1 0] :])<line_sep>edge_index_up_out.append(torch.tensor(edge_index[[1 0] :] dtype=torch.long))<line_sep>self.n_edges_inter.append(edge_index.shape[1])<line_sep>index=index+self.grid_sample[l].shape[0]<block_end>edge_index_out=torch.cat(edge_index_out dim=1)<line_sep>edge_index_down_out=torch.cat(edge_index_down_out dim=1)<line_sep>edge_index_up_out=torch.cat(edge_index_up_out dim=1)<line_sep><return>edge_index_out edge_index_down_out edge_index_up_out<block_end><def_stmt>get_edge_index_range self# in order to use graph network's data structure, # the edge index shall be stored as tensor instead of list # we concatenate the edge index list and label the range of each level <block_start>edge_index_range=torch.zeros((self.level 2) dtype=torch.long)<line_sep>edge_index_down_range=torch.zeros((self.level-1 2) dtype=torch.long)<line_sep>edge_index_up_range=torch.zeros((self.level-1 2) dtype=torch.long)<line_sep>n_edge_index=0<for_stmt>l range(self.level)<block_start>edge_index_range[l 0]=n_edge_index<line_sep>n_edge_index=n_edge_index+self.edge_index[l].shape[1]<line_sep>edge_index_range[l 1]=n_edge_index<block_end>n_edge_index=0<for_stmt>l range(self.level-1)<block_start>edge_index_down_range[l 
0]=n_edge_index<line_sep>edge_index_up_range[l 0]=n_edge_index<line_sep>n_edge_index=n_edge_index+self.edge_index_down[l].shape[1]<line_sep>edge_index_down_range[l 1]=n_edge_index<line_sep>edge_index_up_range[l 1]=n_edge_index<block_end><return>edge_index_range edge_index_down_range edge_index_up_range<block_end><def_stmt>attributes self theta=<none><block_start>self.edge_attr=[]<line_sep>self.edge_attr_down=[]<line_sep>self.edge_attr_up=[]<if_stmt>theta<is><none><block_start><for_stmt>l range(self.level)<block_start>edge_attr=self.grid_sample_all[self.edge_index[l].T].reshape((self.n_edges_inner[l] 2<times>self.d))<line_sep>self.edge_attr.append(torch.tensor(edge_attr))<block_end><for_stmt>l range(self.level-1)<block_start>edge_attr_down=self.grid_sample_all[self.edge_index_down[l].T].reshape((self.n_edges_inter[l] 2<times>self.d))<line_sep>edge_attr_up=self.grid_sample_all[self.edge_index_up[l].T].reshape((self.n_edges_inter[l] 2<times>self.d))<line_sep>self.edge_attr_down.append(torch.tensor(edge_attr_down))<line_sep>self.edge_attr_up.append(torch.tensor(edge_attr_up))<block_end><block_end><else_stmt><block_start>theta=theta[self.idx_all]<for_stmt>l range(self.level)<block_start>edge_attr=np.zeros((self.n_edges_inner[l] 2<times>self.d+2))<line_sep>edge_attr[: 0:2<times>self.d]=self.grid_sample_all[self.edge_index[l].T].reshape((self.n_edges_inner[l] 2<times>self.d))<line_sep>edge_attr[: 2<times>self.d]=theta[self.edge_index[l][0]]<line_sep>edge_attr[: 2<times>self.d+1]=theta[self.edge_index[l][1]]<line_sep>self.edge_attr.append(torch.tensor(edge_attr dtype=torch.float))<block_end><for_stmt>l range(self.level-1)<block_start>edge_attr_down=np.zeros((self.n_edges_inter[l] 2<times>self.d+2))<line_sep>edge_attr_up=np.zeros((self.n_edges_inter[l] 2<times>self.d+2))<line_sep>edge_attr_down[: 0:2<times>self.d]=self.grid_sample_all[self.edge_index_down[l].T].reshape((self.n_edges_inter[l] 2<times>self.d))<line_sep>edge_attr_down[: 2<times>self.d]=theta[self.edge_index_down[l][0]]<line_sep>edge_attr_down[: 2<times>self.d+1]=theta[self.edge_index_down[l][1]]<line_sep>self.edge_attr_down.append(torch.tensor(edge_attr_down dtype=torch.float))<line_sep>edge_attr_up[: 0:2<times>self.d]=self.grid_sample_all[self.edge_index_up[l].T].reshape((self.n_edges_inter[l] 2<times>self.d))<line_sep>edge_attr_up[: 2<times>self.d]=theta[self.edge_index_up[l][0]]<line_sep>edge_attr_up[: 2<times>self.d+1]=theta[self.edge_index_up[l][1]]<line_sep>self.edge_attr_up.append(torch.tensor(edge_attr_up dtype=torch.float))<block_end><block_end>edge_attr_out=torch.cat(self.edge_attr dim=0)<line_sep>edge_attr_down_out=torch.cat(self.edge_attr_down dim=0)<line_sep>edge_attr_up_out=torch.cat(self.edge_attr_up dim=0)<line_sep><return>edge_attr_out edge_attr_down_out edge_attr_up_out<block_end><block_end># generate graph, with split and assemble <class_stmt>RandomGridSplitter(object)<block_start><def_stmt>__init__ self grid resolution d=2 m=200 l=1 radius=0.25<block_start>super(RandomGridSplitter self).__init__()<line_sep>self.grid=grid<line_sep>self.resolution=resolution<line_sep>self.n=resolution<power>d<line_sep>self.d=d<line_sep>self.m=m<line_sep>self.l=l<line_sep>self.radius=radius<assert_stmt>self.n%self.m<eq>0<line_sep>self.num=self.n<floordiv>self.m<block_end># number of sub-grid <def_stmt>get_data self theta edge_features=1<block_start>data=[]<for_stmt>i range(self.l)<block_start>perm=torch.randperm(self.n)<line_sep>perm=perm.reshape(self.num self.m)<for_stmt>j range(self.num)<block_start>idx=perm[j :].reshape(-1 
)<line_sep>grid_sample=self.grid.reshape(self.n -1)[idx]<line_sep>theta_sample=theta.reshape(self.n -1)[idx]<line_sep>X=torch.cat([grid_sample theta_sample] dim=1)<line_sep>pwd=sklearn.metrics.pairwise_distances(grid_sample)<line_sep>edge_index=np.vstack(np.where(pwd<le>self.radius))<line_sep>n_edges=edge_index.shape[1]<line_sep>edge_index=torch.tensor(edge_index dtype=torch.long)<if_stmt>edge_features<eq>0<block_start>edge_attr=grid_sample[edge_index.T].reshape(n_edges -1)<block_end><else_stmt><block_start>edge_attr=np.zeros((n_edges 2<times>self.d+2))<line_sep>a=theta_sample[: 0]<line_sep>edge_attr[: :2<times>self.d]=grid_sample[edge_index.T].reshape(n_edges -1)<line_sep>edge_attr[: 2<times>self.d]=a[edge_index[0]]<line_sep>edge_attr[: 2<times>self.d+1]=a[edge_index[1]]<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<block_end>data.append(Data(x=X edge_index=edge_index edge_attr=edge_attr split_idx=idx))<block_end><block_end>print('test' len(data) X.shape edge_index.shape edge_attr.shape)<line_sep><return>data<block_end><def_stmt>assemble self pred split_idx batch_size2 sigma=1 cuda=<false><block_start><assert_stmt>len(pred)<eq>len(split_idx)<assert_stmt>len(pred)<eq>self.num<times>self.l<floordiv>batch_size2<line_sep>out=torch.zeros(self.n )<if_stmt>cuda<block_start>out=out.cuda()<block_end><for_stmt>i range(len(pred))<block_start>pred_i=pred[i].reshape(batch_size2 self.m)<line_sep>split_idx_i=split_idx[i].reshape(batch_size2 self.m)<for_stmt>j range(batch_size2)<block_start>pred_ij=pred_i[j :].reshape(-1 )<line_sep>idx=split_idx_i[j :].reshape(-1 )<line_sep>out[idx]=out[idx]+pred_ij<block_end><block_end>out=out/self.l<line_sep># out = gaussian_filter(out, sigma=sigma, mode='constant', cval=0) # out = torch.tensor(out, dtype=torch.float) <return>out.reshape(-1 )<block_end><block_end># generate multi-level graph, with split and assemble <class_stmt>RandomMultiMeshSplitter(object)<block_start><def_stmt>__init__ self real_space mesh_size level sample_sizes<block_start>super(RandomMultiMeshSplitter self).__init__()<line_sep>self.d=len(real_space)<line_sep>self.ms=sample_sizes<line_sep>self.m=sample_sizes[0]<line_sep>self.level=level<assert_stmt>len(sample_sizes)<eq>level<assert_stmt>len(mesh_size)<eq>self.d<if_stmt>self.d<eq>1<block_start>self.n=mesh_size[0]<line_sep>self.grid=np.linspace(real_space[0][0] real_space[0][1] self.n).reshape((self.n 1))<block_end><else_stmt><block_start>self.n=1<line_sep>grids=[]<for_stmt>j range(self.d)<block_start>grids.append(np.linspace(real_space[j][0] real_space[j][1] mesh_size[j]))<line_sep>self.n<augmul>mesh_size[j]<block_end>self.grid=np.vstack([xx.ravel()<for>xx np.meshgrid(*grids)]).T<block_end>self.splits=self.n<floordiv>self.m# number of sub-grid <if_stmt>self.splits<times>self.m<l>self.n<block_start>self.splits=self.splits+1<block_end>print('n:' self.n ' m:' self.m ' number of splits:' self.splits)<line_sep>self.perm=<none><line_sep>self.idx=[]<line_sep>self.idx_all=<none><line_sep>self.grid_sample=[]<line_sep>self.grid_sample_all=<none><line_sep>self.edge_index=[]<line_sep>self.edge_index_down=[]<line_sep>self.edge_index_up=[]<line_sep>self.edge_attr=[]<line_sep>self.edge_attr_down=[]<line_sep>self.edge_attr_up=[]<line_sep>self.n_edges_inner=[]<line_sep>self.n_edges_inter=[]<block_end><def_stmt>sample self new_sample=<true> index0=0<block_start>self.idx=[]<line_sep>self.grid_sample=[]<if_stmt>(new_sample)<or>(self.perm<is><none>)<block_start>self.perm=torch.randperm(self.n)<block_end>index=index0<for_stmt>l 
range(self.level)<block_start>index=index%self.n<line_sep>index_end=(index+self.ms[l])%self.n<if_stmt>index<l>index_end<block_start>idx=self.perm[index:index_end]<block_end><else_stmt><block_start>idx=torch.cat((self.perm[index:] self.perm[:index_end]) dim=0)<block_end>self.idx.append(idx)<line_sep>self.grid_sample.append(self.grid[idx])<line_sep>index=index_end<block_end><if_stmt>index0<l>index_end<block_start>idx_all=self.perm[index0:index_end]<block_end><else_stmt><block_start>idx_all=torch.cat((self.perm[index0:] self.perm[:index_end]) dim=0)<block_end>self.idx_all=idx_all<line_sep>self.grid_sample_all=self.grid[self.idx_all]<line_sep><return>self.idx self.idx_all<block_end><def_stmt>get_grid self<block_start>grid_out=[]<for_stmt>grid self.grid_sample<block_start>grid_out.append(torch.tensor(grid dtype=torch.float))<block_end><return>grid_out torch.tensor(self.grid_sample_all dtype=torch.float)<block_end><def_stmt>ball_connectivity self radius_inner radius_inter<block_start><assert_stmt>len(radius_inner)<eq>self.level<assert_stmt>len(radius_inter)<eq>self.level-1<line_sep>self.edge_index=[]<line_sep>self.edge_index_down=[]<line_sep>self.edge_index_up=[]<line_sep>self.n_edges_inner=[]<line_sep>self.n_edges_inter=[]<line_sep>edge_index_out=[]<line_sep>edge_index_down_out=[]<line_sep>edge_index_up_out=[]<line_sep>index=0<for_stmt>l range(self.level)<block_start>pwd=sklearn.metrics.pairwise_distances(self.grid_sample[l])<line_sep>edge_index=np.vstack(np.where(pwd<le>radius_inner[l]))+index<line_sep>self.edge_index.append(edge_index)<line_sep>edge_index_out.append(torch.tensor(edge_index dtype=torch.long))<line_sep>self.n_edges_inner.append(edge_index.shape[1])<line_sep>index=index+self.grid_sample[l].shape[0]<block_end>index=0<for_stmt>l range(self.level-1)<block_start>pwd=sklearn.metrics.pairwise_distances(self.grid_sample[l] self.grid_sample[l+1])<line_sep>edge_index=np.vstack(np.where(pwd<le>radius_inter[l]))+index<line_sep>edge_index[1 :]=edge_index[1 :]+self.grid_sample[l].shape[0]<line_sep>self.edge_index_down.append(edge_index)<line_sep>edge_index_down_out.append(torch.tensor(edge_index dtype=torch.long))<line_sep>self.edge_index_up.append(edge_index[[1 0] :])<line_sep>edge_index_up_out.append(torch.tensor(edge_index[[1 0] :] dtype=torch.long))<line_sep>self.n_edges_inter.append(edge_index.shape[1])<line_sep>index=index+self.grid_sample[l].shape[0]<block_end>edge_index_out=torch.cat(edge_index_out dim=1)<line_sep>edge_index_down_out=torch.cat(edge_index_down_out dim=1)<line_sep>edge_index_up_out=torch.cat(edge_index_up_out dim=1)<line_sep><return>edge_index_out edge_index_down_out edge_index_up_out<block_end><def_stmt>get_edge_index_range self# in order to use graph network's data structure, # the edge index shall be stored as tensor instead of list # we concatenate the edge index list and label the range of each level <block_start>edge_index_range=torch.zeros((self.level 2) dtype=torch.long)<line_sep>edge_index_down_range=torch.zeros((self.level-1 2) dtype=torch.long)<line_sep>edge_index_up_range=torch.zeros((self.level-1 2) dtype=torch.long)<line_sep>n_edge_index=0<for_stmt>l range(self.level)<block_start>edge_index_range[l 0]=n_edge_index<line_sep>n_edge_index=n_edge_index+self.edge_index[l].shape[1]<line_sep>edge_index_range[l 1]=n_edge_index<block_end>n_edge_index=0<for_stmt>l range(self.level-1)<block_start>edge_index_down_range[l 0]=n_edge_index<line_sep>edge_index_up_range[l 
0]=n_edge_index<line_sep>n_edge_index=n_edge_index+self.edge_index_down[l].shape[1]<line_sep>edge_index_down_range[l 1]=n_edge_index<line_sep>edge_index_up_range[l 1]=n_edge_index<block_end><return>edge_index_range edge_index_down_range edge_index_up_range<block_end><def_stmt>attributes self theta=<none><block_start>self.edge_attr=[]<line_sep>self.edge_attr_down=[]<line_sep>self.edge_attr_up=[]<if_stmt>theta<is><none><block_start><for_stmt>l range(self.level)<block_start>edge_attr=self.grid_sample_all[self.edge_index[l].T].reshape((self.n_edges_inner[l] 2<times>self.d))<line_sep>self.edge_attr.append(torch.tensor(edge_attr))<block_end><for_stmt>l range(self.level-1)<block_start>edge_attr_down=self.grid_sample_all[self.edge_index_down[l].T].reshape((self.n_edges_inter[l] 2<times>self.d))<line_sep>edge_attr_up=self.grid_sample_all[self.edge_index_up[l].T].reshape((self.n_edges_inter[l] 2<times>self.d))<line_sep>self.edge_attr_down.append(torch.tensor(edge_attr_down))<line_sep>self.edge_attr_up.append(torch.tensor(edge_attr_up))<block_end><block_end><else_stmt><block_start>theta=theta[self.idx_all]<for_stmt>l range(self.level)<block_start>edge_attr=np.zeros((self.n_edges_inner[l] 2<times>self.d+2))<line_sep>edge_attr[: 0:2<times>self.d]=self.grid_sample_all[self.edge_index[l].T].reshape((self.n_edges_inner[l] 2<times>self.d))<line_sep>edge_attr[: 2<times>self.d]=theta[self.edge_index[l][0]]<line_sep>edge_attr[: 2<times>self.d+1]=theta[self.edge_index[l][1]]<line_sep>self.edge_attr.append(torch.tensor(edge_attr dtype=torch.float))<block_end><for_stmt>l range(self.level-1)<block_start>edge_attr_down=np.zeros((self.n_edges_inter[l] 2<times>self.d+2))<line_sep>edge_attr_up=np.zeros((self.n_edges_inter[l] 2<times>self.d+2))<line_sep>edge_attr_down[: 0:2<times>self.d]=self.grid_sample_all[self.edge_index_down[l].T].reshape((self.n_edges_inter[l] 2<times>self.d))<line_sep>edge_attr_down[: 2<times>self.d]=theta[self.edge_index_down[l][0]]<line_sep>edge_attr_down[: 2<times>self.d+1]=theta[self.edge_index_down[l][1]]<line_sep>self.edge_attr_down.append(torch.tensor(edge_attr_down dtype=torch.float))<line_sep>edge_attr_up[: 0:2<times>self.d]=self.grid_sample_all[self.edge_index_up[l].T].reshape((self.n_edges_inter[l] 2<times>self.d))<line_sep>edge_attr_up[: 2<times>self.d]=theta[self.edge_index_up[l][0]]<line_sep>edge_attr_up[: 2<times>self.d+1]=theta[self.edge_index_up[l][1]]<line_sep>self.edge_attr_up.append(torch.tensor(edge_attr_up dtype=torch.float))<block_end><block_end>edge_attr_out=torch.cat(self.edge_attr dim=0)<line_sep>edge_attr_down_out=torch.cat(self.edge_attr_down dim=0)<line_sep>edge_attr_up_out=torch.cat(self.edge_attr_up dim=0)<line_sep><return>edge_attr_out edge_attr_down_out edge_attr_up_out<block_end><def_stmt>splitter self radius_inner radius_inter theta_a theta_all# give a test mesh, generate a list of data <block_start>data=[]<line_sep>index=0<for_stmt>i range(self.splits)<block_start><if_stmt>i<eq>0<block_start>idx,idx_all=self.sample(new_sample=<true> index0=index)<block_end><else_stmt><block_start>idx,idx_all=self.sample(new_sample=<false> index0=index)<block_end>index=(index+self.m)%self.n<line_sep>grid,grid_all=self.get_grid()<line_sep>edge_index,edge_index_down,edge_index_up=self.ball_connectivity(radius_inner radius_inter)<line_sep>edge_index_range,edge_index_down_range,edge_index_up_range=self.get_edge_index_range()<line_sep>edge_attr,edge_attr_down,edge_attr_up=self.attributes(theta=theta_a)<line_sep>x=torch.cat([grid_all theta_all[idx_all :]] 
dim=1)<line_sep>data.append(Data(x=x edge_index_mid=edge_index edge_index_down=edge_index_down edge_index_up=edge_index_up edge_index_range=edge_index_range edge_index_down_range=edge_index_down_range edge_index_up_range=edge_index_up_range edge_attr_mid=edge_attr edge_attr_down=edge_attr_down edge_attr_up=edge_attr_up sample_idx=idx[0]))<block_end><return>data<block_end><def_stmt>assembler self out_list sample_idx_list is_cuda=<false><block_start><assert_stmt>len(out_list)<eq>self.splits<if_stmt>is_cuda<block_start>pred=torch.zeros(self.n ).cuda()<block_end><else_stmt><block_start>pred=torch.zeros(self.n )<block_end><for_stmt>i range(self.splits)<block_start>pred[sample_idx_list[i]]=out_list[i].reshape(-1)<block_end><return>pred<block_end><block_end># generate graph, with split and assemble with downsample <class_stmt>DownsampleGridSplitter(object)<block_start><def_stmt>__init__ self grid resolution r m=100 radius=0.15 edge_features=1<block_start>super(DownsampleGridSplitter self).__init__()<line_sep># instead of randomly sample sub-grids, here we downsample sub-grids self.grid=grid.reshape(resolution resolution 2)<line_sep># self.theta = theta.reshape(resolution, resolution,-1) # self.y = y.reshape(resolution, resolution,1) self.resolution=resolution<if_stmt>resolution%2<eq>1<block_start>self.s=int(((resolution-1)/r)+1)<block_end><else_stmt><block_start>self.s=int(resolution/r)<block_end>self.r=r<line_sep>self.n=resolution<power>2<line_sep>self.m=m<line_sep>self.radius=radius<line_sep>self.edge_features=edge_features<line_sep>self.index=torch.tensor(range(self.n) dtype=torch.long).reshape(self.resolution self.resolution)<block_end><def_stmt>ball_connectivity self grid<block_start>pwd=sklearn.metrics.pairwise_distances(grid)<line_sep>edge_index=np.vstack(np.where(pwd<le>self.radius))<line_sep>n_edges=edge_index.shape[1]<line_sep><return>torch.tensor(edge_index dtype=torch.long) n_edges<block_end><def_stmt>get_data self theta<block_start>theta_d=theta.shape[1]<line_sep>theta=theta.reshape(self.resolution self.resolution theta_d)<line_sep>data=[]<for_stmt>x range(self.r)<block_start><for_stmt>y range(self.r)<block_start>grid_sub=self.grid[x::self.r y::self.r :].reshape(-1 2)<line_sep>theta_sub=theta[x::self.r y::self.r :].reshape(-1 theta_d)<line_sep>perm=torch.randperm(self.n)<line_sep>m=self.m-grid_sub.shape[0]<line_sep>idx=perm[:m]<line_sep>grid_sample=self.grid.reshape(self.n -1)[idx]<line_sep>theta_sample=theta.reshape(self.n -1)[idx]<line_sep>grid_split=torch.cat([grid_sub grid_sample] dim=0)<line_sep>theta_split=torch.cat([theta_sub theta_sample] dim=0)<line_sep>X=torch.cat([grid_split theta_split] dim=1)<line_sep>edge_index,n_edges=self.ball_connectivity(grid_split)<line_sep>edge_attr=np.zeros((n_edges 4+self.edge_features<times>2))<line_sep>a=theta_split[: :self.edge_features]<line_sep>edge_attr[: :4]=grid_split[edge_index.T].reshape(n_edges -1)<line_sep>edge_attr[: 4:4+self.edge_features]=a[edge_index[0]]<line_sep>edge_attr[: 4+self.edge_features:4+self.edge_features<times>2]=a[edge_index[1]]<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep>split_idx=torch.tensor([x y] dtype=torch.long).reshape(1 2)<line_sep>data.append(Data(x=X edge_index=edge_index edge_attr=edge_attr split_idx=split_idx))<block_end><block_end>print('test' len(data) X.shape edge_index.shape edge_attr.shape)<line_sep><return>data<block_end><def_stmt>sample self theta Y<block_start>theta_d=theta.shape[1]<line_sep>theta=theta.reshape(self.resolution self.resolution 
theta_d)<line_sep>Y=Y.reshape(self.resolution self.resolution)<line_sep>x=torch.randint(0 self.r (1 ))<line_sep>y=torch.randint(0 self.r (1 ))<line_sep>grid_sub=self.grid[x::self.r y::self.r :].reshape(-1 2)<line_sep>theta_sub=theta[x::self.r y::self.r :].reshape(-1 theta_d)<line_sep>Y_sub=Y[x::self.r y::self.r].reshape(-1 )<line_sep>index_sub=self.index[x::self.r y::self.r].reshape(-1 )<line_sep>n_sub=Y_sub.shape[0]<if_stmt>self.m<ge>n_sub<block_start>m=self.m-n_sub<line_sep>perm=torch.randperm(self.n)<line_sep>idx=perm[:m]<line_sep>grid_sample=self.grid.reshape(self.n -1)[idx]<line_sep>theta_sample=theta.reshape(self.n -1)[idx]<line_sep>Y_sample=Y.reshape(self.n )[idx]<line_sep>grid_split=torch.cat([grid_sub grid_sample] dim=0)<line_sep>theta_split=torch.cat([theta_sub theta_sample] dim=0)<line_sep>Y_split=torch.cat([Y_sub Y_sample] dim=0).reshape(-1 )<line_sep>index_split=torch.cat([index_sub idx] dim=0).reshape(-1 )<line_sep>X=torch.cat([grid_split theta_split] dim=1)<block_end><else_stmt><block_start>grid_split=grid_sub<line_sep>theta_split=theta_sub<line_sep>Y_split=Y_sub.reshape(-1 )<line_sep>index_split=index_sub.reshape(-1 )<line_sep>X=torch.cat([grid_split theta_split] dim=1)<block_end>edge_index,n_edges=self.ball_connectivity(grid_split)<line_sep>edge_attr=np.zeros((n_edges 4+self.edge_features<times>2))<line_sep>a=theta_split[: :self.edge_features]<line_sep>edge_attr[: :4]=grid_split[edge_index.T].reshape(n_edges -1)<line_sep>edge_attr[: 4:4+self.edge_features]=a[edge_index[0]]<line_sep>edge_attr[: 4+self.edge_features:4+self.edge_features<times>2]=a[edge_index[1]]<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep>split_idx=torch.tensor([x y] dtype=torch.long).reshape(1 2)<line_sep>data=Data(x=X y=Y_split edge_index=edge_index edge_attr=edge_attr split_idx=split_idx sample_idx=index_split)<line_sep>print('train' X.shape Y_split.shape edge_index.shape edge_attr.shape index_split.shape)<line_sep><return>data<block_end><def_stmt>assemble self pred split_idx batch_size2 sigma=1<block_start><assert_stmt>len(pred)<eq>len(split_idx)<assert_stmt>len(pred)<eq>self.r<power>2<floordiv>batch_size2<line_sep>out=torch.zeros((self.resolution self.resolution))<for_stmt>i range(len(pred))<block_start>pred_i=pred[i].reshape(batch_size2 self.m)<line_sep>split_idx_i=split_idx[i]<for_stmt>j range(batch_size2)<block_start>pred_ij=pred_i[j :]<line_sep>x,y=split_idx_i[j]<if_stmt>self.resolution%2<eq>1<block_start><if_stmt>x<eq>0<block_start>nx=self.s<block_end><else_stmt><block_start>nx=self.s-1<block_end><if_stmt>y<eq>0<block_start>ny=self.s<block_end><else_stmt><block_start>ny=self.s-1<block_end><block_end><else_stmt><block_start>nx=self.s<line_sep>ny=self.s<block_end># pred_ij = pred_i[idx : idx + nx * ny] out[x::self.r y::self.r]=pred_ij[:nx<times>ny].reshape(nx ny)<block_end><block_end>out=gaussian_filter(out sigma=sigma mode='constant' cval=0)<line_sep>out=torch.tensor(out dtype=torch.float)<line_sep><return>out.reshape(-1 )<block_end><block_end># generate graph on Torus, with split and assemble <class_stmt>TorusGridSplitter(object)<block_start><def_stmt>__init__ self grid resolution r m=100 radius=0.15 T=<none> edge_features=1 <block_start>super(TorusGridSplitter self).__init__()<line_sep>self.grid=grid.reshape(resolution resolution 2)<line_sep># self.theta = theta.reshape(resolution, resolution,-1) # self.y = y.reshape(resolution, resolution,1) 
self.resolution=resolution<if_stmt>resolution%2<eq>1<block_start>self.s=int(((resolution-1)/r)+1)<block_end><else_stmt><block_start>self.s=int(resolution/r)<block_end>self.r=r<line_sep>self.n=resolution<power>2<line_sep>self.m=m<line_sep>self.T=T<line_sep>self.radius=radius<line_sep>self.edge_features=edge_features<line_sep>self.index=torch.tensor(range(self.n) dtype=torch.long).reshape(self.resolution self.resolution)<block_end><def_stmt>pairwise_difference self grid1 grid2<block_start>n=grid1.shape[0]<line_sep>x1=grid1[: 0]<line_sep>y1=grid1[: 1]<line_sep>x2=grid2[: 0]<line_sep>y2=grid2[: 1]<line_sep>X1=np.tile(x1.reshape(n 1) [1 n])<line_sep>X2=np.tile(x2.reshape(1 n) [n 1])<line_sep>X_diff=X1-X2<line_sep>Y1=np.tile(y1.reshape(n 1) [1 n])<line_sep>Y2=np.tile(y2.reshape(1 n) [n 1])<line_sep>Y_diff=Y1-Y2<line_sep><return>X_diff Y_diff<block_end><def_stmt>torus_connectivity self grid<block_start>pwd0=sklearn.metrics.pairwise_distances(grid grid)<line_sep>X_diff0,Y_diff0=self.pairwise_difference(grid grid)<line_sep>grid1=grid<line_sep>grid1[: 0]=grid[: 0]+1<line_sep>pwd1=sklearn.metrics.pairwise_distances(grid grid1)<line_sep>X_diff1,Y_diff1=self.pairwise_difference(grid grid1)<line_sep>grid2=grid<line_sep>grid2[: 1]=grid[: 1]+1<line_sep>pwd2=sklearn.metrics.pairwise_distances(grid grid2)<line_sep>X_diff2,Y_diff2=self.pairwise_difference(grid grid2)<line_sep>grid3=grid<line_sep>grid3[: :]=grid[: :]+1<line_sep>pwd3=sklearn.metrics.pairwise_distances(grid grid3)<line_sep>X_diff3,Y_diff3=self.pairwise_difference(grid grid3)<line_sep>grid4=grid<line_sep>grid4[: 0]=grid[: 0]+1<line_sep>grid4[: 1]=grid[: 1]-1<line_sep>pwd4=sklearn.metrics.pairwise_distances(grid grid4)<line_sep>X_diff4,Y_diff4=self.pairwise_difference(grid grid4)<line_sep>PWD=np.stack([pwd0 pwd1 pwd2 pwd3 pwd4] axis=2)<line_sep>X_DIFF=np.stack([X_diff0 X_diff1 X_diff2 X_diff3 X_diff4] axis=2)<line_sep>Y_DIFF=np.stack([Y_diff0 Y_diff1 Y_diff2 Y_diff3 Y_diff4] axis=2)<line_sep>pwd=np.min(PWD axis=2)<line_sep>pwd_index=np.argmin(PWD axis=2)<line_sep>edge_index=np.vstack(np.where(pwd<le>self.radius))<line_sep>pwd_index=pwd_index[np.where(pwd<le>self.radius)]<line_sep>PWD_index=(np.where(pwd<le>self.radius)[0] np.where(pwd<le>self.radius)[1] pwd_index)<line_sep>distance=PWD[PWD_index]<line_sep>X_difference=X_DIFF[PWD_index]<line_sep>Y_difference=Y_DIFF[PWD_index]<line_sep>n_edges=edge_index.shape[1]<line_sep><return>torch.tensor(edge_index dtype=torch.long) n_edges distance X_difference Y_difference<block_end><def_stmt>get_data self theta params=<none><block_start>theta_d=theta.shape[1]<line_sep>theta=theta.reshape(self.resolution self.resolution theta_d)<line_sep>data=[]<for_stmt>x range(self.r)<block_start><for_stmt>y range(self.r)<block_start>grid_sub=self.grid[x::self.r y::self.r :].reshape(-1 2)<line_sep>theta_sub=theta[x::self.r y::self.r :].reshape(-1 theta_d)<line_sep>perm=torch.randperm(self.n)<line_sep>m=self.m-grid_sub.shape[0]<line_sep>idx=perm[:m]<line_sep>grid_sample=self.grid.reshape(self.n -1)[idx]<line_sep>theta_sample=theta.reshape(self.n -1)[idx]<line_sep>grid_split=torch.cat([grid_sub grid_sample] dim=0)<line_sep>theta_split=torch.cat([theta_sub theta_sample] dim=0)<line_sep>X=torch.cat([grid_split theta_split] dim=1)<line_sep>edge_index,n_edges,distance,X_difference,Y_difference=self.torus_connectivity(grid_split)<line_sep>edge_attr=np.zeros((n_edges 3+self.edge_features<times>2))<line_sep>a=theta_split[: :self.edge_features]<line_sep>edge_attr[: 0]=X_difference.reshape(n_edges )<line_sep>edge_attr[: 
1]=Y_difference.reshape(n_edges )<line_sep>edge_attr[: 2]=distance.reshape(n_edges )<line_sep>edge_attr[: 3:3+self.edge_features]=a[edge_index[0]]<line_sep>edge_attr[: 3+self.edge_features:4+self.edge_features<times>2]=a[edge_index[1]]<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep>split_idx=torch.tensor([x y] dtype=torch.long).reshape(1 2)<if_stmt>params<eq><none><block_start>data.append(Data(x=X edge_index=edge_index edge_attr=edge_attr split_idx=split_idx))<block_end><else_stmt><block_start>data.append(Data(x=X edge_index=edge_index edge_attr=edge_attr split_idx=split_idx params=params))<block_end><block_end><block_end>print('test' len(data) X.shape edge_index.shape edge_attr.shape)<line_sep><return>data<block_end><def_stmt>sample self theta Y<block_start>theta_d=theta.shape[1]<line_sep>theta=theta.reshape(self.resolution self.resolution theta_d)<line_sep>Y=Y.reshape(self.resolution self.resolution)<line_sep>x=torch.randint(0 self.r (1 ))<line_sep>y=torch.randint(0 self.r (1 ))<line_sep>grid_sub=self.grid[x::self.r y::self.r :].reshape(-1 2)<line_sep>theta_sub=theta[x::self.r y::self.r :].reshape(-1 theta_d)<line_sep>Y_sub=Y[x::self.r y::self.r].reshape(-1 )<line_sep>index_sub=self.index[x::self.r y::self.r].reshape(-1 )<line_sep>n_sub=Y_sub.shape[0]<if_stmt>self.m<ge>n_sub<block_start>m=self.m-n_sub<line_sep>perm=torch.randperm(self.n)<line_sep>idx=perm[:m]<line_sep>grid_sample=self.grid.reshape(self.n -1)[idx]<line_sep>theta_sample=theta.reshape(self.n -1)[idx]<line_sep>Y_sample=Y.reshape(self.n )[idx]<line_sep>grid_split=torch.cat([grid_sub grid_sample] dim=0)<line_sep>theta_split=torch.cat([theta_sub theta_sample] dim=0)<line_sep>Y_split=torch.cat([Y_sub Y_sample] dim=0).reshape(-1 )<line_sep>index_split=torch.cat([index_sub idx] dim=0).reshape(-1 )<line_sep>X=torch.cat([grid_split theta_split] dim=1)<block_end><else_stmt><block_start>grid_split=grid_sub<line_sep>theta_split=theta_sub<line_sep>Y_split=Y_sub.reshape(-1 )<line_sep>index_split=index_sub.reshape(-1 )<line_sep>X=torch.cat([grid_split theta_split] dim=1)<block_end>edge_index,n_edges,distance,X_difference,Y_difference=self.torus_connectivity(grid_split)<line_sep>edge_attr=np.zeros((n_edges 3+self.edge_features<times>2))<line_sep>a=theta_split[: :self.edge_features]<line_sep>edge_attr[: 0]=X_difference.reshape(n_edges )<line_sep>edge_attr[: 1]=Y_difference.reshape(n_edges )<line_sep>edge_attr[: 2]=distance.reshape(n_edges )<line_sep>edge_attr[: 3:3+self.edge_features]=a[edge_index[0]]<line_sep>edge_attr[: 3+self.edge_features:4+self.edge_features<times>2]=a[edge_index[1]]<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep>split_idx=torch.tensor([x y] dtype=torch.long).reshape(1 2)<line_sep>data=Data(x=X y=Y_split edge_index=edge_index edge_attr=edge_attr split_idx=split_idx sample_idx=index_split)<line_sep>print('train' X.shape Y_split.shape edge_index.shape edge_attr.shape index_split.shape)<line_sep><return>data<block_end><def_stmt>sampleT self theta Y params=<none><block_start>theta_d=theta.shape[1]<line_sep>theta=theta.reshape(self.resolution self.resolution theta_d)<line_sep>Y=Y.reshape(self.T self.resolution self.resolution)<line_sep>x=torch.randint(0 self.r (1 ))<line_sep>y=torch.randint(0 self.r (1 ))<line_sep>grid_sub=self.grid[x::self.r y::self.r :].reshape(-1 2)<line_sep>theta_sub=theta[x::self.r y::self.r :].reshape(-1 theta_d)<line_sep>Y_sub=Y[: x::self.r y::self.r].reshape(self.T -1)<line_sep>index_sub=self.index[x::self.r y::self.r].reshape(-1 
)<line_sep>n_sub=Y_sub.shape[1]<if_stmt>self.m<ge>n_sub<block_start>m=self.m-n_sub<line_sep>perm=torch.randperm(self.n)<line_sep>idx=perm[:m]<line_sep>grid_sample=self.grid.reshape(self.n -1)[idx]<line_sep>theta_sample=theta.reshape(self.n -1)[idx]<line_sep>Y_sample=Y.reshape(self.T self.n)[: idx]<line_sep>grid_split=torch.cat([grid_sub grid_sample] dim=0)<line_sep>theta_split=torch.cat([theta_sub theta_sample] dim=0)<line_sep>Y_split=torch.cat([Y_sub Y_sample] dim=1).reshape(self.T -1)<line_sep>index_split=torch.cat([index_sub idx] dim=0).reshape(-1 )<line_sep>X=torch.cat([grid_split theta_split] dim=1)<block_end><else_stmt><block_start>grid_split=grid_sub<line_sep>theta_split=theta_sub<line_sep>Y_split=Y_sub.reshape(self.T -1)<line_sep>index_split=index_sub.reshape(-1 )<line_sep>X=torch.cat([grid_split theta_split] dim=1)<block_end>edge_index,n_edges,distance,X_difference,Y_difference=self.torus_connectivity(grid_split)<line_sep>edge_attr=np.zeros((n_edges 3+self.edge_features<times>2))<line_sep>a=theta_split[: :self.edge_features]<line_sep>edge_attr[: 0]=X_difference.reshape(n_edges )<line_sep>edge_attr[: 1]=Y_difference.reshape(n_edges )<line_sep>edge_attr[: 2]=distance.reshape(n_edges )<line_sep>edge_attr[: 3:3+self.edge_features]=a[edge_index[0]]<line_sep>edge_attr[: 3+self.edge_features:4+self.edge_features<times>2]=a[edge_index[1]]<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep>split_idx=torch.tensor([x y] dtype=torch.long).reshape(1 2)<if_stmt>params<eq><none><block_start>data=Data(x=X y=Y_split edge_index=edge_index edge_attr=edge_attr split_idx=split_idx sample_idx=index_split)<block_end><else_stmt><block_start>data=Data(x=X y=Y_split edge_index=edge_index edge_attr=edge_attr split_idx=split_idx sample_idx=index_split params=params)<block_end>print('train' X.shape Y_split.shape edge_index.shape edge_attr.shape index_split.shape)<line_sep><return>data<block_end><def_stmt>assemble self pred split_idx batch_size2 sigma=1<block_start><assert_stmt>len(pred)<eq>len(split_idx)<assert_stmt>len(pred)<eq>self.r<power>2<floordiv>batch_size2<line_sep>out=torch.zeros((self.resolution self.resolution))<for_stmt>i range(len(pred))<block_start>pred_i=pred[i].reshape(batch_size2 self.m)<line_sep>split_idx_i=split_idx[i]<for_stmt>j range(batch_size2)<block_start>pred_ij=pred_i[j :]<line_sep>x,y=split_idx_i[j]<if_stmt>self.resolution%2<eq>1<block_start><if_stmt>x<eq>0<block_start>nx=self.s<block_end><else_stmt><block_start>nx=self.s-1<block_end><if_stmt>y<eq>0<block_start>ny=self.s<block_end><else_stmt><block_start>ny=self.s-1<block_end><block_end><else_stmt><block_start>nx=self.s<line_sep>ny=self.s<block_end># pred_ij = pred_i[idx : idx + nx * ny] out[x::self.r y::self.r]=pred_ij[:nx<times>ny].reshape(nx ny)<block_end><block_end>out=gaussian_filter(out sigma=sigma mode='wrap')<line_sep>out=torch.tensor(out dtype=torch.float)<line_sep><return>out.reshape(-1 )<block_end><def_stmt>assembleT self pred split_idx batch_size2 sigma=1# pred is a list (batches) of list (time seq) <block_start><assert_stmt>len(pred)<eq>len(split_idx)<assert_stmt>len(pred[0])<eq>self.T<assert_stmt>len(pred)<eq>self.r<power>2<floordiv>batch_size2<line_sep>out=torch.zeros((self.T self.resolution self.resolution))<for_stmt>t range(self.T)<block_start><for_stmt>i range(len(pred))<block_start>pred_i=pred[i][t].reshape(batch_size2 self.m)<line_sep>split_idx_i=split_idx[i]<for_stmt>j range(batch_size2)<block_start>pred_ij=pred_i[j 
:]<line_sep>x,y=split_idx_i[j]<if_stmt>self.resolution%2<eq>1<block_start><if_stmt>x<eq>0<block_start>nx=self.s<block_end><else_stmt><block_start>nx=self.s-1<block_end><if_stmt>y<eq>0<block_start>ny=self.s<block_end><else_stmt><block_start>ny=self.s-1<block_end><block_end><else_stmt><block_start>nx=self.s<line_sep>ny=self.s<block_end># pred_ij = pred_i[idx : idx + nx * ny] out[t x::self.r y::self.r]=pred_ij[:nx<times>ny].reshape(nx ny)<block_end><block_end><block_end>out=gaussian_filter(out sigma=sigma mode='wrap')<line_sep>out=torch.tensor(out dtype=torch.float)<line_sep><return>out.reshape(self.T self.n)<block_end><block_end><def_stmt>downsample data grid_size l<block_start>data=data.reshape(-1 grid_size grid_size)<line_sep>data=data[: ::l ::l]<line_sep>data=data.reshape(-1 (grid_size<floordiv>l)<power>2)<line_sep><return>data<block_end><def_stmt>simple_grid n_x n_y<block_start>xs=np.linspace(0.0 1.0 n_x)<line_sep>ys=np.linspace(0.0 1.0 n_y)<line_sep># xs = np.array(range(n_x)) # ys = np.array(range(n_y)) grid=np.vstack([xx.ravel()<for>xx np.meshgrid(xs ys)]).T<line_sep>edge_index=[]<line_sep>edge_attr=[]<for_stmt>y range(n_y)<block_start><for_stmt>x range(n_x)<block_start>i=y<times>n_x+x<if_stmt>(x<ne>n_x-1)<block_start>edge_index.append((i i+1))<line_sep>edge_attr.append((1 0 0))<line_sep>edge_index.append((i+1 i))<line_sep>edge_attr.append((-1 0 0))<block_end><if_stmt>(y<ne>n_y-1)<block_start>edge_index.append((i i+n_x))<line_sep>edge_attr.append((0 1 0))<line_sep>edge_index.append((i+n_x i))<line_sep>edge_attr.append((0 -1 0))<block_end><block_end><block_end>X=torch.tensor(grid dtype=torch.float)<line_sep># Exact = torch.tensor(Exact, dtype=torch.float).view(-1) edge_index=torch.tensor(edge_index dtype=torch.long).transpose(0 1)<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep><return>X edge_index edge_attr<block_end><def_stmt>grid_edge n_x n_y a=<none><block_start><if_stmt>a<ne><none><block_start>a=a.reshape(n_x n_y)<block_end>xs=np.linspace(0.0 1.0 n_x)<line_sep>ys=np.linspace(0.0 1.0 n_y)<line_sep># xs = np.array(range(n_x)) # ys = np.array(range(n_y)) grid=np.vstack([xx.ravel()<for>xx np.meshgrid(xs ys)]).T<line_sep>edge_index=[]<line_sep>edge_attr=[]<for_stmt>y range(n_y)<block_start><for_stmt>x range(n_x)<block_start>i=y<times>n_x+x<if_stmt>(x<ne>n_x-1)<block_start>d=1/n_x<line_sep>edge_index.append((i i+1))<line_sep>edge_index.append((i+1 i))<if_stmt>a<ne><none><block_start>a1=a[x y]<line_sep>a2=a[x+1 y]<line_sep>edge_attr.append((x/n_x y/n_y a1 a2))<line_sep>edge_attr.append((y/n_y x/n_x a2 a1))<block_end><block_end><if_stmt>(y<ne>n_y-1)<block_start>d=1/n_y<line_sep>edge_index.append((i i+n_x))<line_sep>edge_index.append((i+n_x i))<if_stmt>a<ne><none><block_start>a1=a[x y]<line_sep>a2=a[x y+1]<line_sep>edge_attr.append((x/n_x y/n_y a1 a2))<line_sep>edge_attr.append((y/n_y x/n_x a2 a1))<block_end><block_end><block_end><block_end>X=torch.tensor(grid dtype=torch.float)<line_sep># Exact = torch.tensor(Exact, dtype=torch.float).view(-1) edge_index=torch.tensor(edge_index dtype=torch.long).transpose(0 1)<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep><return>X edge_index edge_attr<block_end><def_stmt>grid_edge1d n_x a=<none><block_start><if_stmt>a<ne><none><block_start>a=a.reshape(n_x)<block_end>xs=np.linspace(0.0 1.0 n_x)<line_sep># xs = np.array(range(n_x)) # ys = np.array(range(n_y)) edge_index=[]<line_sep>edge_attr=[]<for_stmt>x range(n_x)<block_start>i=x<line_sep>i1=(x+1)%n_x<line_sep>edge_index.append((i 
i1))<line_sep>edge_index.append((i1 i))<line_sep>i2=(x+2)%n_x<line_sep>edge_index.append((i i2))<line_sep>edge_index.append((i2 i))<if_stmt>a<ne><none><block_start>a1=a[x]<line_sep>a2=a[x+1]<line_sep>edge_attr.append((x/n_x a1 a2))<line_sep>edge_attr.append((x/n_x a2 a1))<block_end><block_end>X=torch.tensor(xs dtype=torch.float)<line_sep># Exact = torch.tensor(Exact, dtype=torch.float).view(-1) edge_index=torch.tensor(edge_index dtype=torch.long).transpose(0 1)<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep><return>X edge_index edge_attr<block_end><def_stmt>grid_edge_aug n_x n_y a<block_start>a=a.reshape(n_x n_y)<line_sep>xs=np.linspace(0.0 1.0 n_x)<line_sep>ys=np.linspace(0.0 1.0 n_y)<line_sep># xs = np.array(range(n_x)) # ys = np.array(range(n_y)) grid=np.vstack([xx.ravel()<for>xx np.meshgrid(xs ys)]).T<line_sep>edge_index=[]<line_sep>edge_attr=[]<for_stmt>y range(n_y)<block_start><for_stmt>x range(n_x)<block_start>i=y<times>n_x+x<if_stmt>(x<ne>n_x-1)<block_start>d=1/n_x<line_sep>a1=a[x y]<line_sep>a2=a[x+1 y]<line_sep>edge_index.append((i i+1))<line_sep>edge_attr.append((d a1 a2 1/np.sqrt(np.abs(a1<times>a2)) np.exp(-(d)<power>2) np.exp(-(d/0.1)<power>2) np.exp(-(d/0.01)<power>2)))<line_sep>edge_index.append((i+1 i))<line_sep>edge_attr.append((d a2 a1 1/np.sqrt(np.abs(a1<times>a2)) np.exp(-(d)<power>2) np.exp(-(d/0.1)<power>2) np.exp(-(d/0.01)<power>2)))<block_end><if_stmt>(y<ne>n_y-1)<block_start>d=1/n_y<line_sep>a1=a[x y]<line_sep>a2=a[x y+1]<line_sep>edge_index.append((i i+n_x))<line_sep>edge_attr.append((d a1 a2 1/np.sqrt(np.abs(a1<times>a2)) np.exp(-(d)<power>2) np.exp(-(d/0.1)<power>2) np.exp(-(d/0.01)<power>2)))<line_sep>edge_index.append((i+n_x i))<line_sep>edge_attr.append((d a2 a1 1/np.sqrt(np.abs(a1<times>a2)) np.exp(-(d)<power>2) np.exp(-(d/0.1)<power>2) np.exp(-(d/0.01)<power>2)))<block_end><block_end><block_end>X=torch.tensor(grid dtype=torch.float)<line_sep># Exact = torch.tensor(Exact, dtype=torch.float).view(-1) edge_index=torch.tensor(edge_index dtype=torch.long).transpose(0 1)<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep><return>X edge_index edge_attr<block_end><def_stmt>grid_edge_aug_full n_x n_y r a<block_start>n=n_x<times>n_y<line_sep>xs=np.linspace(0.0 1.0 n_x)<line_sep>ys=np.linspace(0.0 1.0 n_y)<line_sep>grid=np.vstack([xx.ravel()<for>xx np.meshgrid(xs ys)]).T<line_sep>edge_index=[]<line_sep>edge_attr=[]<for_stmt>i1 range(n)<block_start>x1=grid[i1]<for_stmt>i2 range(n)<block_start>x2=grid[i2]<line_sep>d=np.linalg.norm(x1-x2)<if_stmt>(d<le>r)<block_start>a1=a[i1]<line_sep>a2=a[i2]<line_sep>edge_index.append((i1 i2))<line_sep>edge_attr.append((d a1 a2 1/np.sqrt(np.abs(a1<times>a2)) np.exp(-(d)<power>2) np.exp(-(d/0.1)<power>2) np.exp(-(d/0.01)<power>2)))<line_sep>edge_index.append((i2 i1))<line_sep>edge_attr.append((d a2 a1 1/np.sqrt(np.abs(a1<times>a2)) np.exp(-(d)<power>2) np.exp(-(d/0.1)<power>2) np.exp(-(d/0.01)<power>2)))<block_end><block_end><block_end>X=torch.tensor(grid dtype=torch.float)<line_sep># Exact = torch.tensor(Exact, dtype=torch.float).view(-1) edge_index=torch.tensor(edge_index dtype=torch.long).transpose(0 1)<line_sep>edge_attr=torch.tensor(edge_attr dtype=torch.float)<line_sep><return>X edge_index edge_attr<block_end><def_stmt>multi_grid depth n_x n_y grid params<block_start>edge_index_global=[]<line_sep>edge_attr_global=[]<line_sep>X_global=[]<line_sep>num_nodes=0<line_sep># build connected graph <for_stmt>l 
range(depth)<block_start>h_x_l=n_x<floordiv>(2<power>l)<line_sep>h_y_l=n_y<floordiv>(2<power>l)<line_sep>n_l=h_x_l<times>h_y_l<line_sep>a=downsample(params n_x (2<power>l))<if_stmt>grid<eq>'grid'<block_start>X,edge_index_inner,edge_attr_inner=grid(h_y_l h_x_l)<block_end><elif_stmt>grid<eq>'grid_edge'<block_start>X,edge_index_inner,edge_attr_inner=grid_edge(h_y_l h_x_l a)<block_end><elif_stmt>grid<eq>'grid_edge_aug'<block_start>X,edge_index_inner,edge_attr_inner=grid_edge(h_y_l h_x_l a)<block_end># update index edge_index_inner=edge_index_inner+num_nodes<line_sep>edge_index_global.append(edge_index_inner)<line_sep>edge_attr_global.append(edge_attr_inner)<line_sep># construct X # if (is_high): # X = torch.cat([torch.zeros(n_l, l * 2), X, torch.zeros(n_l, (depth - 1 - l) * 2)], dim=1) # else: # X_l = torch.tensor(l, dtype=torch.float).repeat(n_l, 1) # X = torch.cat([X, X_l], dim=1) X_global.append(X)<line_sep># construct edges index1=torch.tensor(range(n_l) dtype=torch.long)<line_sep>index1=index1+num_nodes<line_sep>num_nodes<augadd>n_l<line_sep># #construct inter-graph edge <if_stmt>l<ne>depth-1<block_start>index2=np.array(range(n_l<floordiv>4)).reshape(h_x_l<floordiv>2 h_y_l<floordiv>2)# torch.repeat is different from numpy index2=index2.repeat(2 axis=0).repeat(2 axis=1)<line_sep>index2=torch.tensor(index2).reshape(-1)<line_sep>index2=index2+num_nodes<line_sep>index2=torch.tensor(index2 dtype=torch.long)<line_sep>edge_index_inter1=torch.cat([index1 index2] dim=-1).reshape(2 -1)<line_sep>edge_index_inter2=torch.cat([index2 index1] dim=-1).reshape(2 -1)<line_sep>edge_index_inter=torch.cat([edge_index_inter1 edge_index_inter2] dim=1)<line_sep>edge_attr_inter1=torch.tensor((0 0 1) dtype=torch.float).repeat(n_l 1)<line_sep>edge_attr_inter2=torch.tensor((0 0 -1) dtype=torch.float).repeat(n_l 1)<line_sep>edge_attr_inter=torch.cat([edge_attr_inter1 edge_attr_inter2] dim=0)<line_sep>edge_index_global.append(edge_index_inter)<line_sep>edge_attr_global.append(edge_attr_inter)<block_end><block_end>X=torch.cat(X_global dim=0)<line_sep>edge_index=torch.cat(edge_index_global dim=1)<line_sep>edge_attr=torch.cat(edge_attr_global dim=0)<line_sep>mask_index=torch.tensor(range(n_x<times>n_y) dtype=torch.long)<line_sep># print('create multi_grid with size:', X.shape, edge_index.shape, edge_attr.shape, mask_index.shape) <return>(X edge_index edge_attr mask_index num_nodes)<block_end><def_stmt>multi_pole_grid1d theta theta_d s N is_periodic=<false><block_start>grid_list=[]<line_sep>theta_list=[]<line_sep>edge_index_list=[]<line_sep>edge_index_list_cuda=[]<line_sep>level=int(np.log2(s)-1)<line_sep>print(level)<for_stmt>l range(1 level+1)<block_start>r_l=2<power>(l-1)<line_sep>s_l=s<floordiv>r_l<line_sep>n_l=s_l<line_sep>print('level' s_l r_l n_l)<line_sep>xs=np.linspace(0.0 1.0 s_l)<line_sep>grid_l=xs<line_sep>grid_l=torch.tensor(grid_l dtype=torch.float)<line_sep>print(grid_l.shape)<line_sep>grid_list.append(grid_l)<line_sep>theta_l=theta[: : :theta_d].reshape(N s theta_d)<line_sep>theta_l=theta_l[: ::r_l :]<line_sep>theta_l=theta_l.reshape(N n_l theta_d)<line_sep>theta_l=torch.tensor(theta_l dtype=torch.float)<line_sep>print(theta_l.shape)<line_sep>theta_list.append(theta_l)<line_sep># for the finest level, we construct the nearest neighbors (NN) <if_stmt>l<eq>1<block_start>edge_index_nn=[]<for_stmt>x_i range(s_l)<block_start><for_stmt>x (-1 1)<block_start>x_j=x_i+x<if_stmt>is_periodic<block_start>x_j=x_j%s_l<block_end># if (xj, yj) is a valid node 
<if_stmt>(x_j<in>range(s_l))<block_start>edge_index_nn.append([x_i x_j])<block_end><block_end><block_end>edge_index_nn=torch.tensor(edge_index_nn dtype=torch.long)<line_sep>edge_index_nn=edge_index_nn.transpose(0 1)<line_sep>edge_index_list.append(edge_index_nn)<line_sep>edge_index_list_cuda.append(edge_index_nn.cuda())<line_sep>print('edge' edge_index_nn.shape)<block_end># we then compute the interactive neighbors -- their parents are NN but they are not NearestNeighbor edge_index_inter=[]<for_stmt>x_i range(s_l)<block_start><for_stmt>x range(-3 4)<block_start>x_j=x_i+x<line_sep># if (xj, yj) is a valid node <if_stmt>is_periodic<block_start>x_j=x_j%s_l<block_end><if_stmt>(x_j<in>range(s_l))# if (xi, yi), (xj, yj) not NearestNeighbor <block_start><if_stmt>abs(x)<ge>2# if their parents are NN <block_start><if_stmt>abs(x_i<floordiv>2-x_j<floordiv>2)%(s_l<floordiv>2)<le>1<block_start>edge_index_inter.append([x_i x_j])<block_end><block_end><block_end><block_end><block_end>edge_index_inter=torch.tensor(edge_index_inter dtype=torch.long)<line_sep>edge_index_inter=edge_index_inter.transpose(0 1)<line_sep>edge_index_list.append(edge_index_inter)<line_sep>edge_index_list_cuda.append(edge_index_inter.cuda())<line_sep>print('edge_inter' edge_index_inter.shape)<block_end>print(len(grid_list) len(edge_index_list) len(theta_list))<line_sep><return>grid_list theta_list edge_index_list edge_index_list_cuda<block_end><def_stmt>get_edge_attr grid theta edge_index<block_start>n_edges=edge_index.shape[1]<line_sep>edge_attr=np.zeros((n_edges 4))<line_sep>edge_attr[: 0:2]=grid[edge_index.transpose(0 1)].reshape((n_edges -1))<line_sep>edge_attr[: 2]=theta[edge_index[0]]<line_sep>edge_attr[: 3]=theta[edge_index[1]]<line_sep><return>torch.tensor(edge_attr dtype=torch.float)<block_end>
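# ---------------------------------------------------------------------------
# Illustrative usage sketch (not part of the original file): how the random
# sampler above is typically wired into a torch_geometric Data object.
# Assumptions: torch_geometric is installed; `a` and `u` are placeholder CPU
# tensors of shape (s*s,) holding a coefficient field and a target field on
# the full grid; the radius and sample size below are arbitrary choices.
import torch
from torch_geometric.data import Data

def build_random_graph(a, u, s=64, m=400, radius=0.10):
    meshgen = RandomMeshGenerator([[0.0, 1.0], [0.0, 1.0]], [s, s], sample_size=m)
    idx = meshgen.sample()                          # random subset of the s*s grid nodes
    grid = meshgen.get_grid()                       # (m, 2) sampled coordinates
    edge_index = meshgen.ball_connectivity(radius)  # connect nodes closer than `radius`
    edge_attr = meshgen.attributes(theta=a)         # [x_i, y_i, x_j, y_j, a_i, a_j] per edge
    x = torch.cat([grid, a[idx].reshape(-1, 1)], dim=1)
    return Data(x=x, y=u[idx], edge_index=edge_index, edge_attr=edge_attr)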
<import_stmt>django<import_from_stmt>channels.routing ProtocolTypeRouter<import_from_stmt>django.core.asgi get_asgi_application<line_sep>django.setup()<line_sep>django_asgi_app=get_asgi_application()<line_sep># Import the websocket router only after Django has been set up, so any model imports it triggers find the app registry ready. <import_from_stmt>baserow.ws.routers websocket_router<line_sep>application=ProtocolTypeRouter({"http":django_asgi_app "websocket":websocket_router})<line_sep>
# TODO: Explain 8 corners logic at the top and use it consistently # Add comments of explanation <import_stmt>numpy<as>np<import_stmt>scipy.spatial<import_from_stmt>.rotation rotate_points_along_z<def_stmt>get_size box<block_start>""" Args: box: 8x3 Returns: size: [dx, dy, dz] """<line_sep>distance=scipy.spatial.distance.cdist(box[0:1 :] box[1:5 :])<line_sep>l=distance[0 2]<line_sep>w=distance[0 0]<line_sep>h=distance[0 3]<line_sep><return>[l w h]<block_end><def_stmt>get_heading_angle box<block_start>""" Args: box: (8, 3) Returns: heading_angle: float """<line_sep>a=box[0 0]-box[1 0]<line_sep>b=box[0 1]-box[1 1]<line_sep>heading_angle=np.arctan2(a b)<line_sep><return>heading_angle<block_end><def_stmt>compute_box_3d size center rotmat<block_start>"""Compute corners of a single box from rotation matrix Args: size: list of float [dx, dy, dz] center: np.array [x, y, z] rotmat: np.array (3, 3) Returns: corners: (8, 3) """<line_sep>l,h,w=[i/2<for>i size]<line_sep>center=np.reshape(center (-1 3))<line_sep>center=center.reshape(3)<line_sep>x_corners=[l l -l -l l l -l -l]<line_sep>y_corners=[h -h -h h h -h -h h]<line_sep>z_corners=[w w w w -w -w -w -w]<line_sep>corners_3d=np.dot(np.transpose(rotmat) np.vstack([x_corners y_corners z_corners]))<line_sep>corners_3d[0 :]<augadd>center[0]<line_sep>corners_3d[1 :]<augadd>center[1]<line_sep>corners_3d[2 :]<augadd>center[2]<line_sep><return>np.transpose(corners_3d)<block_end><def_stmt>corners_to_boxes corners3d<block_start>""" 7 -------- 4 /| /| 6 -------- 5 . | | | | . 3 -------- 0 |/ |/ 2 -------- 1 Args: corners: (N, 8, 3), vertex order shown in figure above Returns: boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading] with (x, y, z) is the box center (dx, dy, dz) as the box size and heading as the clockwise rotation angle """<line_sep>boxes3d=np.zeros((corners3d.shape[0] 7))<for_stmt>i range(corners3d.shape[0])<block_start>boxes3d[i :3]=np.mean(corners3d[i : :] axis=0)<line_sep>boxes3d[i 3:6]=get_size(corners3d[i : :])<line_sep>boxes3d[i 6]=get_heading_angle(corners3d[i : :])<block_end><return>boxes3d<block_end><def_stmt>boxes_to_corners_3d boxes3d<block_start>""" 7 -------- 4 /| /| 6 -------- 5 . | | | | . 
3 -------- 0 |/ |/ 2 -------- 1 Args: boxes3d: (N, 7) [x, y, z, dx, dy, dz, heading], (x, y, z) is the box center Returns: corners: (N, 8, 3) """<line_sep>template=np.array([[1 1 -1] [1 -1 -1] [-1 -1 -1] [-1 1 -1] [1 1 1] [1 -1 1] [-1 -1 1] [-1 1 1]])/2.<line_sep># corners3d: of shape (N, 3, 8) corners3d=np.tile(boxes3d[: <none> 3:6] (1 8 1))<times>template[<none> : :]<line_sep>corners3d=rotate_points_along_z(corners3d.reshape(-1 8 3) boxes3d[: 6]).reshape(-1 8 3)<line_sep>corners3d<augadd>boxes3d[: <none> 0:3]<line_sep><return>corners3d<block_end><def_stmt>points_in_boxes points boxes<block_start>""" Args: pc: np.array (n, 3+d) boxes: np.array (m, 8, 3) Returns: mask: np.array (n, m) of type bool """<if_stmt>len(boxes)<eq>0<block_start><return>np.zeros([points.shape[0] 1] dtype=np.bool)<block_end>points=points[: :3]# get xyz # u = p6 - p5 u=boxes[: 6 :]-boxes[: 5 :]# (m, 3) # v = p6 - p7 v=boxes[: 6 :]-boxes[: 7 :]# (m, 3) # w = p6 - p2 w=boxes[: 6 :]-boxes[: 2 :]# (m, 3) # ux, vx, wx ux=np.matmul(points u.T)# (n, m) vx=np.matmul(points v.T)<line_sep>wx=np.matmul(points w.T)<line_sep># up6, up5, vp6, vp7, wp6, wp2 up6=np.sum(u<times>boxes[: 6 :] axis=1)<line_sep>up5=np.sum(u<times>boxes[: 5 :] axis=1)<line_sep>vp6=np.sum(v<times>boxes[: 6 :] axis=1)<line_sep>vp7=np.sum(v<times>boxes[: 7 :] axis=1)<line_sep>wp6=np.sum(w<times>boxes[: 6 :] axis=1)<line_sep>wp2=np.sum(w<times>boxes[: 2 :] axis=1)<line_sep>mask_u=np.logical_and(ux<le>up6 ux<ge>up5)# (1024, n) mask_v=np.logical_and(vx<le>vp6 vx<ge>vp7)<line_sep>mask_w=np.logical_and(wx<le>wp6 wx<ge>wp2)<line_sep>mask=mask_u&mask_v&mask_w# (10240, n) <return>mask<block_end><def_stmt>poly_area x y<block_start>""" Ref: http://stackoverflow.com/questions/24467972/calculate-area-of-polygon-given-x-y-coordinates """<line_sep><return>0.5<times>np.abs(np.dot(x np.roll(y 1))-np.dot(y np.roll(x 1)))<block_end><def_stmt>polygon_clip subjectPolygon clipPolygon<block_start>""" Clip a polygon with another polygon. Ref: https://rosettacode.org/wiki/Sutherland-Hodgman_polygon_clipping#Python Args: subjectPolygon: a list of (x,y) 2d points, any polygon. clipPolygon: a list of (x,y) 2d points, has to be *convex* Note: **points have to be counter-clockwise ordered** Return: a list of (x,y) vertex point for the intersection polygon. """<def_stmt>inside p<block_start><return>(cp2[0]-cp1[0])<times>(p[1]-cp1[1])<g>(cp2[1]-cp1[1])<times>(p[0]-cp1[0])<block_end><def_stmt>computeIntersection <block_start>dc=[cp1[0]-cp2[0] cp1[1]-cp2[1]]<line_sep>dp=[s[0]-e[0] s[1]-e[1]]<line_sep>n1=cp1[0]<times>cp2[1]-cp1[1]<times>cp2[0]<line_sep>n2=s[0]<times>e[1]-s[1]<times>e[0]<line_sep>n3=1.0/(dc[0]<times>dp[1]-dc[1]<times>dp[0])<line_sep><return>[(n1<times>dp[0]-n2<times>dc[0])<times>n3 (n1<times>dp[1]-n2<times>dc[1])<times>n3]<block_end>outputList=subjectPolygon<line_sep>cp1=clipPolygon[-1]<for_stmt>clipVertex clipPolygon<block_start>cp2=clipVertex<line_sep>inputList=outputList<line_sep>outputList=[]<line_sep>s=inputList[-1]<for_stmt>subjectVertex inputList<block_start>e=subjectVertex<if_stmt>inside(e)<block_start><if_stmt><not>inside(s)<block_start>outputList.append(computeIntersection())<block_end>outputList.append(e)<block_end><elif_stmt>inside(s)<block_start>outputList.append(computeIntersection())<block_end>s=e<block_end>cp1=cp2<if_stmt>len(outputList)<eq>0<block_start><return><none><block_end><block_end><return>(outputList)<block_end><def_stmt>convex_hull_intersection p1 p2<block_start>""" Compute area of two convex hull's intersection area. 
p1,p2 are a list of (x,y) tuples of hull vertices. return a list of (x,y) for the intersection and its volume """<line_sep>inter_p=polygon_clip(p1 p2)<if_stmt>inter_p<is><not><none><block_start>hull_inter=scipy.spatial.ConvexHull(inter_p)<line_sep><return>inter_p hull_inter.volume<block_end><else_stmt><block_start><return><none> 0.0<block_end><block_end><def_stmt>box3d_vol corners<block_start>''' corners: (8,3) no assumption on axis direction '''<line_sep>a=np.sqrt(np.sum((corners[0 :]-corners[1 :])<power>2))<line_sep>b=np.sqrt(np.sum((corners[1 :]-corners[2 :])<power>2))<line_sep>c=np.sqrt(np.sum((corners[0 :]-corners[4 :])<power>2))<line_sep><return>a<times>b<times>c<block_end><def_stmt>box3d_iou corners1 corners2<block_start>''' Compute 3D bounding box IoU. Input: corners1: numpy array (8,3), assume up direction is negative Y corners2: numpy array (8,3), assume up direction is negative Y Output: iou: 3D bounding box IoU iou_2d: bird's eye view 2D bounding box IoU '''<line_sep># corner points are in counter clockwise order rect1=[(corners1[i 0] corners1[i 1])<for>i range(3 -1 -1)]<line_sep>rect2=[(corners2[i 0] corners2[i 1])<for>i range(3 -1 -1)]<line_sep>area1=poly_area(np.array(rect1)[: 0] np.array(rect1)[: 1])<line_sep>area2=poly_area(np.array(rect2)[: 0] np.array(rect2)[: 1])<line_sep>inter,inter_area=convex_hull_intersection(rect1 rect2)<line_sep>iou_2d=inter_area/(area1+area2-inter_area)<line_sep>ymax=min(corners1[: 2].max() corners2[: 2].max())<line_sep>ymin=max(corners1[: 2].min() corners2[: 2].min())<line_sep>inter_vol=inter_area<times>max(0.0 ymax-ymin)<line_sep>vol1=box3d_vol(corners1)<line_sep>vol2=box3d_vol(corners2)<line_sep>iou=inter_vol/(vol1+vol2-inter_vol)<line_sep><return>iou<block_end>
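# Editor-added sanity check (assumes the module's own import of
# rotate_points_along_z resolves): two unit cubes offset by 0.5 along x
# overlap in half a cube, so IoU = 0.5 / (1 + 1 - 0.5) = 1/3.
import numpy as np

toy_boxes = np.array([
    [0.0, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0],   # x, y, z, dx, dy, dz, heading
    [0.5, 0.0, 0.0, 1.0, 1.0, 1.0, 0.0],
])
toy_corners = boxes_to_corners_3d(toy_boxes)       # (2, 8, 3)
print(box3d_iou(toy_corners[0], toy_corners[1]))   # ~0.3333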
<import_stmt>argparse<import_from_stmt>pathlib Path<import_from_stmt>typing Any Dict List<import_stmt>os<import_from_stmt>dbt.config.profile DEFAULT_PROFILES_DIR<import_from_stmt>fal.run_scripts raise_for_run_results_failures run_scripts<import_from_stmt>fal.fal_script FalScript<import_from_stmt>faldbt.project DbtModel FalDbt FalGeneralException<def_stmt>create_fal_dbt args:argparse.Namespace<block_start>real_project_dir=os.path.realpath(os.path.normpath(args.project_dir))<line_sep>real_profiles_dir=<none><line_sep>env_profiles_dir=os.getenv("DBT_PROFILES_DIR")<if_stmt>args.profiles_dir<is><not><none><block_start>real_profiles_dir=os.path.realpath(os.path.normpath(args.profiles_dir))<block_end><elif_stmt>env_profiles_dir<block_start>real_profiles_dir=os.path.realpath(os.path.normpath(env_profiles_dir))<block_end><else_stmt><block_start>real_profiles_dir=DEFAULT_PROFILES_DIR<block_end><if_stmt>hasattr(args "state")<and>args.state<is><not><none><block_start>real_state=Path(os.path.realpath(os.path.normpath(args.state)))<block_end><else_stmt><block_start>real_state=<none><block_end><return>FalDbt(real_project_dir real_profiles_dir args.select args.exclude args.selector args.keyword args.threads real_state args.target )<block_end><def_stmt>fal_run args:argparse.Namespace<block_start>"Runs the fal run command in a subprocess"<line_sep>selector_flags=args.select<or>args.exclude<or>args.selector<if_stmt>args.all<and>selector_flags<block_start><raise>FalGeneralException("Cannot pass --all flag alongside selection flags (--select/--models, --exclude, --selector)")<block_end>faldbt=create_fal_dbt(args)<line_sep>models=_get_filtered_models(faldbt args.all selector_flags args.before)<line_sep>scripts=_select_scripts(args models faldbt)<if_stmt>args.before<block_start><if_stmt><not>_scripts_flag(args)# run globals when no --script is passed <block_start>_run_global_scripts(faldbt args.before)<block_end>results=run_scripts(scripts faldbt)<line_sep>raise_for_run_results_failures(scripts results)<block_end><else_stmt><block_start>results=run_scripts(scripts faldbt)<line_sep>raise_for_run_results_failures(scripts results)<if_stmt><not>_scripts_flag(args)# run globals when no --script is passed <block_start>_run_global_scripts(faldbt args.before)<block_end><block_end><block_end><def_stmt>_scripts_flag args:argparse.Namespace<arrow>bool<block_start><return>bool(args.scripts)<block_end><def_stmt>_select_scripts args:argparse.Namespace models:List[DbtModel] faldbt:FalDbt<arrow>List[FalScript]<block_start>scripts=[]<line_sep>scripts_flag=_scripts_flag(args)<for_stmt>model models<block_start>model_scripts=model.get_scripts(args.keyword bool(args.before))<for_stmt>path model_scripts<block_start><if_stmt><not>scripts_flag# run all scripts when no --script is passed <block_start>scripts.append(FalScript(faldbt model path))<block_end><elif_stmt>path<in>args.scripts# if --script selector is there only run selected scripts <block_start>scripts.append(FalScript(faldbt model path))<block_end><block_end><block_end><return>scripts<block_end><def_stmt>_run_global_scripts faldbt:FalDbt is_before:bool<block_start>global_scripts=list(map(<lambda>path:FalScript(faldbt <none> path) faldbt._global_script_paths["before"<if>is_before<else>"after"] ))<line_sep>results=run_scripts(global_scripts faldbt)<line_sep>raise_for_run_results_failures(global_scripts results)<block_end><def_stmt>_get_models_with_keyword faldbt:FalDbt<arrow>List[DbtModel]<block_start><return>list(filter(<lambda>model:faldbt.keyword<in>model.meta 
faldbt.list_models()))<block_end><def_stmt>_get_filtered_models faldbt:FalDbt all selected before<arrow>List[DbtModel]<block_start>selected_ids=_models_ids(faldbt._compile_task._flattened_nodes)<line_sep>filtered_models:List[DbtModel]=[]<if_stmt>(<not>all<and><not>selected<and><not>before<and>faldbt._run_results.nativeRunResult<is><none>)<block_start><import_from_stmt>faldbt.parse FalParseError<line_sep><raise>FalParseError("Cannot define models to run without selection flags or dbt run_results artifact or --before flag")<block_end>models=_get_models_with_keyword(faldbt)<for_stmt>node models<block_start><if_stmt>selected<block_start><if_stmt>node.unique_id<in>selected_ids<block_start>filtered_models.append(node)<block_end><block_end><elif_stmt>before<block_start><if_stmt>node.get_scripts(faldbt.keyword before)<ne>[]<block_start>filtered_models.append(node)<block_end><block_end><elif_stmt>all<block_start>filtered_models.append(node)<block_end><elif_stmt>node.status<ne>"skipped"<block_start>filtered_models.append(node)<block_end><block_end><return>filtered_models<block_end><def_stmt>_models_ids models<block_start><return>list(map(<lambda>r:r.unique_id models))<block_end>
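# Editor-added sketch of the argparse namespace fal_run() expects. The attribute
# names mirror the accesses above; the values are placeholders, not fal's real
# defaults, and running it requires an actual dbt project and profiles.
import argparse

example_args = argparse.Namespace(
    project_dir=".", profiles_dir=None,
    select=None, exclude=None, selector=None,
    keyword="fal", threads=1, state=None, target=None,
    all=False, before=False, scripts=None,
)
# fal_run(example_args)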
<import_stmt>logging<line_sep>logger=logging.getLogger(__name__)<def_stmt>make_file_safe_api_name api_name<block_start>"""Make an api name safe for use in a file name"""<line_sep><return>"".join([c<for>c api_name<if>c.isalpha()<or>c.isdigit()<or>c<in>("." "_" "-")])<block_end>
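# Editor-added quick check of the sanitiser above: everything except letters,
# digits, '.', '_' and '-' is dropped.
assert make_file_safe_api_name("My API v2!") == "MyAPIv2"
assert make_file_safe_api_name("orders.v1_beta-2") == "orders.v1_beta-2"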
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. # coding: utf-8 """Classes for registering and storing bijection/transformations from unconstrained space to a given domain. """<import_from_stmt>numbers Number<import_from_stmt>.transformation ExpTransform AffineTransform SigmoidTransform ComposeTransform <import_from_stmt>..distributions.constraint Constraint Positive GreaterThan GreaterThanEq LessThan Interval HalfOpenInterval <line_sep>__all__=['domain_map' 'biject_to' 'transform_to']<class_stmt>domain_map()<block_start>""" Abstract Class for registering and storing mappings from domain to bijections/transformations """<def_stmt>__init__ self# constraint -> constraint -> transformation <block_start>self._storage={}<line_sep>super(domain_map self).__init__()<block_end><def_stmt>register self constraint factory=<none><block_start>"""Register a bijection/transformation from unconstrained space to the domain specified by `constraint`. Parameters ---------- constraint : Type or Object A class of constraint or an object of constraint factory : callable A function that outputs a `transformation` given a `constraint`, by default None. """<line_sep># Decorator mode <if_stmt>factory<is><none><block_start><return><lambda>factory:self.register(constraint factory)<block_end><if_stmt>isinstance(constraint Constraint)<block_start>constraint=type(constraint)<block_end><if_stmt><not>isinstance(constraint type)<or><not>issubclass(constraint Constraint)<block_start><raise>TypeError('Expected constraint to be either a Constraint subclass or instance, '<concat>'but got {}'.format(constraint))<block_end>self._storage[constraint]=factory<line_sep><return>factory<block_end><def_stmt>__call__ self constraint<block_start><try_stmt><block_start>factory=self._storage[type(constraint)]<block_end><except_stmt>KeyError<block_start><raise>NotImplementedError('Cannot transform {} constraints'.format(type(constraint).__name__))<block_end><return>factory(constraint)<block_end><block_end>biject_to=domain_map()<line_sep>transform_to=domain_map()<line_sep>@biject_to.register(Positive)@transform_to.register(Positive)<def_stmt>_transform_to_positive constraint# Although `constraint` is not used in this factory function, # we decide to keep it for the purpose of consistency. 
# pylint: disable=unused-argument <block_start><return>ExpTransform()<block_end>@biject_to.register(GreaterThan)@biject_to.register(GreaterThanEq)@transform_to.register(GreaterThan)@transform_to.register(GreaterThanEq)<def_stmt>_transform_to_greater_than constraint<block_start><return>ComposeTransform([ExpTransform() AffineTransform(constraint._lower_bound 1)])<block_end>@biject_to.register(LessThan)@transform_to.register(LessThan)<def_stmt>_transform_to_less_than constraint<block_start><return>ComposeTransform([ExpTransform() AffineTransform(constraint._upper_bound -1)])<block_end>@biject_to.register(Interval)@biject_to.register(HalfOpenInterval)@transform_to.register(Interval)@transform_to.register(HalfOpenInterval)<def_stmt>_transform_to_interval constraint# Handle the special case of the unit interval. <block_start>lower_is_0=isinstance(constraint._lower_bound Number)<and>constraint._lower_bound<eq>0<line_sep>upper_is_1=isinstance(constraint._upper_bound Number)<and>constraint._upper_bound<eq>1<if_stmt>lower_is_0<and>upper_is_1<block_start><return>SigmoidTransform()<block_end>loc=constraint._lower_bound<line_sep>scale=constraint._upper_bound-constraint._lower_bound<line_sep><return>ComposeTransform([SigmoidTransform() AffineTransform(loc scale)])<block_end>
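# Editor-added usage sketch of the registries defined above. It assumes the
# constraint classes imported at the top can be built directly from their
# bounds (the constructor signatures are an assumption, not verified here).
unit = transform_to(Interval(0, 1))        # SigmoidTransform() for the unit interval
pos = transform_to(Positive())             # ExpTransform()
gt3 = biject_to(GreaterThan(3.0))          # exp, then an affine shift by the lower bound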
#! /usr/bin/env python # -*- coding: utf-8 -*- """ Readqc report: record stat key-value in readqc-stats.txt ### JGI_Analysis_Utility_Illumina::illumina_read_level_report Created: Jul 24 2013 sulsj (<EMAIL>) """<import_stmt>os<import_stmt>sys<line_sep>## custom libs in "../lib/" srcDir=os.path.dirname(__file__)<line_sep>sys.path.append(os.path.join(srcDir 'tools'))## ./tools sys.path.append(os.path.join(srcDir '../lib'))## rqc-pipeline/lib sys.path.append(os.path.join(srcDir '../tools'))## rqc-pipeline/tools <import_from_stmt>readqc_constants RQCReadQcConfig ReadqcStats<import_from_stmt>rqc_constants RQCExitCodes<import_from_stmt>os_utility run_sh_command<import_from_stmt>common append_rqc_stats append_rqc_file<line_sep>statsFile=RQCReadQcConfig.CFG["stats_file"]<line_sep>filesFile=RQCReadQcConfig.CFG["files_file"]<line_sep>""" Title : read_megablast_hits Function : This function generates tophit list of megablast against different databases. Usage : read_megablast_hits(db_name, log) Args : blast db name or full path Returns : SUCCESS FAILURE Comments : """<def_stmt>read_megablast_hits db log<block_start>currentDir=RQCReadQcConfig.CFG["output_path"]<line_sep>megablastDir="megablast"<line_sep>megablastPath=os.path.join(currentDir megablastDir)<line_sep>statsFile=RQCReadQcConfig.CFG["stats_file"]<line_sep>filesFile=RQCReadQcConfig.CFG["files_file"]<line_sep>## ## Process blast output files ## matchings=0<line_sep>hitCount=0<line_sep>parsedFile=os.path.join(megablastPath "megablast.*.%s*.parsed"%(db))<line_sep>matchings,_,exitCode=run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l "%(parsedFile) <true> log)<if_stmt>exitCode<eq>0## if parsed file found. <block_start>t=matchings.split()<if_stmt>len(t)<eq>1<and>t[0].isdigit()<block_start>hitCount=int(t[0])<block_end>append_rqc_stats(statsFile ReadqcStats.ILLUMINA_READ_MATCHING_HITS+" "+db hitCount log)<line_sep>## ## add .parsed file ## parsedFileFound,_,exitCode=run_sh_command("ls %s"%(parsedFile) <true> log)<if_stmt>parsedFileFound<block_start>parsedFileFound=parsedFileFound.strip()<line_sep>append_rqc_file(filesFile ReadqcStats.ILLUMINA_READ_PARSED_FILE+" "+db os.path.join(megablastPath parsedFileFound) log)<block_end><else_stmt><block_start>log.error("- Failed to add megablast parsed file of %s."%(db))<line_sep><return>RQCExitCodes.JGI_FAILURE<block_end>## ## wc the top hits ## topHit=0<line_sep>tophitFile=os.path.join(megablastPath "megablast.*.%s*.parsed.tophit"%(db))<line_sep>tophits,_,exitCode=run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l "%(tophitFile) <true> log)<line_sep>t=tophits.split()<if_stmt>len(t)<eq>1<and>t[0].isdigit()<block_start>topHit=int(t[0])<block_end>append_rqc_stats(statsFile ReadqcStats.ILLUMINA_READ_TOP_HITS+" "+db topHit log)<line_sep>## ## wc the taxonomic species ## spe=0<line_sep>taxlistFile=os.path.join(megablastPath "megablast.*.%s*.parsed.taxlist"%(db))<line_sep>species,_,exitCode=run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l "%(taxlistFile) <true> log)<line_sep>t=species.split()<if_stmt>len(t)<eq>1<and>t[0].isdigit()<block_start>spe=int(t[0])<block_end>append_rqc_stats(statsFile ReadqcStats.ILLUMINA_READ_TAX_SPECIES+" "+db spe log)<line_sep>## ## wc the top 100 hit ## top100hits=0<line_sep>top100hitFile=os.path.join(megablastPath "megablast.*.%s*.parsed.top100hit"%(db))<line_sep>species,_,exitCode=run_sh_command("grep -v '^#' %s 2>/dev/null | wc -l "%(top100hitFile) <true> 
log)<line_sep>t=species.split()<if_stmt>len(t)<eq>1<and>t[0].isdigit()<block_start>top100hits=int(t[0])<block_end>append_rqc_stats(statsFile ReadqcStats.ILLUMINA_READ_TOP_100HITS+" "+db top100hits log)<line_sep>## ## Find and add taxlist file ## taxListFound,_,exitCode=run_sh_command("ls %s"%(taxlistFile) <true> log)<line_sep>taxListFound=taxListFound.strip()<if_stmt>taxListFound<block_start>append_rqc_file(filesFile ReadqcStats.ILLUMINA_READ_TAXLIST_FILE+" "+db os.path.join(megablastPath taxListFound) log)<block_end><else_stmt><block_start>log.error("- Failed to add megablast taxlist file of %s."%(db))<line_sep><return>RQCExitCodes.JGI_FAILURE<block_end>## ## Find and add tophit file ## tophitFound,_,exitCode=run_sh_command("ls %s"%(tophitFile) <true> log)<line_sep>tophitFound=tophitFound.strip()<if_stmt>tophitFound<block_start>append_rqc_file(filesFile ReadqcStats.ILLUMINA_READ_TOPHIT_FILE+" "+db os.path.join(megablastPath tophitFound) log)<block_end><else_stmt><block_start>log.error("- Failed to add megablast tophit file of %s."%(db))<line_sep><return>RQCExitCodes.JGI_FAILURE<block_end>## ## Find and add top100hit file ## top100hitFound,_,exitCode=run_sh_command("ls %s"%(top100hitFile) <true> log)<line_sep>top100hitFound=top100hitFound.strip()<if_stmt>top100hitFound<block_start>append_rqc_file(filesFile ReadqcStats.ILLUMINA_READ_TOP100HIT_FILE+" "+db os.path.join(megablastPath top100hitFound) log)<block_end><else_stmt><block_start>log.error("- Failed to add megablast top100hit file of %s."%(db))<line_sep><return>RQCExitCodes.JGI_FAILURE<block_end><block_end><else_stmt><block_start>log.info("- No blast hits for %s."%(db))<block_end><return>RQCExitCodes.JGI_SUCCESS<block_end>""" Title : read_level_qual_stats Function : Generate qual scores and plots of read level 20mer sampling Usage : read_level_mer_sampling($analysis, $summary_file_dir) Args : 1) A reference to an JGI_Analysis object 2) current working folder wkdir/uniqueness Returns : JGI_SUCCESS: Illumina read level report could be successfully generated. JGI_FAILURE: Illumina read level report could not be generated. Comments : This function is intended to be called at the very end of the illumina read level data processing script. """<def_stmt>read_level_mer_sampling dataToRecordDict dataFile log<block_start>retCode=RQCExitCodes.JGI_FAILURE<line_sep>## Old data #nSeq nStartUniMer fracStartUniMer nRandUniMer fracRandUniMer ## 0 1 2 3 4 ##25000 2500 0.1 9704 0.3882 ## New data #count first rand first_cnt rand_cnt # 0 1 2 3 4 #25000 66.400 76.088 16600 19022 #50000 52.148 59.480 13037 14870 #75000 46.592 53.444 11648 13361 #100000 43.072 49.184 10768 12296 ... 
<if_stmt>os.path.isfile(dataFile)<block_start><with_stmt>open(dataFile "r")<as>merFH<block_start>lines=merFH.readlines()<line_sep>## last line t=lines[-1].split('\t')<line_sep># breaks 2016-09-07 #assert len(t) == 5 totalMers=int(t[0])<line_sep>## new by bbcountunique uniqStartMerPer=float("%.2f"%(float(t[1])))<line_sep>uniqRandtMerPer=float("%.2f"%(float(t[2])))<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_SAMPLE_SIZE]=totalMers<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_PERCENTAGE_STARTING_MERS]=uniqStartMerPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_20MER_PERCENTAGE_RANDOM_MERS]=uniqRandtMerPer<line_sep>retCode=RQCExitCodes.JGI_SUCCESS<block_end><block_end><else_stmt><block_start>log.error("- qhist file not found: %s"%(dataFile))<block_end><return>retCode<block_end>""" Title : base_level_qual_stats Function : Generate qual scores and plots of read level QC Usage : base_level_qual_stats($analysis, $) Args : 1) A reference to an JGI_Analysis object 2) current working folder wkdir/qual Returns : JGI_SUCCESS: Illumina read level report could be successfully generated. JGI_FAILURE: Illumina read level report could not be generated. Comments : This function is intended to be called at the very end of the illumina base level data processing script. """<def_stmt>base_level_qual_stats dataToRecordDict reformatObqhistFile log<block_start>cummlatPer=0<line_sep>cummlatBase=0<line_sep>statsPerc={30:0 25:0 20:0 15:0 10:0 5:0}<line_sep>statsBase={30:0 25:0 20:0 15:0 10:0 5:0}<line_sep>Q30_seen=0<line_sep>Q25_seen=0<line_sep>Q20_seen=0<line_sep>Q15_seen=0<line_sep>Q10_seen=0<line_sep>Q5_seen=0<line_sep>## New format ##Median 38 ##Mean 37.061 ##STDev 4.631 ##Mean_30 37.823 ##STDev_30 1.699 ##Quality bases fraction #0 159 0.00008 #1 0 0.00000 #2 12175 0.00593 #3 0 0.00000 #4 0 0.00000 #5 0 0.00000 #6 0 0.00000 allLines=open(reformatObqhistFile).readlines()<for_stmt>l allLines[::-1]<block_start>l=l.strip()<line_sep>## ## obqhist file format example ## # #Median 36 # #Mean 33.298 # #STDev 5.890 # #Mean_30 35.303 # #STDev_30 1.517 # #Quality bases fraction # 0 77098 0.00043 # 1 0 0.00000 # 2 0 0.00000 # 3 0 0.00000 # 4 0 0.00000 # 5 0 0.00000 # 6 0 0.00000 <if_stmt>len(l)<g>0<block_start><if_stmt>l.startswith("#")<block_start><if_stmt>l.startswith("#Mean_30")<block_start>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30_SCORE_MEAN]=l.split('\t')[1]<block_end><elif_stmt>l.startswith("#STDev_30")<block_start>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30_SCORE_STD]=l.split('\t')[1]<block_end><elif_stmt>l.startswith("#Mean")<block_start>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_OVERALL_BASES_Q_SCORE_MEAN]=l.split('\t')[1]<block_end><elif_stmt>l.startswith("#STDev")<block_start>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_OVERALL_BASES_Q_SCORE_STD]=l.split('\t')[1]<block_end><continue><block_end>qavg=<none><line_sep>nbase=<none><line_sep>percent=<none><line_sep>t=l.split()<try_stmt><block_start>qavg=int(t[0])<line_sep>nbase=int(t[1])<line_sep>percent=float(t[2])<block_end><except_stmt>IndexError<block_start>log.warn("parse error in base_level_qual_stats: %s %s %s %s"%(l qavg nbase percent))<line_sep><continue><block_end>log.debug("base_level_qual_stats(): qavg and nbase and percent: %s %s %s"%(qavg nbase percent))<line_sep>cummlatPer<augadd>percent<times>100.0<line_sep>cummlatPer=float("%.f"%(cummlatPer))<if_stmt>cummlatPer<g>100<block_start>cummlatPer=100.0<block_end>## RQC-621 
cummlatBase<augadd>nbase<if_stmt>qavg<eq>30<block_start>Q30_seen=1<line_sep>statsPerc[30]=cummlatPer<line_sep>statsBase[30]=cummlatBase<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q30]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C30]=cummlatBase<block_end><elif_stmt>qavg<eq>25<block_start>Q25_seen=1<line_sep>statsPerc[25]=cummlatPer<line_sep>statsBase[25]=cummlatBase<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q25]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C25]=cummlatBase<block_end><elif_stmt>qavg<eq>20<block_start>Q20_seen=1<line_sep>statsPerc[20]=cummlatPer<line_sep>statsBase[20]=cummlatBase<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q20]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C20]=cummlatBase<block_end><elif_stmt>qavg<eq>15<block_start>Q15_seen=1<line_sep>statsPerc[15]=cummlatPer<line_sep>statsBase[15]=cummlatBase<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q15]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C15]=cummlatBase<block_end><elif_stmt>qavg<eq>10<block_start>Q10_seen=1<line_sep>statsPerc[10]=cummlatPer<line_sep>statsBase[10]=cummlatBase<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q10]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C10]=cummlatBase<block_end><elif_stmt>qavg<eq>5<block_start>Q5_seen=1<line_sep>statsPerc[5]=cummlatPer<line_sep>statsBase[5]=cummlatBase<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q5]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C5]=cummlatBase<block_end><block_end><block_end>## Double check that no value is missing. <if_stmt>Q25_seen<eq>0<and>Q30_seen<ne>0<block_start>Q25_seen=1<line_sep>statsPerc[25]=statsPerc[30]<line_sep>statsBase[25]=statsBase[30]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q25]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C25]=cummlatBase<block_end><if_stmt>Q20_seen<eq>0<and>Q25_seen<ne>0<block_start>Q20_seen=1<line_sep>statsPerc[20]=statsPerc[25]<line_sep>statsBase[20]=statsBase[25]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q20]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C20]=cummlatBase<block_end><if_stmt>Q15_seen<eq>0<and>Q20_seen<ne>0<block_start>Q15_seen=1<line_sep>statsPerc[15]=statsPerc[20]<line_sep>statsBase[15]=statsBase[20]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q15]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C15]=cummlatBase<block_end><if_stmt>Q10_seen<eq>0<and>Q15_seen<ne>0<block_start>Q10_seen=1<line_sep>statsPerc[10]=statsPerc[15]<line_sep>statsBase[10]=statsBase[15]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q10]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C10]=cummlatBase<block_end><if_stmt>Q5_seen<eq>0<and>Q10_seen<ne>0<block_start>Q5_seen=1<line_sep>statsPerc[5]=statsPerc[10]<line_sep>statsBase[5]=statsBase[10]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_Q5]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_BASE_C5]=cummlatBase<block_end><if_stmt>Q30_seen<eq>0<block_start>log.error("Q30 is 0. Base quality values are ZERO.")<block_end>log.debug("Q and C values: %s"%(dataToRecordDict))<line_sep><return>RQCExitCodes.JGI_SUCCESS<block_end>""" Title : q20_score Function : this method returns Q20 using a qrpt file as input Usage : JGI_QC_Utility::qc20_score($qrpt) Args : $_[0] : qrpt file. 
Returns : a number of Q20 score Comments : """<line_sep># def q20_score(qrpt, log): # log.debug("qrpt file %s" % (qrpt)) # # q20 = None # num = 0 # # if os.path.isfile(qrpt): # with open(qrpt, "r") as qrptFH: # for l in qrptFH: # num += 1 # # if num == 1: # continue # # ############## # ## Old format # ## READ1.qrpt # ## column count min max sum mean Q1 med Q3 IQR lW rW A_Count C_Count G_Count T_Count N_Count Max_count # ## 1 378701 2 34 12447306 32.87 31 34 34 3 27 34 108573 83917 81999 104127 85 378701 # ## 2 378701 2 34 12515957 33.05 33 34 34 1 32 34 112178 83555 84449 98519 0 378701 # ## 3 378701 2 34 12519460 33.06 33 34 34 1 32 34 104668 72341 80992 120700 0 378701 # ## 4 378701 2 37 13807944 36.46 37 37 37 0 37 37 96935 95322 83958 102440 46 378701 # ## 5 378701 2 37 13790443 36.42 37 37 37 0 37 37 114586 68297 78020 117740 58 378701 # ## # ## or # ## # ## READ2.qrpt # ## column count min max sum mean Q1 med Q3 IQR lW rW A_Count C_Count G_Count T_Count N_Count Max_count # ## 1 378701 2 34 8875097 23.44 25 26 28 3 21 32 106904 84046 81795 105956 0 378701 # ## 2 378701 2 34 6543224 17.28 15 16 26 11 2 34 107573 77148 97953 88998 7029 378701 # ## 3 378701 2 34 7131741 18.83 16 16 26 10 2 34 96452 83003 107891 91355 0 378701 # ## 4 378701 2 37 9686653 25.58 19 32 33 14 2 37 97835 78304 87944 114618 0 378701 # ## 5 378701 2 37 10208226 26.96 25 33 35 10 10 37 98021 90611 89040 101029 0 378701 # # pos = None # mean = None # t = l.split("\t") # assert len(t) > 6 # pos = int(t[0]) # mean = float(t[5]) # # if mean and pos: # if mean < 20: # return pos - 1 # else: # q20 = pos # # else: # log.error("- qhist file not found: %s" % (qrpt)) # return None # # # return q20 <def_stmt>q20_score_new bqHist readNum log<block_start>log.debug("q20_score_new(): bqHist file = %s"%(bqHist))<line_sep>q20=<none><if_stmt>os.path.isfile(bqHist)<block_start><with_stmt>open(bqHist "r")<as>qrptFH<block_start><for_stmt>l qrptFH<block_start><if_stmt>l.startswith('#')<block_start><continue><block_end>## New data # 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 ##BaseNum count_1 min_1 max_1 mean_1 Q1_1 med_1 Q3_1 LW_1 RW_1 count_2 min_2 max_2 mean_2 Q1_2 med_2 Q3_2 LW_2 RW_2 # 0 6900 0 36 33.48 33 34 34 29 36 6900 0 36 33.48 33 34 34 29 36 pos=<none><line_sep>mean=<none><line_sep>t=l.split("\t")<line_sep>pos=int(t[0])+1<if_stmt>readNum<eq>1<block_start>mean=float(t[4])<block_end><else_stmt><block_start>mean=float(t[13])<block_end><if_stmt>mean<and>pos<block_start><if_stmt>mean<l>20<block_start><return>pos-1<block_end><else_stmt><block_start>q20=pos<block_end><block_end><block_end><block_end><block_end><else_stmt><block_start>log.error("- bqHist file not found: %s"%(bqHist))<line_sep><return><none><block_end><return>q20<block_end>""" Title : read_level_qual_stats Function : Generate qual scores and plots of read level QC Usage : read_level_qual_stats($analysis, $) Args : 1) A reference to an JGI_Analysis object 2) current working folder wkdir/qual Returns : JGI_SUCCESS: Illumina read level report could be successfully generated. JGI_FAILURE: Illumina read level report could not be generated. Comments : This function is intended to be called at the very end of the illumina read level data processing script. 
"""<def_stmt>read_level_qual_stats dataToRecordDict qhistTxtFullPath log<block_start>retCode=RQCExitCodes.JGI_FAILURE<line_sep>cummlatPer=0.0<line_sep>Q30_seen=0<line_sep>Q25_seen=0<line_sep>Q20_seen=0<line_sep>Q15_seen=0<line_sep>Q10_seen=0<line_sep>Q5_seen=0<if_stmt>os.path.isfile(qhistTxtFullPath)<block_start>stats={30:0 25:0 20:0 15:0 10:0 5:0}<line_sep>allLines=open(qhistTxtFullPath).readlines()<for_stmt>l allLines[::-1]<block_start><if_stmt><not>l<block_start><break><block_end><if_stmt>l.startswith('#')<block_start><continue><block_end>t=l.split()<assert_stmt>len(t)<eq>3<line_sep>qavg=int(t[0])<line_sep>percent=float(t[2])<times>100.0## 20140826 Changed for bbtools cummlatPer=cummlatPer+percent<line_sep>cummlatPer=float("%.2f"%cummlatPer)<if_stmt>qavg<le>30<and>qavg<g>25<and>Q30_seen<eq>0<block_start>Q30_seen=1<line_sep>stats[30]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q30]=cummlatPer<block_end><elif_stmt>qavg<le>25<and>qavg<g>20<and>Q25_seen<eq>0<block_start>Q25_seen=1<line_sep>stats[25]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q25]=cummlatPer<block_end><elif_stmt>qavg<le>20<and>qavg<g>15<and>Q20_seen<eq>0<block_start>Q20_seen=1<line_sep>stats[20]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q20]=cummlatPer<block_end><elif_stmt>qavg<le>15<and>qavg<g>10<and>Q15_seen<eq>0<block_start>Q15_seen=1<line_sep>stats[15]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q15]=cummlatPer<block_end><elif_stmt>qavg<le>10<and>qavg<g>5<and>Q10_seen<eq>0<block_start>Q10_seen=1<line_sep>stats[10]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q10]=cummlatPer<block_end><elif_stmt>qavg<le>5<and>Q5_seen<eq>0<block_start>Q5_seen=1<line_sep>stats[5]=cummlatPer<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q5]=cummlatPer<block_end><block_end>### Double check that no value is missing. <if_stmt>Q25_seen<eq>0<and>Q30_seen<ne>0<block_start>Q25_seen=1<line_sep>stats[25]=stats[30]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q25]=cummlatPer<block_end><if_stmt>Q20_seen<eq>0<and>Q25_seen<ne>0<block_start>Q20_seen=1<line_sep>stats[20]=stats[25]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q20]=cummlatPer<block_end><if_stmt>Q15_seen<eq>0<and>Q20_seen<ne>0<block_start>Q15_seen=1<line_sep>stats[15]=stats[20]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q15]=cummlatPer<block_end><if_stmt>Q10_seen<eq>0<and>Q15_seen<ne>0<block_start>Q10_seen=1<line_sep>stats[10]=stats[15]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q10]=cummlatPer<block_end><if_stmt>Q5_seen<eq>0<and>Q10_seen<ne>0<block_start>Q5_seen=1<line_sep>stats[5]=stats[10]<line_sep>dataToRecordDict[ReadqcStats.ILLUMINA_READ_Q5]=cummlatPer<block_end><if_stmt>Q30_seen<eq>0<block_start>log.error("Q30 is 0 . Read quality values are ZERO.")<block_end>log.debug("Q30 %s, Q25 %s, Q20 %s, Q15 %s, Q10 %s, Q5 %s"%(stats[30] stats[25] stats[20] stats[15] stats[10] stats[5]))<line_sep>retCode=RQCExitCodes.JGI_SUCCESS<block_end><else_stmt><block_start>log.error("- qhist file not found: %s"%(qhistTxtFullPath))<block_end><return>retCode<block_end>""" Title : read_gc_mean Function : This function generates average GC content % and its standard deviation and put them into database. 
Usage : read_gc_mean($analysis) Args : 1) A reference to an JGI_Analysis object Returns : JGI_SUCCESS: JGI_FAILURE: Comments : """<def_stmt>read_gc_mean histFile log<block_start>mean=0.0<line_sep>stdev=0.0<line_sep>retCode=RQCExitCodes.JGI_FAILURE<if_stmt>os.path.isfile(histFile)<block_start><with_stmt>open(histFile "r")<as>histFH<block_start>line=histFH.readline()## we only need the first line # Ex) #Found 1086 total values totalling 420.3971. <0.387106 +/- 0.112691> <if_stmt>len(line)<eq>0<or><not>line.startswith("#Found")<block_start>log.error("- GC content hist text file does not contains right results: %s, %s"%(histFile line))<line_sep>retCode=RQCExitCodes.JGI_FAILURE<block_end><else_stmt><block_start>toks=line.split()<assert_stmt>len(toks)<eq>9<line_sep>mean=float(toks[6][1:])<times>100.0<line_sep>stdev=float(toks[8][:-1])<times>100.0<line_sep>log.debug("mean, stdev = %.2f, %.2f"%(mean stdev))<block_end>retCode=RQCExitCodes.JGI_SUCCESS<block_end><block_end><else_stmt><block_start>log.error("- gc hist file not found: %s"%(histFile))<block_end><return>retCode mean stdev<block_end><if_stmt>__name__<eq>"__main__"<block_start>exit(0)<block_end>## EOF
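# Editor-added worked example of the parsing in read_gc_mean() on the sample
# header line quoted in the function body: toks[6] and toks[8] strip the '<'
# and '>' characters before scaling to percent.
line = "#Found 1086 total values totalling 420.3971. <0.387106 +/- 0.112691>"
toks = line.split()                  # 9 tokens
mean = float(toks[6][1:]) * 100.0    # '<0.387106' -> 38.7106 %
stdev = float(toks[8][:-1]) * 100.0  # '0.112691>' -> 11.2691 %
print(mean, stdev)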
""" style fixes and lookups for templates """<import_from_stmt>unittest.mock patch<import_from_stmt>django.test TestCase<import_from_stmt>bookwyrm models<import_from_stmt>bookwyrm.templatetags notification_page_tags<line_sep>@patch("bookwyrm.activitystreams.add_status_task.delay")@patch("bookwyrm.activitystreams.remove_status_task.delay")<class_stmt>NotificationPageTags(TestCase)<block_start>"""lotta different things here"""<def_stmt>setUp self<block_start>"""create some filler objects"""<with_stmt>patch("bookwyrm.suggested_users.rerank_suggestions_task.delay") patch("bookwyrm.activitystreams.populate_stream_task.delay") patch("bookwyrm.lists_stream.populate_lists_task.delay")<block_start>self.user=models.User.objects.create_user("<EMAIL>" "<EMAIL>" "mouseword" local=<true> localname="mouse" )<block_end><block_end><def_stmt>test_related_status self *_<block_start>"""gets the subclass model for a notification status"""<with_stmt>patch("bookwyrm.models.activitypub_mixin.broadcast_task.apply_async")<block_start>status=models.Status.objects.create(content="hi" user=self.user)<block_end>notification=models.Notification.objects.create(user=self.user notification_type="MENTION" related_status=status)<line_sep>result=notification_page_tags.related_status(notification)<line_sep>self.assertIsInstance(result models.Status)<block_end><block_end>
# -*- coding: utf-8 -*- <import_stmt>tensorflow<as>tf<line_sep># [Used at test/inference time] Returns a function that takes the previous output, picks the argmax index, and looks up that token's embedding. <def_stmt>extract_argmax_and_embed embedding output_projection=<none><block_start>""" Get a loop_function that extracts the previous symbol and embeds it. Used by decoder. :param embedding: embedding tensor for symbol :param output_projection: None or a pair (W, B). If provided, each fed previous output will first be multiplied by W and added B. :return: A loop function """<def_stmt>loop_function prev _<block_start><if_stmt>output_projection<is><not><none><block_start>prev=tf.matmul(prev output_projection[0])+output_projection[1]<block_end>prev_symbol=tf.argmax(prev 1)# get the corresponding index emb_prev=tf.gather(embedding prev_symbol)# look up the embedding for this index <return>emb_prev<block_end><return>loop_function<block_end># Decoding part of the RNN. # During training, feed the ground-truth inputs; at test time, feed the output at step t as the input at step t+1. <def_stmt>rnn_decoder_with_attention decoder_inputs initial_state cell loop_function attention_states scope=<none>#3D Tensor [batch_size x attn_length x attn_size] <block_start>"""RNN decoder for the sequence-to-sequence model. Args: decoder_inputs: A list of 2D Tensors [batch_size x input_size]. It is the decoder input. initial_state: 2D Tensor with shape [batch_size x cell.state_size]. It is the encoded vector of the input sentences, which represents the 'thought vector'. cell: core_rnn_cell.RNNCell defining the cell function and size. loop_function: If not None, this function will be applied to the i-th output in order to generate the i+1-st input, and decoder_inputs will be ignored, except for the first element ("GO" symbol). This can be used for decoding, but also for training to emulate http://arxiv.org/abs/1506.03099. Signature -- loop_function(prev, i) = next * prev is a 2D Tensor of shape [batch_size x output_size], * i is an integer, the step number (when advanced control is needed), * next is a 2D Tensor of shape [batch_size x input_size]. attention_states: 3D Tensor [batch_size x attn_length x attn_size]. It represents the input X. scope: VariableScope for the created subgraph; defaults to "rnn_decoder". Returns: A tuple of the form (outputs, state), where: outputs: A list of the same length as decoder_inputs of 2D Tensors with shape [batch_size x output_size] containing generated outputs. state: The state of each cell at the final time-step. It is a 2D Tensor of shape [batch_size x cell.state_size]. (Note that in some cases, like basic RNN cell or GRU cell, outputs and states can be the same. They are different for LSTM cells though.) """<with_stmt>tf.variable_scope(scope<or>"rnn_decoder")<block_start>print("rnn_decoder_with_attention started...")<line_sep>state=initial_state#[batch_size x cell.state_size].
_,hidden_size=state.get_shape().as_list()#200 attention_states_original=attention_states<line_sep>batch_size,sequence_length,_=attention_states.get_shape().as_list()<line_sep>outputs=[]<line_sep>prev=<none><line_sep>################################################# <for_stmt>i,inp enumerate(decoder_inputs)# loop over the decoder inputs, e.g. sentence_length tensors of [batch_size x input_size] # during training, use the ground-truth inputs; at test time, feed the output at step t as the input at step t+1 <block_start><if_stmt>loop_function<is><not><none><and>prev<is><not><none># at test time: if loop_function is set and the previous output exists, use the previous output as the RNN input <block_start><with_stmt>tf.variable_scope("loop_function" reuse=<true>)<block_start>inp=loop_function(prev i)<block_end><block_end><if_stmt>i<g>0<block_start>tf.get_variable_scope().reuse_variables()<block_end>##ATTENTION################################################################################################################################################# # 1.get logits of attention for each encoder input. attention_states:[batch_size x attn_length x attn_size]; query=state:[batch_size x cell.state_size] query=state<line_sep>W_a=tf.get_variable("W_a" shape=[hidden_size hidden_size] initializer=tf.random_normal_initializer(stddev=0.1))<line_sep>query=tf.matmul(query W_a)#[batch_size,hidden_size] query=tf.expand_dims(query axis=1)#[batch_size, 1, hidden_size] U_a=tf.get_variable("U_a" shape=[hidden_size hidden_size] initializer=tf.random_normal_initializer(stddev=0.1))<line_sep>U_aa=tf.get_variable("U_aa" shape=[hidden_size])<line_sep>attention_states=tf.reshape(attention_states shape=(-1 hidden_size))#[batch_size*sentence_length,hidden_size] attention_states=tf.matmul(attention_states U_a)#[batch_size*sentence_length,hidden_size] #print("batch_size",batch_size," ;sequence_length:",sequence_length," ;hidden_size:",hidden_size) #print("attention_states:", attention_states) #(?, 200) attention_states=tf.reshape(attention_states shape=(-1 sequence_length hidden_size))# TODO [batch_size,sentence_length,hidden_size] #query_expanded: [batch_size,1, hidden_size] #attention_states_reshaped: [batch_size,sentence_length,hidden_size] attention_logits=tf.nn.tanh(query+attention_states+U_aa)#[batch_size,sentence_length,hidden_size].
additive style # 2.get probability of attention attention_logits=tf.reshape(attention_logits shape=(-1 hidden_size))#batch_size*sequence_length [batch_size*sentence_length,hidden_size] V_a=tf.get_variable("V_a" shape=[hidden_size 1] initializer=tf.random_normal_initializer(stddev=0.1))#[hidden_size,1] attention_logits=tf.matmul(attention_logits V_a)# what we ultimately need is [batch_size*sentence_length,1]<-----[batch_size*sentence_length,hidden_size],[hidden_size,1] attention_logits=tf.reshape(attention_logits shape=(-1 sequence_length))#attention_logits:[batch_size,sequence_length] ########################################################################################################################################################## #attention_logits=tf.reduce_sum(attention_logits,2) #[batch_size x attn_length] attention_logits_max=tf.reduce_max(attention_logits axis=1 keep_dims=<true>)#[batch_size x 1] # probability distribution for each encoder input. It means how much attention or focus for each encoder input p_attention=tf.nn.softmax(attention_logits-attention_logits_max)#[batch_size x attn_length] # 3.get weighted sum of hidden state for each encoder input as attention state p_attention=tf.expand_dims(p_attention axis=2)#[batch_size x attn_length x 1] # attention_states:[batch_size x attn_length x attn_size]; p_attention:[batch_size x attn_length]; attention_final=tf.multiply(attention_states_original p_attention)#[batch_size x attn_length x attn_size] context_vector=tf.reduce_sum(attention_final axis=1)#[batch_size x attn_size] ############################################################################################################################################################ #inp:[batch_size x input_size]. It is the decoder input; attention_final:[batch_size x attn_size] output,state=cell(inp state context_vector)#attention_final TODO advance the RNN by one step outputs.append(output)# append the output to the result list <if_stmt>loop_function<is><not><none><block_start>prev=output<block_end><block_end><block_end>print("rnn_decoder_with_attention ended...")<line_sep><return>outputs state<block_end>
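# Editor-added shape-level numpy sketch of the additive (Bahdanau-style) attention
# used above: score = V_a^T tanh(W_a s + U_a h + U_aa), then a softmax over source
# positions. The sizes below are arbitrary toy values.
import numpy as np

batch, seq_len, hidden = 2, 5, 8
state = np.random.randn(batch, hidden)              # decoder state s_t
enc = np.random.randn(batch, seq_len, hidden)       # encoder states h_1..h_T
W_a = np.random.randn(hidden, hidden)
U_a = np.random.randn(hidden, hidden)
U_aa = np.random.randn(hidden)
V_a = np.random.randn(hidden, 1)

query = (state @ W_a)[:, None, :]                   # (batch, 1, hidden)
keys = enc @ U_a                                    # (batch, seq_len, hidden)
scores = (np.tanh(query + keys + U_aa) @ V_a).squeeze(-1)   # (batch, seq_len)
p = np.exp(scores - scores.max(axis=1, keepdims=True))
p /= p.sum(axis=1, keepdims=True)                   # attention weights
context = (p[:, :, None] * enc).sum(axis=1)         # (batch, hidden)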
# Generated by Django 2.2.14 on 2020-09-26 06:38 <import_from_stmt>django.db migrations<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('users' '0024_auto_20200914_0433') ]<line_sep>operations=[migrations.AlterModelOptions(name='profile' options={'permissions':(('view_karma_points' 'Can view karma points') ('deactivate_users' 'Can deactivate users'))} ) ]<block_end>
# -*- coding: utf-8 -*- """ """<import_from_stmt>pylab *<import_from_stmt>acoular *<line_sep># files datafile='example_data.h5'<line_sep>t1=MaskedTimeSamples(name=datafile)<line_sep>t1.start=0# first sample, default t1.stop=16000# last valid sample = 15999 invalid=[1 7]# list of invalid channels (unwanted microphones etc.) t1.invalid_channels=invalid<line_sep>t2=ChannelMixer(source=t1)<line_sep>sig=GenericSignalGenerator(source=t2)<line_sep>plot(sig.signal())<line_sep>show()<line_sep>
# MIT License # # Copyright (c) 2021 <NAME> and <NAME> and <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. <import_stmt>os<import_stmt>sentencepiece<as>spm<import_stmt>shutil<import_from_stmt>typing Tuple<import_from_stmt>openspeech.datasets.librispeech.preprocess.preprocess collect_transcripts<line_sep>SENTENCEPIECE_MODEL_NAME="sp"<def_stmt>_prepare_tokenizer train_transcripts vocab_size<block_start>""" Prepare sentencepice tokenizer """<line_sep>input_file='spm_input.txt'<line_sep>model_type='unigram'<with_stmt>open(input_file 'w')<as>f<block_start><for_stmt>transcript train_transcripts<block_start>f.write(f"{transcript.split('|')[-1]}\n")<block_end><block_end>spm.SentencePieceTrainer.Train(f"--input={input_file} "<concat>f"--model_prefix={SENTENCEPIECE_MODEL_NAME} "<concat>f"--vocab_size={vocab_size} "<concat>f"--model_type={model_type} "<concat>f"--pad_id=0 "<concat>f"--bos_id=1 "<concat>f"--eos_id=2 "<concat>f"--unk_id=3 "<concat>f"--user_defined_symbols=<blank>")<block_end><def_stmt>generate_manifest_files dataset_path:str manifest_file_path:str vocab_path:str vocab_size:int<arrow><none><block_start>""" Generate manifest files. Format: {audio_path}\t{transcript}\t{numerical_label} Args: vocab_size (int): size of subword vocab Returns: None """<line_sep>transcripts_collection=collect_transcripts(dataset_path)<line_sep>_prepare_tokenizer(transcripts_collection[0] vocab_size)<line_sep>shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.model" os.path.join(vocab_path f"{SENTENCEPIECE_MODEL_NAME}.model"))<line_sep>shutil.copy(f"{SENTENCEPIECE_MODEL_NAME}.vocab" os.path.join(vocab_path f"{SENTENCEPIECE_MODEL_NAME}.vocab"))<line_sep>sp=spm.SentencePieceProcessor()<line_sep>sp.Load(os.path.join(vocab_path f"{SENTENCEPIECE_MODEL_NAME}.model"))<with_stmt>open(manifest_file_path 'w')<as>f<block_start><for_stmt>idx,part enumerate(['train-960' 'dev-clean' 'dev-other' 'test-clean' 'test-other'])<block_start><for_stmt>transcript transcripts_collection[idx]<block_start>audio_path,transcript=transcript.split('|')<line_sep>text=" ".join(sp.EncodeAsPieces(transcript))<line_sep>label=" ".join([str(item)<for>item sp.EncodeAsIds(transcript)])<line_sep>f.write(f"{audio_path}\t{text}\t{label}\n")<block_end><block_end><block_end><block_end>
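# Editor-added illustration of one manifest row written by
# generate_manifest_files() above (audio_path \t subword pieces \t numeric ids).
# The path, pieces and ids are made up -- they depend on the dataset layout and
# on the sentencepiece model trained at runtime.
example_row = (
    "train-960/103/1240/103-1240-0000.flac\t"
    "\u2581CHAPTER \u2581ONE\t"
    "245 12\n"
)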
# Copyright 2017 Google Inc. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS-IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Generates a JSON manifest and a Maya camera rig for Seurat. Example usage: CreateRig(headbox_min=[-0.5, -0.5, -0.5], headbox_max=[0.5, 0.5, 0.5], num_view_groups=16, # Should be a power of two. image_size=1024, near_clip=0.1, far_clip=100.0, depth_type='EYE_Z', depth_channel_name='A', color_file_path_pattern='%s_color.%04d.exr', depth_file_path_pattern='%s_depth.%04d.exr', json_file_path='./manifest.json') """<import_stmt>json<import_stmt>math<import_stmt>operator<def_stmt>ProjectPoint matrix point<block_start>"""Projects a 3D point using a 4x4 matrix. Args: matrix: A 4x4 matrix represented as a list of 16 floats. point: A 3D point represented as a list of 3 floats. Returns: The projected point, represented as a list of 3 floats. """<line_sep>result_hom=[0.0 0.0 0.0 0.0]<for_stmt>row xrange(4)<block_start><for_stmt>col xrange(3)<block_start>result_hom[row]<augadd>matrix[4<times>row+col]<times>point[col]<block_end># point.w = 1.0 implicitly result_hom[row]<augadd>matrix[4<times>row+3]<block_end>w=result_hom[3]<line_sep><return>map(operator.div result_hom[0:3] [w w w])<block_end><def_stmt>WorldFromEyeMatrixFromFace face_name<block_start>"""Creates world-from-eye matrix for the given face of a cube map. Args: face_name: Name of the face. Must be one of 'front', 'back', 'left', 'right', 'bottom', 'top'. Returns: The world-from-eye matrix for the given face as a list in row-major order. Raises: ValueError: face_name is not the name of a cube map face. """<line_sep># pylint: disable=bad-whitespace # pylint: disable=bad-continuation <if_stmt>face_name<is>'front'<block_start><return>[1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 1.0]<line_sep># pyformat: disable <block_end><elif_stmt>face_name<is>'back'<block_start><return>[-1.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 1.0]<line_sep># pyformat: disable <block_end><elif_stmt>face_name<is>'left'<block_start><return>[0.0 0.0 1.0 0.0 0.0 1.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0]<line_sep># pyformat: disable <block_end><elif_stmt>face_name<is>'right'<block_start><return>[0.0 0.0 -1.0 0.0 0.0 1.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 0.0 1.0]<line_sep># pyformat: disable <block_end><elif_stmt>face_name<is>'bottom'<block_start><return>[1.0 0.0 0.0 0.0 0.0 0.0 1.0 0.0 0.0 -1.0 0.0 0.0 0.0 0.0 0.0 1.0]<line_sep># pyformat: disable <block_end><elif_stmt>face_name<is>'top'<block_start><return>[1.0 0.0 0.0 0.0 0.0 0.0 -1.0 0.0 0.0 1.0 0.0 0.0 0.0 0.0 0.0 1.0]<line_sep># pyformat: disable <block_end><else_stmt><block_start><raise>ValueError('Invalid face_name')<block_end><block_end><def_stmt>CubeFaceProjectionMatrix near far<block_start>"""Creates a cube-face 90 degree FOV projection matrix. The created matrix is an OpenGL-style projection matrix. Args: near: Eye-space Z position of the near clipping plane. far: Eye-space Z position of the far clipping plane. Returns: The clip-from-eye matrix as a list in row-major order. 
Raises: ValueError: Invalid clip planes. near <= 0.0 or far <= near. """<if_stmt>near<le>0.0<block_start><raise>ValueError('near must be positive.')<block_end><if_stmt>far<le>near<block_start><raise>ValueError('far must be greater than near.')<block_end>left=-near<line_sep>right=near<line_sep>bottom=-near<line_sep>top=near<line_sep>a=(2.0<times>near)/(right-left)<line_sep>b=(2.0<times>near)/(top-bottom)<line_sep>c=(right+left)/(right-left)<line_sep>d=(top+bottom)/(top-bottom)<line_sep>e=(near+far)/(near-far)<line_sep>f=(2.0<times>near<times>far)/(near-far)<line_sep># pylint: disable=bad-whitespace <return>[a 0.0 c 0.0 0.0 b d 0.0 0.0 0.0 e f 0.0 0.0 -1.0 0.0]<line_sep># pyformat: disable <block_end><def_stmt>RadicalInverse a base<block_start>"""Computes the radical inverse of |a| in base |base|. Args: a: The integer number for which the radical inverse is computed. base: The radical inverse is computed in this base (integer). Returns: The radical inverse as a float in the range [0.0, 1.0). """<line_sep>reversed_digits=0<line_sep>base_n=1<line_sep># Compute the reversed digits, base b. <while_stmt>a<g>0<block_start>next_a=a/base<line_sep>digit=a-next_a<times>base<line_sep>reversed_digits=reversed_digits<times>base+digit<line_sep>base_n<augmul>base<line_sep>a=next_a<block_end># Only when done are the reversed digits divided by b^n. <return>min(reversed_digits/float(base_n) 1.0)<block_end><def_stmt>PointInBox box_min box_max sample<block_start>"""Computes a sample point inside a box with arbitrary number of dimensions. Args: box_min: A list of floats representing the lower bounds of the box. box_max: A list of floats representing the upper bounds of the box. sample: A list of floats in the range [0.0, 1.0] representing the relative sample position in the box. Returns: A list of floats, representing the absolute position of the sample in the box. """<line_sep>delta=map(operator.sub box_max box_min)<line_sep>offset=map(operator.mul delta sample)<line_sep>position=map(operator.add box_min offset)<line_sep><return>position<block_end><def_stmt>Distance point_a point_b<block_start>"""Computes the euclidean distance between two points. The points can have an aribtrary number of dimensions. Args: point_a: A list of numbers representing the first point. point_b: A list of numbers representing the second point. Returns: The euclidean distance as a float. """<line_sep>delta=map(operator.sub point_a point_b)<line_sep>delta_sqr=map(operator.mul delta delta)<line_sep>distance_sqr=0.0<for_stmt>element delta_sqr<block_start>distance_sqr<augadd>element<block_end><return>math.sqrt(distance_sqr)<block_end><def_stmt>RotateCamera camera_name face_name<block_start>"""Rotates a Maya camera node to look at a given cube map face. Args: camera_name: Name of the Maya camera's transform node. face_name: Name of the cube map face. Raises: ValueError: face is not a valid cube map face name. """<line_sep># Disable the undefined-variable lint error, because the Maya package is not # defined in the environment where the linter runs. 
# # pylint: disable=undefined-variable <if_stmt>face_name<is>'front'<block_start><pass><block_end><elif_stmt>face_name<is>'back'<block_start>maya.cmds.setAttr(camera_name+'.rotateY' 180)<block_end><elif_stmt>face_name<is>'left'<block_start>maya.cmds.setAttr(camera_name+'.rotateY' 90)<block_end><elif_stmt>face_name<is>'right'<block_start>maya.cmds.setAttr(camera_name+'.rotateY' -90)<block_end><elif_stmt>face_name<is>'bottom'<block_start>maya.cmds.setAttr(camera_name+'.rotateX' -90)<block_end><elif_stmt>face_name<is>'top'<block_start>maya.cmds.setAttr(camera_name+'.rotateX' 90)<block_end><else_stmt><block_start><raise>ValueError('Invalid face_name')<block_end><block_end><def_stmt>GenerateCameraPositions headbox_min headbox_max num_cameras<block_start>"""Generates camera positions in a headbox. Camera posittions are computed as a 3D Hammersley point set. The points are transformed such that their bounding box is exactly equal to the headbox. The points are then sorted according to distance to the headbox center. Finally, the point that is closest to the headbox center is replaced by the headbox center itself to include a view from the reference camera. Args: headbox_min: The lower bounds of the headbox as a list of 3 floats. headbox_max: The upper bounds of the headbox as a list of 3 floats. num_cameras: The number of cameras to generate. Should be a power of two. Returns: A list of 3D points (each a list of 3 floats), representing the positions of the generated cameras. Raises: ValueError: num_cameras is not positive. """<if_stmt>num_cameras<le>0<block_start><raise>ValueError('num_cameras must be positive')<block_end><if_stmt>num_cameras<eq>1# Use the headbox center if a single camera position is requested. <block_start><return>[PointInBox(headbox_min headbox_max [0.5 0.5 0.5])]<block_end>samples=[]<line_sep>max_sample=[0.0 0.0 0.0]<for_stmt>i xrange(num_cameras)# Use a 3D Hammersley point set for the samples. <block_start>sample=[i/float(num_cameras) RadicalInverse(i 2) RadicalInverse(i 3)]<for_stmt>dim xrange(3)<block_start>max_sample[dim]=max(max_sample[dim] sample[dim])<block_end>samples.append(sample)<block_end>headbox_center=PointInBox(headbox_min headbox_max [0.5 0.5 0.5])<line_sep>camera_positions=[]<for_stmt>sample samples# Normalize the samples so that their bounding box is the unit cube. <block_start><for_stmt>dim xrange(3)<block_start>sample[dim]<augdiv>max_sample[dim]<block_end>position=PointInBox(headbox_min headbox_max sample)<line_sep>camera_positions.append(position)<block_end>sorted_positions=sorted(camera_positions key=<lambda>point:Distance(point headbox_center))<line_sep># Replace the point closest to the headbox center by the headbox center # itself. sorted_positions[0]=PointInBox(headbox_min headbox_max [0.5 0.5 0.5])<line_sep><return>sorted_positions<block_end><def_stmt>CreateCameras camera_positions near_clip far_clip<block_start>"""Creates and animates the Maya cameras for the rig. Six cameras, one for each cube face, are generated. Each camera is configured with a square viewport and the given near and far clipping planes. This method also adjusts the Maya timeline to exactly contain the frames for the rig animation. Each of the six cameras will get one keyframe per camera position. Args: camera_positions: A list of 3D points (each a list of 3 floats) representing the positions of the cameras. near_clip: Eye-space Z position of the near clipping planes. far_clip: Eye-space Z position of the far clipping planes. 
"""<line_sep># Disable the undefined-variable lint error, because the Maya package is not # defined in the environment where the linter runs. # # pylint: disable=undefined-variable start_time=0<line_sep>end_time=len(camera_positions)-1<line_sep>maya.cmds.playbackOptions(animationStartTime=start_time animationEndTime=end_time minTime=start_time maxTime=end_time)<for_stmt>face ['front' 'back' 'left' 'right' 'bottom' 'top']# Create a cube face camera and rotate it. <block_start>camera_name=maya.cmds.camera(name='seurat_'+face focalLength=12.7 horizontalFilmAperture=1 verticalFilmAperture=1 nearClipPlane=near_clip farClipPlane=far_clip)[0]<line_sep>RotateCamera(camera_name face)<line_sep># Set translation keyframes for all positions on this camera. <for_stmt>view_group_index,position enumerate(camera_positions)<block_start>maya.cmds.setKeyframe(camera_name at='translateX' t=view_group_index v=position[0])<line_sep>maya.cmds.setKeyframe(camera_name at='translateY' t=view_group_index v=position[1])<line_sep>maya.cmds.setKeyframe(camera_name at='translateZ' t=view_group_index v=position[2])<block_end><block_end><block_end><def_stmt>CreateViewGroups headbox_center camera_positions image_size near_clip far_clip depth_type depth_channel_name color_file_path_pattern depth_file_path_pattern<block_start>"""Creates and returns the view groups for the JSON output. Args: headbox_center: Center of the headbox as a list of 3 floats. camera_positions: Positions of the cameras as a list of 3D points (each a list of 3 floats). image_size: Size of the output images in pixels. near_clip: Eye-space Z position of the near clipping planes. far_clip: Eye-space Z position of the far clipping planes. depth_type: A string representing the depth encoding. Valid values are: 'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name: Name of the depth channel in the output file. Commonly used values are 'R' (VRay) and 'A' (Arnold). color_file_path_pattern: File name pattern for color images. Must contain a placeholder for a string (face name) and an integer (view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern: File name pattern for depth images. Must contain a placeholder for a string (face name) and an integer (view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. Returns: A dictionary representing the view groups. """<line_sep>view_groups=[]<for_stmt>view_group_index,absolute_position enumerate(camera_positions)<block_start>views=[]<for_stmt>face ['front' 'back' 'left' 'right' 'bottom' 'top']# Camera position relative to headbox center. <block_start>position=map(operator.sub absolute_position headbox_center)<line_sep>clip_from_eye_matrix=CubeFaceProjectionMatrix(near_clip far_clip)<line_sep>world_from_eye_matrix=WorldFromEyeMatrixFromFace(face)<line_sep># Set translation component of world-from-eye matrix. 
<for_stmt>i xrange(3)<block_start>world_from_eye_matrix[4<times>i+3]=position[i]<block_end># Create camera object camera={'image_width':image_size 'image_height':image_size 'clip_from_eye_matrix':clip_from_eye_matrix 'world_from_eye_matrix':world_from_eye_matrix 'depth_type':depth_type}<line_sep># Create view object and add it to the view groups color_image_path=(color_file_path_pattern%(face view_group_index))<line_sep>depth_image_path=(depth_file_path_pattern%(face view_group_index))<line_sep>view={'projective_camera':camera 'depth_image_file':{'color':{'path':color_image_path 'channel_0':'R' 'channel_1':'G' 'channel_2':'B' 'channel_alpha':'A'} 'depth':{'path':depth_image_path 'channel_0':depth_channel_name}}}<line_sep>views.append(view)<block_end>view_group={'views':views}<line_sep>view_groups.append(view_group)<block_end># Return the view_groups as a Python list. <return>view_groups<block_end><def_stmt>CreateRig headbox_min headbox_max num_view_groups image_size near_clip far_clip depth_type depth_channel_name color_file_path_pattern depth_file_path_pattern json_file_path json_only=<false><block_start>"""Creates a Maya camera rig and JSON manifest for Seurat. Args: headbox_min: List of three floats representing the lower bounds of the headbox in world-space. headbox_max: List of three floats representing the upper bounds of the headbox in world-space. num_view_groups: Number of view groups (camera positions) to generate. Must be a power of two. image_size: Resolution of the output images in pixels. near_clip: Eye-space Z position of the near clipping planes. far_clip: Eye-space Z position of the far clipping planes. depth_type: A string representing the depth encoding. Valid values are: 'WINDOW_Z' (window-space Z coordinate in the range [0.0, 1.0]), 'EYE_Z' (negated eye-space Z coordinate in the range [0.0, inf); Arnold's encoding), 'RAY_DEPTH' (distance to eye). depth_channel_name: Name of the depth channel in the output file. Commonly used values are 'R' (VRay) and 'A' (Arnold). color_file_path_pattern: File name pattern for color images. Must contain a placeholder for a string (face name) and an integer (view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. depth_file_path_pattern: File name pattern for depth images. Must contain a placeholder for a string (face name) and an integer (view group number). Example: '%s.%04d.exr' for file names 'front.0000.exr', 'front.0001.exr', ... , 'top.9999.exr'. json_file_path: Path to the output JSON manifest file. json_only: A boolean value. If true, the Maya camera generation step is bypassed. """<line_sep># Compute the positions of the cameras. camera_positions=GenerateCameraPositions(headbox_min headbox_max num_view_groups)<line_sep># Generate the six Maya cameras and keyframe their positions. <if_stmt><not>json_only<block_start>CreateCameras(camera_positions near_clip far_clip)<block_end># Compute the headbox center. headbox_center=PointInBox(headbox_min headbox_max [0.5 0.5 0.5])<line_sep># Generate the JSON manifest and write it to the file. view_groups=CreateViewGroups(headbox_center camera_positions image_size near_clip far_clip depth_type depth_channel_name color_file_path_pattern depth_file_path_pattern)<line_sep>json_string=json.dumps({'view_groups':view_groups} indent=2)<with_stmt>open(json_file_path 'w')<as>json_file<block_start>json_file.write(json_string)<block_end><block_end>
# Copyright 2019 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Module for creating metrics in tensorflow for model training."""<import_stmt>tensorflow<as>tf<def_stmt>mean_acc labels predictions num_classes<block_start>"""Mean per class accuracy metrics Arguments: labels: tf.Tensor objects, True values of the dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: The mean per class accuracy """<line_sep><return>{'mean_class_acc':tf.metrics.mean_per_class_accuracy(labels predictions['class_ids'] num_classes)}<block_end><def_stmt>my_auc labels predictions<block_start>"""Custom AUC metric using interpolation. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: The AUC metric for the model """<line_sep><return>{'auc_ci':tf.metrics.auc(labels predictions['class_ids'] summation_method='careful_interpolation')}<block_end><def_stmt>rmse labels predictions<block_start>"""Root mean squared error metric for regression tasks. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: Root mean squared error for regression model """<line_sep><return>{'root_mean_square_error':tf.metrics.root_mean_squared_error(labels predictions['predictions'])}<block_end><def_stmt>mar labels predictions<block_start>"""Mean absolute error for regression model. Arguments: labels: tf.Tensor objects, True values of dependent variable predictions: tf.Tensor objects, Predictions from the model Returns: Mean absolute error for the regression model """<line_sep><return>{'mean_absolute_error':tf.metrics.mean_absolute_error(labels predictions['predictions'])}<block_end>
# Simple series generator with # multiple generators & decorators. # Author : <NAME> <def_stmt>myfunc **kwds<block_start><def_stmt>func f<block_start>cond=kwds['condition']<line_sep>proc=kwds['process']<line_sep>num=kwds['number']<line_sep>x=0<for_stmt>item f()<block_start><if_stmt>cond<is><none><or>cond(item)<block_start><if_stmt>proc<block_start>item=proc(item)<block_end><yield>item<line_sep>x<augadd>1<block_end><if_stmt>x<eq>num<block_start><break><block_end><block_end><block_end><return>func<block_end><def_stmt>series condition=<none> process=<none> number=10<block_start>@myfunc(condition=condition process=process number=number)<def_stmt>wrapper <block_start>x=1<while_stmt>1<block_start><yield>x<line_sep>x<augadd>1<block_end><block_end><return>wrapper<block_end>
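A minimal usage sketch of the decorated generator above (plain Python; the condition and process callables are illustrative): take the first five squares of even numbers from the infinite series.

# Illustrative call: keep even numbers, square them, stop after five items.
squares_of_evens = series(condition=lambda n: n % 2 == 0,
                          process=lambda n: n * n,
                          number=5)
print(list(squares_of_evens))  # [4, 16, 36, 64, 100]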
<import_stmt>functools<import_stmt>inspect<import_stmt>typing<import_stmt>warnings<line_sep>__all__=["BaseModel" "create_model" "validate_arguments" "set_type_model" "is_typed_dict_type" "parse_typed_dict" "TEMPLATE" ]<line_sep>Callable=typing.TypeVar("Callable" bound=typing.Callable)<try_stmt><block_start><import_from_stmt>pydantic BaseModel ValidationError create_model<import_from_stmt>pydantic validate_arguments<as>pydantic_validate_arguments<line_sep># visit this issue # https://github.com/samuelcolvin/pydantic/issues/1205 <def_stmt>validate_arguments function:Callable<arrow>Callable<block_start>function=pydantic_validate_arguments(function)<line_sep>@functools.wraps(function)<def_stmt>change_exception *args **kwargs<block_start><try_stmt><block_start><return>function(*args **kwargs)<block_end><except_stmt>ValidationError<as>exception<block_start>type_error=TypeError("Failed to pass pydantic's type verification, please output"<concat>" `.more_info` of this exception to view detailed information.")<line_sep>type_error.more_info=exception<line_sep><raise>type_error<block_end><block_end><return>change_exception<block_end><block_end># type: ignore <except_stmt>ImportError<block_start><def_stmt>create_model *args **kwargs# type: ignore <block_start><raise>NotImplementedError("Need install `pydantic` from pypi.")<block_end><def_stmt>validate_arguments function:Callable<arrow>Callable<block_start><return>function<block_end>BaseModel=type("BaseModel" () {})<block_end># type: ignore <def_stmt>set_type_model func:Callable<arrow>Callable<block_start>""" try generate request body model from type hint and default value """<line_sep>sig=inspect.signature(func)<line_sep>field_definitions:typing.Dict[str typing.Any]={}<for_stmt>name,parameter sig.parameters.items()<block_start><if_stmt>parameter.annotation<eq>parameter.empty# raise ValueError( # f"You must specify the type for the parameter {func.__name__}:{name}." # ) <block_start><return>func# Maybe the type hint should be mandatory? I'm not sure. 
<block_end><if_stmt>parameter.default<eq>parameter.empty<block_start>field_definitions[name]=(parameter.annotation <ellipsis>)<block_end><else_stmt><block_start>field_definitions[name]=(parameter.annotation parameter.default)<block_end><block_end><if_stmt>field_definitions<block_start><try_stmt><block_start>body_model:typing.Type[BaseModel]=create_model(func.__name__ **field_definitions)<line_sep>setattr(func "__body_model__" body_model)<block_end><except_stmt>NotImplementedError<block_start>message=("If you wanna using type hint "<concat>"to create OpenAPI docs or convert type, "<concat>"please install `pydantic` from pypi.")<line_sep>warnings.warn(message ImportWarning)<block_end><block_end><return>func<block_end><def_stmt>is_typed_dict_type type_<arrow>bool<block_start><return>issubclass(type_ dict)<and>getattr(type_ "__annotations__" <false>)<block_end><def_stmt>parse_typed_dict typed_dict<arrow>typing.Type[BaseModel]<block_start>""" parse `TypedDict` to generate `pydantic.BaseModel` """<line_sep>annotations={}<for_stmt>name,field typed_dict.__annotations__.items()<block_start><if_stmt>is_typed_dict_type(field)<block_start>annotations[name]=(parse_typed_dict(field) <ellipsis>)<block_end><else_stmt><block_start>default_value=getattr(typed_dict name <ellipsis>)<line_sep>annotations[name]=(field default_value)<block_end><block_end><return>create_model(typed_dict.__name__ **annotations)<block_end># type: ignore TEMPLATE="""<!DOCTYPE html> <html> <head> <link type="text/css" rel="stylesheet" href="https://cdn.jsdelivr.net/npm/[email protected]/swagger-ui.css"> <title>OpenAPI Docs</title> </head> <body> <div id="swagger-ui"></div> <script src="https://cdn.jsdelivr.net/npm/[email protected]/swagger-ui-bundle.js"></script> <script> const ui = SwaggerUIBundle({ url: './get-openapi-docs', dom_id: '#swagger-ui', presets: [ SwaggerUIBundle.presets.apis, SwaggerUIBundle.SwaggerUIStandalonePreset ], layout: "BaseLayout", deepLinking: true, showExtensions: true, showCommonExtensions: true }) </script> </body> </html> """<line_sep>
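A brief sketch of how set_type_model can be used, assuming pydantic is installed; create_item below is a hypothetical handler, not part of the module.

@set_type_model
def create_item(name: str, price: float = 0.0):  # hypothetical handler
    return {"name": name, "price": price}

body_model = getattr(create_item, "__body_model__", None)
if body_model is not None:
    parsed = body_model(name="pen", price="1.5")  # pydantic coerces "1.5" -> 1.5
    print(parsed.dict())  # {'name': 'pen', 'price': 1.5}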
<import_from_stmt>realtime_predictor *<line_sep>emoji={'Writing':'\U0001F4DD ' 'Scissors':'\u2701 ' 'Computer_keyboard':'\u2328 '}<def_stmt>on_predicted_deskwork ensembled_pred<block_start>result=np.argmax(ensembled_pred)<line_sep>label=conf.labels[result]<if_stmt>label<in>['Writing' 'Scissors' 'Computer_keyboard']<block_start>p=ensembled_pred[result]<line_sep>level=int(p<times>10)+1<line_sep>print(emoji[label]<times>level label p)<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>model=get_model(args.model_pb_graph)<line_sep># file mode <if_stmt>args.input_file<ne>''<block_start>process_file(model args.input_file on_predicted_deskwork)<line_sep>my_exit(model)<block_end># device list display mode <if_stmt>args.input<l>0<block_start>print_pyaudio_devices()<line_sep>my_exit(model)<block_end># normal: realtime mode FORMAT=pyaudio.paInt16<line_sep>CHANNELS=1<line_sep>audio=pyaudio.PyAudio()<line_sep>stream=audio.open(format=FORMAT channels=CHANNELS rate=conf.sampling_rate input=<true> input_device_index=args.input frames_per_buffer=conf.rt_chunk_samples start=<false> stream_callback=callback# uncomment for non_blocking )<line_sep># main loop stream.start_stream()<while_stmt>stream.is_active()<block_start>main_process(model on_predicted_deskwork)<line_sep>time.sleep(0.001)<block_end>stream.stop_stream()<line_sep>stream.close()<line_sep># finish audio.terminate()<line_sep>my_exit(model)<block_end>
<import_from_stmt>datetime datetime<as>dt<import_from_stmt>dash.dependencies Input<import_from_stmt>dash.dependencies Output<import_from_stmt>dash.dependencies State<import_from_stmt>flask_login current_user<import_stmt>pandas_datareader<as>pdr<def_stmt>register_callbacks dashapp<block_start>@dashapp.callback(Output('my-graph' 'figure') Input('my-dropdown' 'value') State('user-store' 'data'))<def_stmt>update_graph selected_dropdown_value data<block_start>df=pdr.get_data_yahoo(selected_dropdown_value start=dt(2017 1 1) end=dt.now())<line_sep><return>{'data':[{'x':df.index 'y':df.Close}] 'layout':{'margin':{'l':40 'r':0 't':20 'b':30}}}<block_end>@dashapp.callback(Output('user-store' 'data') Input('my-dropdown' 'value') State('user-store' 'data'))<def_stmt>cur_user args data<block_start><if_stmt>current_user.is_authenticated<block_start><return>current_user.username<block_end><block_end>@dashapp.callback(Output('username' 'children') Input('user-store' 'data'))<def_stmt>username data<block_start><if_stmt>data<is><none><block_start><return>''<block_end><else_stmt><block_start><return>f'Hello {data}'<block_end><block_end><block_end>
<import_stmt>torchvision<import_from_stmt>fastai.vision ImageDataBunch cnn_learner unet_learner SegmentationItemList imagenet_stats<line_sep>data=ImageDataBunch.from_csv('fixtures/classification').normalize(imagenet_stats)<line_sep>learner=cnn_learner(data torchvision.models.resnet34)<line_sep>learner.export()<line_sep>data=(SegmentationItemList.from_folder('fixtures/segmentation/images').split_none().label_from_func(<lambda>x:f'fixtures/segmentation/masks/{x.stem}.jpg' classes=[0 1 2]).databunch().normalize(imagenet_stats))<line_sep>learner=unet_learner(data torchvision.models.resnet50)<line_sep>learner.export('../export.pkl')<line_sep>
<import_from_stmt>sympy *<import_from_stmt>sympy.matrices *<import_stmt>os<import_stmt>re<import_stmt>argparse<line_sep># local <import_stmt>pretty_print<def_stmt>sqr a<block_start><return>a<times>a<block_end><def_stmt>trunc_acos x<block_start>tmp=Piecewise((0.0 x<ge>1.0) (pi x<le>-1.0) (acos(x) <true>))<line_sep><return>tmp.subs(x x)<block_end><def_stmt>eigs_2d mat<block_start>a=mat[0 0]+mat[1 1]<line_sep>delta=(mat[0 0]-mat[1 1])<power>2+4<times>mat[0 1]<power>2<line_sep>tmp1=Piecewise((a/2 delta<l>1e-10) ((a-sqrt(delta))/2.0 <true>))<line_sep>tmp2=Piecewise((a/2 delta<l>1e-10) ((a+sqrt(delta))/2.0 <true>))<line_sep><return>tmp1.subs(delta delta) tmp2.subs(delta delta)<block_end><def_stmt>eigs_3d mat<block_start>b=mat[0]+mat[4]+mat[8]<line_sep>t=sqr(mat[1])+sqr(mat[2])+sqr(mat[5])<line_sep>p=0.5<times>(sqr(mat[0]-mat[4])+sqr(mat[0]-mat[8])+sqr(mat[4]-mat[8]))<line_sep>p<augadd>3.0<times>t<line_sep>q=18.0<times>(mat[0]<times>mat[4]<times>mat[8]+3.0<times>mat[1]<times>mat[2]<times>mat[5])<line_sep>q<augadd>2.0<times>(mat[0]<times>sqr(mat[0])+mat[4]<times>sqr(mat[4])+mat[8]<times>sqr(mat[8]))<line_sep>q<augadd>9.0<times>b<times>t<line_sep>q<augsub>3.0<times>(mat[0]+mat[4])<times>(mat[0]+mat[8])<times>(mat[4]+mat[8])<line_sep>q<augsub>27.0<times>(mat[0]<times>sqr(mat[5])+mat[4]<times>sqr(mat[2])+mat[8]<times>sqr(mat[1]))<line_sep>delta=trunc_acos(0.5<times>q/sqrt(p<times>sqr(p)))<line_sep>p=2.0<times>sqrt(p)<line_sep>tmp1=Piecewise((b/3.0 p<l>1e-10) ((b+p<times>cos(delta/3.0))/3.0 <true>))<line_sep>tmp2=Piecewise((b/3.0 p<l>1e-10) ((b+p<times>cos((delta+2.0<times>pi)/3.0))/3.0 <true>))<line_sep>tmp3=Piecewise((b/3.0 p<l>1e-10) ((b+p<times>cos((delta-2.0<times>pi)/3.0))/3.0 <true>))<line_sep><return>tmp1.subs(p p) tmp2.subs(p p) tmp3.subs(p p)<block_end><def_stmt>parse_args <block_start>parser=argparse.ArgumentParser(description=__doc__ formatter_class=argparse.RawDescriptionHelpFormatter)<line_sep>parser.add_argument("output" type=str help="path to the output folder")<line_sep><return>parser.parse_args()<block_end><if_stmt>__name__<eq>"__main__"<block_start>args=parse_args()<line_sep>dims=[2 3]<line_sep>cpp="#include <polyfem/auto_eigs.hpp>\n\n\n"<line_sep>hpp="#pragma once\n\n#include <Eigen/Dense>\n\n"<line_sep>cpp=cpp+"namespace polyfem {\nnamespace autogen "+"{\n"<line_sep>hpp=hpp+"namespace polyfem {\nnamespace autogen "+"{\n"<line_sep>hpp=hpp+"template<typename T>\nT int_pow(T val, int exp) { T res = exp <=0 ? 
T(0.): val; for(int i = 1; i < exp; ++i) res = res*val; return res; }\n\n"<line_sep>lambdaa=Symbol('lambda' real=<true>)<for_stmt>dim dims<block_start>print("processing "+str(dim))<line_sep>M=zeros(dim dim)<for_stmt>i range(0 dim)<block_start><for_stmt>j range(0 dim)<block_start><if_stmt>i<le>j<block_start>M[i j]=Symbol('m['+str(i)+','+str(j)+']' real=<true>)<block_end><else_stmt><block_start>M[i j]=Symbol('m['+str(j)+','+str(i)+']' real=<true>)<block_end><block_end><block_end><if_stmt>dim<eq>2<block_start>lambdas=eigs_2d(M)<block_end><else_stmt><block_start>lambdas=eigs_3d(M)<block_end># lambdas = simplify(lambdas) c99=pretty_print.C99_print(lambdas)<line_sep>c99=re.sub(r"m\[(\d{1}),(\d{1})\]" r'm(\1,\2)' c99)<line_sep>c99=re.sub(r"result_(\d{1})" r'res(\1)' c99)<line_sep>c99=c99.replace("0.0" "T(0)")<line_sep>c99=c99.replace(" M_PI" " T(M_PI)")<line_sep>signature="template<typename T>\nvoid eigs_"+str(dim)+"d(const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic, 0, 3, 3> &m, "<line_sep>signature<augadd>"Eigen::Matrix<T, Eigen::Dynamic, 1, 0, 3, 1> &res)"<line_sep>hpp=hpp+signature+" {\nres.resize("+str(dim)+");\n"+c99+"\n}\n\n"<block_end>cpp=cpp+"\n"<line_sep>hpp=hpp+"\n"<line_sep>cpp=cpp+"\n}}\n"<line_sep>hpp=hpp+"\n}}\n"<line_sep>path=os.path.abspath(args.output)<line_sep>print("saving...")<with_stmt>open(os.path.join(path "auto_eigs.cpp") "w")<as>file<block_start>file.write(cpp)<block_end><with_stmt>open(os.path.join(path "auto_eigs.hpp") "w")<as>file<block_start>file.write(hpp)<block_end>print("done!")<block_end>
<import_stmt>torch<import_stmt>torch.nn<as>nn<import_stmt>torch.nn.functional<as>F<line_sep># from loglinear import LogLinear <class_stmt>DeepSet(nn.Module)<block_start><def_stmt>__init__ self in_features set_features=50<block_start>super(DeepSet self).__init__()<line_sep>self.in_features=in_features<line_sep>self.out_features=set_features<line_sep>self.feature_extractor=nn.Sequential(nn.Linear(in_features 50) nn.ELU(inplace=<true>) nn.Linear(50 100) nn.ELU(inplace=<true>) nn.Linear(100 set_features))<line_sep>self.regressor=nn.Sequential(nn.Linear(set_features 30) nn.ELU(inplace=<true>) nn.Linear(30 30) nn.ELU(inplace=<true>) nn.Linear(30 10) nn.ELU(inplace=<true>) nn.Linear(10 1) )<line_sep>self.add_module('0' self.feature_extractor)<line_sep>self.add_module('1' self.regressor)<block_end><def_stmt>reset_parameters self<block_start><for_stmt>module self.children()<block_start>reset_op=getattr(module "reset_parameters" <none>)<if_stmt>callable(reset_op)<block_start>reset_op()<block_end><block_end><block_end><def_stmt>forward self input<block_start>x=input<line_sep>x=self.feature_extractor(x)<line_sep>x=x.sum(dim=1)<line_sep>x=self.regressor(x)<line_sep><return>x<block_end><def_stmt>__repr__ self<block_start><return>self.__class__.__name__+'('+'Feature Exctractor='+str(self.feature_extractor)+'\n Set Feature'+str(self.regressor)+')'<block_end><block_end><class_stmt>DeepSet1(nn.Module)<block_start><def_stmt>__init__ self in_features set_features=512<block_start>super(DeepSet1 self).__init__()<line_sep>self.in_features=in_features<line_sep>self.out_features=set_features<line_sep>self.feature_extractor=nn.Sequential(nn.Linear(in_features 512) nn.ELU(inplace=<true>) nn.Linear(512 512) nn.ELU(inplace=<true>) nn.Linear(512 set_features))<line_sep>self.regressor=nn.Sequential(nn.Linear(set_features 512) nn.ELU(inplace=<true>) nn.Linear(512 512) nn.ELU(inplace=<true>) nn.Linear(512 512) nn.ELU(inplace=<true>) nn.Linear(512 1) )<line_sep>self.add_module('0' self.feature_extractor)<line_sep>self.add_module('1' self.regressor)<block_end><def_stmt>reset_parameters self<block_start><for_stmt>module self.children()<block_start>reset_op=getattr(module "reset_parameters" <none>)<if_stmt>callable(reset_op)<block_start>reset_op()<block_end><block_end><block_end><def_stmt>forward self input<block_start>x=input<line_sep>x=self.feature_extractor(x)<line_sep>x=x.sum(dim=1)<line_sep>x=self.regressor(x)<line_sep><return>x<block_end><def_stmt>__repr__ self<block_start><return>self.__class__.__name__+'('+'Feature Exctractor='+str(self.feature_extractor)+'\n Set Feature'+str(self.regressor)+')'<block_end><block_end><class_stmt>DeepSet2(nn.Module)<block_start><def_stmt>__init__ self in_features set_features=256<block_start>super(DeepSet2 self).__init__()<line_sep>self.in_features=in_features<line_sep>self.out_features=set_features<line_sep>self.feature_extractor=nn.Sequential(nn.Linear(in_features 256) nn.ELU(inplace=<true>) nn.Linear(256 256) nn.ELU(inplace=<true>) nn.Linear(256 set_features))<line_sep>self.log_feature_extractor=nn.Sequential(nn.Linear(in_features 256) nn.ReLU(inplace=<true>) nn.Linear(256 256) nn.ReLU(inplace=<true>) nn.Linear(256 set_features) nn.ReLU(inplace=<true>))<line_sep>self.regressor=nn.Sequential(nn.Linear(set_features<times>2 512) nn.ELU(inplace=<true>) nn.Linear(512 512) nn.ELU(inplace=<true>) nn.Linear(512 512) nn.ELU(inplace=<true>) nn.Linear(512 1) )<line_sep>self.add_module('0' self.feature_extractor)<line_sep>self.add_module('1' self.regressor)<block_end><def_stmt>reset_parameters 
self<block_start><for_stmt>module self.children()<block_start>reset_op=getattr(module "reset_parameters" <none>)<if_stmt>callable(reset_op)<block_start>reset_op()<block_end><block_end><block_end><def_stmt>forward self input<block_start>x=input<line_sep>x1=self.feature_extractor(x)<line_sep>x2=self.log_feature_extractor(x)+0.001<line_sep>x2=x2.log()<line_sep>x=torch.cat((x1 x2) 2)<line_sep>x=x.sum(dim=1)<line_sep>x=self.regressor(x)<line_sep><return>x<block_end><def_stmt>__repr__ self<block_start><return>self.__class__.__name__+'('+'Feature Exctractor='+str(self.feature_extractor)+'\n Set Feature'+str(self.regressor)+')'<block_end><block_end><class_stmt>DeepSet3(nn.Module)<block_start><def_stmt>__init__ self in_features set_features=50<block_start>super(DeepSet3 self).__init__()<line_sep>self.in_features=in_features<line_sep>self.out_features=set_features<line_sep>self.feature_extractor=nn.Sequential(nn.Linear(in_features 50) nn.ELU(inplace=<true>) nn.Linear(50 50) nn.ELU(inplace=<true>) nn.Linear(50 set_features))<line_sep>self.log_feature_extractor=nn.Sequential(nn.Linear(in_features 50) nn.ReLU(inplace=<true>) nn.Linear(50 50) nn.ReLU(inplace=<true>) nn.Linear(50 set_features) nn.ReLU(inplace=<true>))<line_sep>self.l1=nn.Linear(set_features<times>2 30)<line_sep>self.l2=LogLinear(set_features<times>2 30)<line_sep>self.lp=nn.ReLU()<line_sep>self.regressor=nn.Sequential(#nn.Linear(set_features*2, 512), nn.ELU(inplace=<true>) nn.Linear(60 30) nn.ELU(inplace=<true>) nn.Linear(30 10) nn.ELU(inplace=<true>) nn.Linear(10 1) )<line_sep>self.add_module('0' self.feature_extractor)<line_sep>self.add_module('1' self.regressor)<block_end><def_stmt>reset_parameters self<block_start><for_stmt>module self.children()<block_start>reset_op=getattr(module "reset_parameters" <none>)<if_stmt>callable(reset_op)<block_start>reset_op()<block_end><block_end><block_end><def_stmt>forward self input<block_start>x=input<line_sep>x1=self.feature_extractor(x)<line_sep>x2=self.log_feature_extractor(x)+0.001<line_sep>x2=x2.log()<line_sep>x=torch.cat((x1 x2) 2)<line_sep>x=x.sum(dim=1)<line_sep>x1=self.l1(x)<line_sep>x2=self.lp(x)+0.001<line_sep>x2=self.l2(x2)<line_sep>x=torch.cat((x1 x2) 1)<line_sep>x=self.regressor(x)<line_sep><return>x<block_end><def_stmt>__repr__ self<block_start><return>self.__class__.__name__+'('+'Feature Exctractor='+str(self.feature_extractor)+'\n Set Feature'+str(self.regressor)+')'<block_end><block_end>
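A quick shape check for the set-pooling models above (a sketch; batch and set sizes are arbitrary): each set element is embedded, the embeddings are summed over the set dimension, and the pooled vector is regressed to a scalar.

model = DeepSet(in_features=10)
x = torch.rand(4, 7, 10)   # batch of 4 sets, 7 elements each, dimension 10
out = model(x)             # embed -> sum over dim=1 -> regress
print(out.shape)           # torch.Size([4, 1])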
<import_from_stmt>decimal Decimal<import_from_stmt>unittest TestCase<import_from_stmt>hummingbot.core.data_type.common TradeType PositionAction<import_from_stmt>hummingbot.core.data_type.in_flight_order TradeUpdate<import_from_stmt>hummingbot.core.data_type.trade_fee AddedToCostTradeFee DeductedFromReturnsTradeFee TokenAmount TradeFeeBase TradeFeeSchema <class_stmt>TradeFeeTests(TestCase)<block_start><def_stmt>test_added_to_cost_spot_fee_created_for_buy_and_fee_not_deducted_from_return self<block_start>schema=TradeFeeSchema(percent_fee_token="HBOT" maker_percent_fee_decimal=Decimal("1") taker_percent_fee_decimal=Decimal("1") buy_percent_fee_deducted_from_returns=<false> )<line_sep>fee=TradeFeeBase.new_spot_fee(fee_schema=schema trade_type=TradeType.BUY percent=Decimal("1.1") percent_token="HBOT" flat_fees=[TokenAmount(token="COINALPHA" amount=Decimal("20"))])<line_sep>self.assertEqual(AddedToCostTradeFee type(fee))<line_sep>self.assertEqual(Decimal("1.1") fee.percent)<line_sep>self.assertEqual("HBOT" fee.percent_token)<line_sep>self.assertEqual([TokenAmount(token="COINALPHA" amount=Decimal("20"))] fee.flat_fees)<block_end><def_stmt>test_deducted_from_return_spot_fee_created_for_buy_and_fee_deducted_from_return self<block_start>schema=TradeFeeSchema(maker_percent_fee_decimal=Decimal("1") taker_percent_fee_decimal=Decimal("1") buy_percent_fee_deducted_from_returns=<true> )<line_sep>fee=TradeFeeBase.new_spot_fee(fee_schema=schema trade_type=TradeType.BUY percent=Decimal("1.1") percent_token="HBOT" flat_fees=[TokenAmount(token="COINALPHA" amount=Decimal("20"))])<line_sep>self.assertEqual(DeductedFromReturnsTradeFee type(fee))<line_sep>self.assertEqual(Decimal("1.1") fee.percent)<line_sep>self.assertEqual("HBOT" fee.percent_token)<line_sep>self.assertEqual([TokenAmount(token="COINALPHA" amount=Decimal("20"))] fee.flat_fees)<block_end><def_stmt>test_deducted_from_return_spot_fee_created_for_sell self<block_start>schema=TradeFeeSchema(percent_fee_token="HBOT" maker_percent_fee_decimal=Decimal("1") taker_percent_fee_decimal=Decimal("1") buy_percent_fee_deducted_from_returns=<false> )<line_sep>fee=TradeFeeBase.new_spot_fee(fee_schema=schema trade_type=TradeType.SELL percent=Decimal("1.1") percent_token="HBOT" flat_fees=[TokenAmount(token="COINALPHA" amount=Decimal("20"))])<line_sep>self.assertEqual(DeductedFromReturnsTradeFee type(fee))<line_sep>self.assertEqual(Decimal("1.1") fee.percent)<line_sep>self.assertEqual("HBOT" fee.percent_token)<line_sep>self.assertEqual([TokenAmount(token="COINALPHA" amount=Decimal("20"))] fee.flat_fees)<line_sep>schema.percent_fee_token=<none><line_sep>schema.buy_percent_fee_deducted_from_returns=<true><line_sep>fee=TradeFeeBase.new_spot_fee(fee_schema=schema trade_type=TradeType.SELL percent=Decimal("1.1") percent_token="HBOT" flat_fees=[TokenAmount(token="COINALPHA" amount=Decimal("20"))])<line_sep>self.assertEqual(DeductedFromReturnsTradeFee type(fee))<block_end><def_stmt>test_added_to_cost_perpetual_fee_created_when_opening_positions self<block_start>schema=TradeFeeSchema(maker_percent_fee_decimal=Decimal("1") taker_percent_fee_decimal=Decimal("1") buy_percent_fee_deducted_from_returns=<false> )<line_sep>fee=TradeFeeBase.new_perpetual_fee(fee_schema=schema position_action=PositionAction.OPEN percent=Decimal("1.1") percent_token="HBOT" flat_fees=[TokenAmount(token="COINALPHA" amount=Decimal("20"))])<line_sep>self.assertEqual(AddedToCostTradeFee type(fee))<line_sep>self.assertEqual(Decimal("1.1") fee.percent)<line_sep>self.assertEqual("HBOT" 
fee.percent_token)<line_sep>self.assertEqual([TokenAmount(token="COINALPHA" amount=Decimal("20"))] fee.flat_fees)<line_sep>schema.percent_fee_token="HBOT"<line_sep>fee=TradeFeeBase.new_perpetual_fee(fee_schema=schema position_action=PositionAction.OPEN percent=Decimal("1.1") percent_token="HBOT" flat_fees=[TokenAmount(token="COINALPHA" amount=Decimal("20"))])<line_sep>self.assertEqual(AddedToCostTradeFee type(fee))<block_end><def_stmt>test_added_to_cost_perpetual_fee_created_when_closing_position_but_schema_has_percent_fee_token self<block_start>schema=TradeFeeSchema(percent_fee_token="HBOT" maker_percent_fee_decimal=Decimal("1") taker_percent_fee_decimal=Decimal("1") buy_percent_fee_deducted_from_returns=<false> )<line_sep>fee=TradeFeeBase.new_perpetual_fee(fee_schema=schema position_action=PositionAction.CLOSE percent=Decimal("1.1") percent_token="HBOT" flat_fees=[TokenAmount(token="COINALPHA" amount=Decimal("20"))])<line_sep>self.assertEqual(AddedToCostTradeFee type(fee))<line_sep>self.assertEqual(Decimal("1.1") fee.percent)<line_sep>self.assertEqual("HBOT" fee.percent_token)<line_sep>self.assertEqual([TokenAmount(token="COINALPHA" amount=Decimal("20"))] fee.flat_fees)<block_end><def_stmt>test_deducted_from_returns_perpetual_fee_created_when_closing_position_and_no_percent_fee_token self<block_start>schema=TradeFeeSchema(maker_percent_fee_decimal=Decimal("1") taker_percent_fee_decimal=Decimal("1") buy_percent_fee_deducted_from_returns=<false> )<line_sep>fee=TradeFeeBase.new_perpetual_fee(fee_schema=schema position_action=PositionAction.CLOSE percent=Decimal("1.1") percent_token="HBOT" flat_fees=[TokenAmount(token="COINALPHA" amount=Decimal("20"))])<line_sep>self.assertEqual(DeductedFromReturnsTradeFee type(fee))<line_sep>self.assertEqual(Decimal("1.1") fee.percent)<line_sep>self.assertEqual("HBOT" fee.percent_token)<line_sep>self.assertEqual([TokenAmount(token="COINALPHA" amount=Decimal("20"))] fee.flat_fees)<block_end><def_stmt>test_added_to_cost_json_serialization self<block_start>token_amount=TokenAmount(token="COINALPHA" amount=Decimal("20.6"))<line_sep>fee=AddedToCostTradeFee(percent=Decimal("0.5") percent_token="COINALPHA" flat_fees=[token_amount])<line_sep>expected_json={"fee_type":AddedToCostTradeFee.type_descriptor_for_json() "percent":"0.5" "percent_token":"COINALPHA" "flat_fees":[token_amount.to_json()]}<line_sep>self.assertEqual(expected_json fee.to_json())<block_end><def_stmt>test_added_to_cost_json_deserialization self<block_start>token_amount=TokenAmount(token="COINALPHA" amount=Decimal("20.6"))<line_sep>fee=AddedToCostTradeFee(percent=Decimal("0.5") percent_token="COINALPHA" flat_fees=[token_amount])<line_sep>self.assertEqual(fee TradeFeeBase.from_json(fee.to_json()))<block_end><def_stmt>test_deducted_from_returns_json_serialization self<block_start>token_amount=TokenAmount(token="COINALPHA" amount=Decimal("20.6"))<line_sep>fee=DeductedFromReturnsTradeFee(percent=Decimal("0.5") percent_token="COINALPHA" flat_fees=[token_amount])<line_sep>expected_json={"fee_type":DeductedFromReturnsTradeFee.type_descriptor_for_json() "percent":"0.5" "percent_token":"COINALPHA" "flat_fees":[token_amount.to_json()]}<line_sep>self.assertEqual(expected_json fee.to_json())<block_end><def_stmt>test_deducted_from_returns_json_deserialization self<block_start>token_amount=TokenAmount(token="CO<PASSWORD>" amount=Decimal("20.6"))<line_sep>fee=DeductedFromReturnsTradeFee(percent=Decimal("0.5") percent_token="COINALPHA" flat_fees=[token_amount])<line_sep>self.assertEqual(fee 
TradeFeeBase.from_json(fee.to_json()))<block_end><def_stmt>test_added_to_cost_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero self# Configure fee to use a percent token different from the token used to request the fee value # That forces the logic to need the convertion rate if the fee amount is calculated <block_start>fee=AddedToCostTradeFee(percent=Decimal("0") percent_token="CO<PASSWORD>")<line_sep>fee_amount=fee.fee_amount_in_token(trading_pair="HBOT-COINALPHA" price=Decimal("1000") order_amount=Decimal("1") token="BNB")<line_sep>self.assertEqual(Decimal("0") fee_amount)<block_end><def_stmt>test_deducted_from_returns_fee_amount_in_token_does_not_look_for_convertion_rate_when_percentage_zero self# Configure fee to use a percent token different from the token used to request the fee value # That forces the logic to need the convertion rate if the fee amount is calculated <block_start>fee=DeductedFromReturnsTradeFee(percent=Decimal("0") percent_token="CO<PASSWORD>")<line_sep>fee_amount=fee.fee_amount_in_token(trading_pair="HBOT-COINALPHA" price=Decimal("1000") order_amount=Decimal("1") token="BNB")<line_sep>self.assertEqual(Decimal("0") fee_amount)<block_end><block_end><class_stmt>TokenAmountTests(TestCase)<block_start><def_stmt>test_json_serialization self<block_start>amount=TokenAmount(token="HBOT-COINALPHA" amount=Decimal("1000.50"))<line_sep>expected_json={"token":"HBOT-COINALPHA" "amount":"1000.50" }<line_sep>self.assertEqual(expected_json amount.to_json())<block_end><def_stmt>test_json_deserialization self<block_start>amount=TokenAmount(token="HBOT-COINALPHA" amount=Decimal("1000.50"))<line_sep>self.assertEqual(amount TokenAmount.from_json(amount.to_json()))<block_end><block_end><class_stmt>TradeUpdateTests(TestCase)<block_start><def_stmt>test_json_serialization self<block_start>token_amount=TokenAmount(token="COINALPHA" amount=Decimal("20.6"))<line_sep>fee=DeductedFromReturnsTradeFee(percent=Decimal("0.5") percent_token="COINALPHA" flat_fees=[token_amount])<line_sep>trade_update=TradeUpdate(trade_id="12345" client_order_id="OID1" exchange_order_id="EOID1" trading_pair="HBOT-COINALPHA" fill_timestamp=1640001112 fill_price=Decimal("1000.11") fill_base_amount=Decimal("2") fill_quote_amount=Decimal("2000.22") fee=fee )<line_sep>expected_json=trade_update._asdict()<line_sep>expected_json.update({"fill_price":"1000.11" "fill_base_amount":"2" "fill_quote_amount":"2000.22" "fee":fee.to_json() })<line_sep>self.assertEqual(expected_json trade_update.to_json())<block_end><def_stmt>test_json_deserialization self<block_start>token_amount=TokenAmount(token="COINALPHA" amount=Decimal("20.6"))<line_sep>fee=DeductedFromReturnsTradeFee(percent=Decimal("0.5") percent_token="CO<PASSWORD>" flat_fees=[token_amount])<line_sep>trade_update=TradeUpdate(trade_id="12345" client_order_id="OID1" exchange_order_id="EOID1" trading_pair="HBOT-COINALPHA" fill_timestamp=1640001112 fill_price=Decimal("1000.11") fill_base_amount=Decimal("2") fill_quote_amount=Decimal("2000.22") fee=fee )<line_sep>self.assertEqual(trade_update TradeUpdate.from_json(trade_update.to_json()))<block_end><block_end>
#---------------------------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. #---------------------------------------------------------------------------------------------- <import_from_future_stmt> absolute_import<import_from_future_stmt> print_function<import_stmt>os<import_from_stmt>mmdnn.conversion.examples.darknet darknet<as>cdarknet<import_from_stmt>mmdnn.conversion.examples.imagenet_test TestKit<import_from_stmt>mmdnn.conversion.examples.extractor base_extractor<import_from_stmt>mmdnn.conversion.common.utils download_file<class_stmt>darknet_extractor(base_extractor)<block_start>_base_model_url="https://raw.githubusercontent.com/pjreddie/darknet/master/"<line_sep>architecture_map={'yolov3':{'config':_base_model_url+"cfg/yolov3.cfg" 'weights':"https://pjreddie.com/media/files/yolov3.weights"} 'yolov2':{'config':_base_model_url+"cfg/yolov2.cfg" 'weights':"https://pjreddie.com/media/files/yolov2.weights"}}<line_sep>@classmethod<def_stmt>download cls architecture path='./'<block_start><if_stmt>cls.sanity_check(architecture)<block_start>cfg_name=architecture+".cfg"<line_sep>architecture_file=download_file(cls.architecture_map[architecture]['config'] directory=path local_fname=cfg_name)<if_stmt><not>architecture_file<block_start><return><none><block_end>weight_name=architecture+".weights"<line_sep>weight_file=download_file(cls.architecture_map[architecture]['weights'] directory=path local_fname=weight_name)<if_stmt><not>weight_file<block_start><return><none><block_end>print("Darknet Model {} saved as [{}] and [{}].".format(architecture architecture_file weight_file))<line_sep><return>(architecture_file weight_file)<block_end><else_stmt><block_start><return><none><block_end><block_end>@classmethod<def_stmt>inference cls architecture files model_path image_path<block_start><import_stmt>numpy<as>np<if_stmt>cls.sanity_check(architecture)<block_start>download_file(cls._base_model_url+"cfg/coco.data" directory='./')<line_sep>download_file(cls._base_model_url+"data/coco.names" directory='./data/')<line_sep>print(files)<line_sep>net=cdarknet.load_net(files[0].encode() files[1].encode() 0)<line_sep>meta=cdarknet.load_meta("coco.data".encode())<line_sep>r=cdarknet.detect(net meta image_path.encode())<line_sep># print(r) <return>r<block_end><else_stmt><block_start><return><none><block_end><block_end><block_end># d = darknet_extractor() # model_filename = d.download('yolov3') # print(model_filename) # image_path = "./mmdnn/conversion/examples/data/dog.jpg" # model_path = "./" # d = darknet_extractor() # result = d.inference('yolov3', model_filename, model_path, image_path = image_path) # print(result)
# Copyright (c) 2019-present, Facebook, Inc. # All rights reserved. # # This source code is licensed under the license found in the # LICENSE file in the root directory of this source tree. # <def_stmt>f_gold a b n<block_start>s=0<for_stmt>i range(0 n)<block_start>s<augadd>a[i]+b[i]<block_end><if_stmt>n<eq>1<block_start><return>a[0]+b[0]<block_end><if_stmt>s%n<ne>0<block_start><return>-1<block_end>x=s<floordiv>n<for_stmt>i range(0 n)<block_start><if_stmt>a[i]<g>x<block_start><return>-1<block_end><if_stmt>i<g>0<block_start>a[i]<augadd>b[i-1]<line_sep>b[i-1]=0<block_end><if_stmt>a[i]<eq>x<block_start><continue><block_end>y=a[i]+b[i]<if_stmt>i+1<l>n<block_start>y<augadd>b[i+1]<block_end><if_stmt>y<eq>x<block_start>a[i]=y<line_sep>b[i]=0<if_stmt>i+1<l>n<block_start>b[i+1]=0<block_end><continue><block_end><if_stmt>a[i]+b[i]<eq>x<block_start>a[i]<augadd>b[i]<line_sep>b[i]=0<line_sep><continue><block_end><if_stmt>i+1<l>n<and>a[i]+b[i+1]<eq>x<block_start>a[i]<augadd>b[i+1]<line_sep>b[i+1]=0<line_sep><continue><block_end><return>-1<block_end><for_stmt>i range(0 n)<block_start><if_stmt>b[i]<ne>0<block_start><return>-1<block_end><block_end><return>x<block_end>#TOFILL <if_stmt>__name__<eq>'__main__'<block_start>param=[([4 9 16 18 20 23 24 25 25 26 29 30 35 40 41 43 44 46 53 53 56 56 58 60 62 70 80 80 80 82 86 90 92 92 95] [3 15 16 16 18 26 30 32 32 35 37 41 42 43 48 49 49 54 55 57 65 66 67 67 68 83 85 89 89 90 91 93 96 97 99] 29 ) ([-24 70 -74 -90 72 50 -94 86 -58 -68 42 0 98 -70 -14 -32 6 74 64 -78 86 -42 -56 2 -34 -46 70 -62 50 -58 -58 42 86 96 -8 8 -22 -14 -14 98 2 98 -28] [-26 36 48 48 -38 -86 90 -62 30 -4 82 16 32 -6 58 82 -66 -40 52 -78 94 -70 -80 -68 -58 -26 50 -78 -90 -48 -28 48 56 50 72 -22 -2 8 -94 92 -44 -66 -30] 34 ) ([0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1] [0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1] 13 ) ([98 18 50 36 88 75 2 40 74 19 63 82 77 5 59 97 70 50 71 90 90 61 63 99] [93 25 16 42 55 61 69 68 95 28 40 90 1 86 76 40 13 47 71 4 64 54 84 45] 16 ) ([-80 -64 -64 -64 -64 -62 -54 -48 -44 -44 -38 -30 -30 -26 -14 -12 -10 -6 -6 6 22 22 22 26 28 50 52 70 86 86 88 90] [-96 -94 -80 -74 -64 -56 -52 -32 -30 -24 -12 -12 -8 -2 4 8 16 20 24 24 24 48 50 54 60 64 74 80 88 90 92 92] 22 ) ([0 1 1 0 0 0 0 1 1 1 1 1 0 0 1 1 0 0 1 0 0 0 0 1 1 1 0 0 1 1 0 0 1] [1 1 1 0 1 1 0 0 0 1 0 0 1 0 1 0 0 0 0 0 0 1 1 1 1 1 0 0 0 0 0 1 1] 20 ) ([59 61 64] [22 59 85] 1 ) ([98 92 28 42 -74 -36 40 -8 32 -22 -70 -22 -56 74 6 6 -62 46 34 2] [-62 -84 72 60 10 -18 -44 -22 14 0 76 72 96 -28 -24 52 -74 -30 16 66] 18 ) ([0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] [0 0 0 0 0 0 0 0 0 0 0 0 0 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1 1] 34 ) ([72 97 79 21 83 2 31 59 6 11 79 97] [27 71 87 36 73 37 80 34 57 17 88 52] 9 )]<line_sep>n_success=0<for_stmt>i,parameters_set enumerate(param)<block_start><if_stmt>f_filled(*parameters_set)<eq>f_gold(*parameters_set)<block_start>n_success<augadd>1<block_end><block_end>print("#Results: %i, %i"%(n_success len(param)))<block_end>
<import_from_future_stmt> absolute_import<import_stmt>autograd.numpy<as>np<import_stmt>scipy.stats<import_from_stmt>autograd.extend primitive defvjp<import_from_stmt>autograd.numpy.numpy_vjps unbroadcast_f<import_from_stmt>autograd.scipy.special beta psi<line_sep>cdf=primitive(scipy.stats.beta.cdf)<line_sep>logpdf=primitive(scipy.stats.beta.logpdf)<line_sep>pdf=primitive(scipy.stats.beta.pdf)<def_stmt>grad_beta_logpdf_arg0 x a b<block_start><return>(1+a<times>(x-1)+x<times>(b-2))/(x<times>(x-1))<block_end><def_stmt>grad_beta_logpdf_arg1 x a b<block_start><return>np.log(x)-psi(a)+psi(a+b)<block_end><def_stmt>grad_beta_logpdf_arg2 x a b<block_start><return>np.log1p(-x)-psi(b)+psi(a+b)<block_end>defvjp(cdf <lambda>ans x a b:unbroadcast_f(x <lambda>g:g<times>np.power(x a-1)<times>np.power(1-x b-1)/beta(a b)) argnums=[0])<line_sep>defvjp(logpdf <lambda>ans x a b:unbroadcast_f(x <lambda>g:g<times>grad_beta_logpdf_arg0(x a b)) <lambda>ans x a b:unbroadcast_f(a <lambda>g:g<times>grad_beta_logpdf_arg1(x a b)) <lambda>ans x a b:unbroadcast_f(b <lambda>g:g<times>grad_beta_logpdf_arg2(x a b)))<line_sep>defvjp(pdf <lambda>ans x a b:unbroadcast_f(x <lambda>g:g<times>ans<times>grad_beta_logpdf_arg0(x a b)) <lambda>ans x a b:unbroadcast_f(a <lambda>g:g<times>ans<times>grad_beta_logpdf_arg1(x a b)) <lambda>ans x a b:unbroadcast_f(b <lambda>g:g<times>ans<times>grad_beta_logpdf_arg2(x a b)))<line_sep>
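Because the module registers VJPs for the wrapped scipy functions, they become differentiable under autograd; a small sanity check using the logpdf primitive defined above:

from autograd import grad

# d/dx log Beta(x; a=2, b=3) = (a-1)/x - (b-1)/(1-x)
dlogpdf_dx = grad(lambda x: logpdf(x, 2.0, 3.0))
print(dlogpdf_dx(0.3))  # ~0.4762, i.e. 1/0.3 - 2/0.7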
"""create tokens table Revision ID: 1<PASSWORD> Revises: Create Date: 2020-12-12 01:44:28.195736 """<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<import_from_stmt>sqlalchemy Table Column Integer String Boolean DateTime ForeignKey<import_from_stmt>sqlalchemy.engine.reflection Inspector<import_from_stmt>flask_sqlalchemy SQLAlchemy<line_sep># revision identifiers, used by Alembic. revision='1<PASSWORD>'<line_sep>down_revision=<none><line_sep>branch_labels=<none><line_sep>depends_on=<none><line_sep>db=SQLAlchemy()<def_stmt>upgrade <block_start>conn=op.get_bind()<line_sep>inspector=Inspector.from_engine(conn)<line_sep>tables=inspector.get_table_names()<if_stmt>'ips'<not><in>tables<block_start>op.create_table('ips' sa.Column('id' sa.Integer primary_key=<true>) sa.Column('address' sa.String(255) nullable=<true>))<block_end><if_stmt>'tokens'<not><in>tables<block_start>op.create_table('tokens' sa.Column('name' String(255) primary_key=<true>) sa.Column('expiration_date' DateTime nullable=<true>) sa.Column('max_usage' Integer default=1) sa.Column('used' Integer default=0) sa.Column('disabled' Boolean default=<false>) sa.Column('ips' Integer ForeignKey('association.id')))<block_end><else_stmt><block_start><try_stmt><block_start><with_stmt>op.batch_alter_table('tokens')<as>batch_op<block_start>batch_op.alter_column('ex_date' new_column_name='expiration_date' nullable=<true>)<line_sep>batch_op.alter_column('one_time' new_column_name='max_usage')<line_sep>batch_op.add_column(Column('disabled' Boolean default=<false>))<block_end><block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><if_stmt>'association'<not><in>tables<block_start>op.create_table('association' db.Model.metadata Column('ips' String ForeignKey('ips.address') primary_key=<true>) Column('tokens' Integer ForeignKey('tokens.name') primary_key=<true>))<block_end>op.execute("update tokens set expiration_date=null where expiration_date='None'")<block_end><def_stmt>downgrade <block_start>op.alter_column('tokens' 'expiration_date' new_column_name='ex_date')<line_sep>op.alter_column('tokens' 'max_usage' new_column_name='one_time')<block_end>
#-*- Python -*- # # Copyright 2017 Google Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # <import_stmt>sys<import_stmt>os<import_stmt>re<import_from_stmt>packageUtils IsPackageVersionSufficient<import_from_stmt>packageUtils UsesRPM<import_from_stmt>packageUtils FileContains<import_from_stmt>packageUtils GetDEBPackageInfo<line_sep>failure_list=[]<line_sep># test FileContains test_file='/etc/profile'<if_stmt>FileContains('/IDontExist' re.compile(r"a"))<block_start>failure_list.append("FileContains Failed: returned true for non-existing file")<block_end><if_stmt>FileContains(test_file re.compile(r"PROFILE"))<eq><false><block_start>failure_list.append("FileContains Failed: did not find PROFILE in /etc/profile")<block_end><if_stmt>FileContains(test_file re.compile(r"not anything here"))<block_start>failure_list.append("FileContains Failed: found garbage search string in /etc/profile")<block_end># test UsesRPM print("Basic checks for Ubuntu vs RPM\nMake sure these coincide with your current system.\n\n")<line_sep>uses_rpm="does not use RPM"<if_stmt>UsesRPM()<block_start>uses_rpm="uses RPM"<block_end>print("This machine %s"%uses_rpm)<line_sep># test GetDEBPackageInfo for non-RPM systems <if_stmt>UsesRPM()<eq><false><block_start>package_name="gdal-ge"<line_sep>package_results=GetDEBPackageInfo(package_name)<if_stmt>len(package_results)<ne>2<or>package_results[1]<eq><false><block_start>failure_list.append("%s not installed: GetDEBPackageInfo returns %s"%(package_name package_results))<block_end><block_end># test Package check valid_test_packages=[['apache-ge-devel' '2.2.2'] ['apache-ge-devel' '2.2.2.1'] ['jdk-ge' '1.6.0-1'] ['jdk-ge' '1.6.0-0']]<line_sep>invalid_test_packages=[['apache-ge-devel9' '2.2.2'] ['apache-ge-devel' '10.2.2.1'] ['j9dk-ge' '1.6.0-1'] ['jdk-ge' '1.99.0-0']]<line_sep><for_stmt>package_list valid_test_packages<block_start><if_stmt>IsPackageVersionSufficient(package_list[0] package_list[1])<eq><false><block_start>failure_list.append("Failed test that should pass: %s"%(package_list))<block_end><block_end>print("Test is now looking for invalid packages (error messages expected until tests are complete).\n\n")<for_stmt>package_list invalid_test_packages<block_start><if_stmt>IsPackageVersionSufficient(package_list[0] package_list[1])<block_start>failure_list.append("Passed test that should fail: %s"%(package_list))<block_end><block_end>print("\n\nTests complete.\n\n")<if_stmt>len(failure_list)<g>0<block_start>print("\n\n%s TEST FAILURES"%len(failure_list))<for_stmt>s failure_list<block_start>print(s)<block_end><block_end><else_stmt><block_start>print("\n\nSUCCESS: All tests succeeded!")<block_end>
<import_stmt>structlog<import_from_stmt>django.contrib.contenttypes.models ContentType<import_from_stmt>django.http JsonResponse<import_from_stmt>django.views.decorators.cache never_cache<import_from_stmt>django.views.decorators.csrf csrf_exempt<import_from_stmt>.models Like<import_from_stmt>.templatetags.likes_tags liked_count<line_sep>logger=structlog.get_logger("django_structlog")<line_sep>@never_cache@csrf_exempt<def_stmt>json_set_like request content_type_id object_id<block_start>""" Sets the object as a favorite for the current user """<line_sep>result={"success":<false> }<if_stmt>request.user.is_authenticated<and>request.method<eq>"POST"<block_start>content_type=ContentType.objects.get(id=content_type_id)<line_sep>obj=content_type.get_object_for_this_type(pk=object_id)<line_sep>like,is_created=Like.objects.get_or_create(content_type=ContentType.objects.get_for_model(obj) object_id=obj.pk user=request.user)<if_stmt>is_created<block_start>logger.info("like_created" content_type_id=content_type.pk object_id=obj.pk)<block_end><else_stmt><block_start>like.delete()<line_sep>logger.info("like_deleted" content_type_id=content_type.pk object_id=obj.pk)<block_end>result={"success":<true> "action":"add"<if>is_created<else>"remove" "count":liked_count(obj) }<block_end><return>JsonResponse(result)<block_end>
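A hypothetical URL pattern that would expose the view above (the route prefix and name are assumptions):

from django.urls import path
from . import views

urlpatterns = [
    path("like/<int:content_type_id>/<int:object_id>/", views.json_set_like, name="json_set_like"),
]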
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.5' # jupytext_version: 1.11.3 # kernelspec: # display_name: Python 3 # name: python3 # --- # + [markdown] id="M_qo7DmLJKLP" # #Class-Conditional Bernoulli Mixture Model for EMNIST # + [markdown] id="TU1pCzcIJHTm" # ## Setup # # + id="400WanLyGA2C" # !git clone --depth 1 https://github.com/probml/pyprobml /pyprobml &> /dev/null # %cd -q /pyprobml/scripts # + id="k1rLl6dHH7Wh" # !pip install -q superimport # !pip install -q distrax # + id="cLpBn5KQeB46" <import_from_stmt>conditional_bernoulli_mix_lib ClassConditionalBMM<import_from_stmt>conditional_bernoulli_mix_utils fake_test_data encode decode get_decoded_samples get_emnist_images_per_class<import_from_stmt>noisy_spelling_hmm Word<import_from_stmt>jax vmap<import_stmt>jax.numpy<as>jnp<import_stmt>jax<import_from_stmt>jax.random PRNGKey split<import_stmt>numpy<as>np<import_from_stmt>matplotlib pyplot<as>plt<line_sep># + colab={"base_uri": "https://localhost:8080/"} id="ey9k06RweuKc" outputId="38131e5a-82fb-49db-c4d3-f4364a643152" select_n=25<line_sep>dataset,targets=get_emnist_images_per_class(select_n)<line_sep>dataset,targets=jnp.array(dataset) jnp.array(targets)<line_sep># + [markdown] id="KwNq7HYYLPO9" # ## Initialization of Class Conditional BMMs # + colab={"base_uri": "https://localhost:8080/"} id="UABtUDPjffFt" outputId="d873a708-542c-44e6-8c72-2c5908c7bbad" n_mix=30<line_sep>n_char=52<line_sep>mixing_coeffs=jnp.array(np.full((n_char n_mix) 1./n_mix))<line_sep>p_min,p_max=0.4 0.6<line_sep>n_pixels=28<times>28<line_sep>probs=jnp.array(np.random.uniform(p_min p_max (n_char n_mix n_pixels)))<line_sep>class_priors=jnp.array(np.full((n_char ) 1./n_char))<line_sep>cbm_gd=ClassConditionalBMM(mixing_coeffs=mixing_coeffs probs=probs class_priors=class_priors n_char=n_char)<line_sep>cbm_em=ClassConditionalBMM(mixing_coeffs=mixing_coeffs probs=probs class_priors=class_priors n_char=n_char)<line_sep># + [markdown] id="Qa95Fua5Kc3i" # ## Full Batch Gradient Descentt # + colab={"base_uri": "https://localhost:8080/", "height": 336} id="PDzuEjs9Kewi" outputId="c81916c0-c6b7-45bd-d308-eab878afe281" num_epochs,batch_size=100 len(dataset)<line_sep>losses=cbm_gd.fit_sgd(dataset.reshape((-1 n_pixels)) targets batch_size num_epochs=num_epochs)<line_sep>plt.plot(losses color="k" linewidth=3)<line_sep>plt.xlabel("Iteration")<line_sep>plt.ylabel("Negative Log Likelihood")<line_sep>plt.show()<line_sep># + [markdown] id="37mNMNrpInfh" # ## EM Algorithm # + colab={"base_uri": "https://localhost:8080/", "height": 336} id="FJeBzIKYfsUk" outputId="9d8db485-a251-4b1a-a6e5-93833c83dce6" losses=cbm_em.fit_em(dataset targets 8)<line_sep>plt.plot(losses color="k" linewidth=3)<line_sep>plt.xlabel("Iteration")<line_sep>plt.ylabel("Negative Log Likelihood")<line_sep>plt.show()<line_sep># + [markdown] id="NjCQpoH1Iuuf" # ## Plot of the Probabilities of Components Distribution # + id="KkyAHDW4JgyM" <def_stmt>plot_components_dist cbm n_mix<block_start>fig=plt.figure(figsize=(45 20))<for_stmt>k range(n_mix)<block_start><for_stmt>cls range(cbm.num_of_classes)<block_start>plt.subplot(n_mix cbm.num_of_classes cbm.num_of_classes<times>k+cls+1)<line_sep>plt.imshow(1-cbm.model.components_distribution.distribution.probs[cls][k :].reshape((28 28)) cmap="gray")<line_sep>plt.axis('off')<block_end><block_end>plt.tight_layout()<line_sep>plt.show()<block_end># + [markdown] id="J8KLkCWpNAeF" # ### GD # + colab={"base_uri": "https://localhost:8080/", "height": 666} 
id="DSOiuNeAM8gl" outputId="dce9416a-b646-423d-b4bf-c78728db1cab" plot_components_dist(cbm_gd n_mix)<line_sep># + [markdown] id="FO31plUVNDSO" # ### EM # + id="ZM43qs6FfvlP" colab={"base_uri": "https://localhost:8080/", "height": 666} outputId="81a095f1-1099-4809-90a8-272dbed11662" plot_components_dist(cbm_em n_mix)<line_sep># + [markdown] id="IqRdcklzOeAY" # ## Sampling # + id="wgI6sFWKN4ax" p1,p2,p3=0.4 0.1 2e-3<line_sep>n_misspelled=1# number of misspelled words created for each class vocab=['book' 'bird' 'bond' 'bone' 'bank' 'byte' 'pond' 'mind' 'song' 'band']<line_sep>rng_key=PRNGKey(0)<line_sep>keys=[dev_array<for>dev_array split(rng_key len(vocab))]<line_sep># + id="x3GpZ8jbf11N" colab={"base_uri": "https://localhost:8080/"} outputId="5a348b69-bdf4-4f80-f059-1062ba2fbb88" hmms={word:Word(word p1 p2 p3 n_char "all" mixing_coeffs=cbm_em.model.mixture_distribution.probs initial_probs=cbm_em.model.components_distribution.distribution.probs n_mix=n_mix)<for>word vocab}<line_sep>samples=jax.tree_multimap(<lambda>word key:hmms[word].n_sample(n_misspelled key) vocab keys)<line_sep># + id="7VXVsobcg_KO" colab={"base_uri": "https://localhost:8080/"} outputId="3e915a79-7f5c-4131-d6ee-97f11c83d86f" decoded_words=vmap(decode in_axes=(0 <none> <none>))(jnp.array(samples)[: : : -1].reshape((n_misspelled<times>len(vocab) -1)) n_char+1 "all")<line_sep>get_decoded_samples(decoded_words)<line_sep># + [markdown] id="xrRy8MG0afR8" # ### Figure # + id="O0-HaN5rQAvP" <def_stmt>plot_samples samples<block_start>samples=np.array(samples)[: : : :-1].reshape((-1 28 28))<line_sep>fig,axes=plt.subplots(ncols=4 nrows=10 figsize=(4 10))<line_sep>fig.subplots_adjust(hspace=.2 wspace=.001)<for_stmt>i,ax enumerate(axes.flatten())<block_start>ax.imshow(samples[i] cmap="gray")<line_sep>ax.set_axis_off()<block_end>fig.tight_layout()<line_sep>plt.show()<block_end># + id="EbZn9vrfhei4" colab={"base_uri": "https://localhost:8080/", "height": 728} outputId="114217bf-cadb-4331-82ef-b4844c038342" plot_samples(samples)<line_sep># + [markdown] id="eNDmwV7EPyrR" # ## Calculation of Log Likelihoods for Test Data # + id="525MUl5HPe1K" # noisy words test_words=['bo--' '-On-' 'b-N-' 'B---' '-OnD' 'b--D' '---D' '--Nd' 'B-nD' '-O--' 'b--d' '--n-']<line_sep>test_images=fake_test_data(test_words dataset targets n_char+1 "all")<line_sep># + id="1dFCdVNgPYtJ" <def_stmt>plot_log_likelihood hmms test_words test_images vocab<block_start>fig,axes=plt.subplots(4 3 figsize=(20 10))<for_stmt>i,(ax img word) enumerate(zip(axes.flat test_images test_words))<block_start>flattened_img=img.reshape((len(img) -1))<line_sep>loglikelihoods=jax.tree_map(<lambda>w:jnp.sum(hmms[w].loglikelihood(word flattened_img)) vocab)<line_sep>loglikelihoods=jnp.array(loglikelihoods)<line_sep>ax.bar(vocab jnp.exp(jax.nn.log_softmax(loglikelihoods)) color="black")<line_sep>ax.set_title(f'{word}')<block_end>plt.tight_layout()<line_sep>plt.show()<block_end># + id="qv-Df8GEhfC4" colab={"base_uri": "https://localhost:8080/", "height": 784} outputId="9be6abf3-0ecc-4ef5-e301-380c5eac38ff" plot_log_likelihood(hmms test_words test_images vocab)<line_sep>
<import_from_future_stmt> unicode_literals<import_stmt>unittest<import_from_stmt>ddf DDFManager DDF_HOME<class_stmt>BaseTest(unittest.TestCase)<block_start>@classmethod<def_stmt>setUpClass cls<block_start>cls.dm_spark=DDFManager('spark')<line_sep>cls.airlines=cls.loadAirlines(cls.dm_spark)<line_sep>cls.mtcars=cls.loadMtCars(cls.dm_spark)<block_end>@classmethod<def_stmt>tearDownClass cls<block_start>cls.dm_spark.shutdown()<block_end>@classmethod<def_stmt>loadAirlines cls dm<block_start>table_name='airlines_na_pyddf_unittest'<if_stmt>table_name<not><in>[x.split('\t')[0]<for>x dm.sql('show tables')]<block_start>dm.sql('set hive.metastore.warehouse.dir=/tmp' <false>)<line_sep>dm.sql('drop table if exists {}'.format(table_name) <false>)<line_sep>dm.sql("""create table {} (Year int,Month int,DayofMonth int, DayOfWeek int,DepTime int,CRSDepTime int,ArrTime int, CRSArrTime int,UniqueCarrier string, FlightNum int, TailNum string, ActualElapsedTime int, CRSElapsedTime int, AirTime int, ArrDelay int, DepDelay int, Origin string, Dest string, Distance int, TaxiIn int, TaxiOut int, Cancelled int, CancellationCode string, Diverted string, CarrierDelay int, WeatherDelay int, NASDelay int, SecurityDelay int, LateAircraftDelay int ) ROW FORMAT DELIMITED FIELDS TERMINATED BY ',' """.format(table_name) <false>)<line_sep>dm.sql("load data local inpath '{}/resources/test/airlineWithNA.csv' "<concat>"into table {}".format(DDF_HOME table_name) <false>)<block_end><return>dm.sql2ddf('select * from {}'.format(table_name) <false>)<block_end>@classmethod<def_stmt>loadMtCars cls dm<block_start>table_name='mtcars_pyddf_unittest'<if_stmt>table_name<not><in>[x.split('\t')[0]<for>x dm.sql('show tables')]<block_start>dm.sql('set shark.test.data.path=resources' <false>)<line_sep># session.sql('set hive.metastore.warehouse.dir=/tmp') dm.sql('drop table if exists {}'.format(table_name) <false>)<line_sep>dm.sql("CREATE TABLE {} (mpg double, cyl int, disp double, "<concat>"hp int, drat double, wt double, "<concat>"qesc double, vs int, am int, gear int, carb int)"<concat>" ROW FORMAT DELIMITED FIELDS TERMINATED BY ' '".format(table_name) <false>)<line_sep>dm.sql("LOAD DATA LOCAL INPATH '{}/resources/test/mtcars' "<concat>"INTO TABLE {}".format(DDF_HOME table_name) <false>)<block_end><return>dm.sql2ddf('select * from {}'.format(table_name) <false>)<block_end><block_end>
DEFINITIONS={"go":{"class_name":"CodeXGlueCcClozeTestingAll" "dataset_type":"Code-Code" "description":"CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "dir_name":"ClozeTesting-all" "name":"go" "parameters":{"language":"go"} "project_url":"https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "raw_url":"https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/go" "sizes":{"train":25282} } "java":{"class_name":"CodeXGlueCcClozeTestingAll" "dataset_type":"Code-Code" "description":"CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "dir_name":"ClozeTesting-all" "name":"java" "parameters":{"language":"java"} "project_url":"https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "raw_url":"https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/java" "sizes":{"train":40492} } "javascript":{"class_name":"CodeXGlueCcClozeTestingAll" "dataset_type":"Code-Code" "description":"CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "dir_name":"ClozeTesting-all" "name":"javascript" "parameters":{"language":"javascript"} "project_url":"https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "raw_url":"https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/javascript" "sizes":{"train":13837} } "php":{"class_name":"CodeXGlueCcClozeTestingAll" "dataset_type":"Code-Code" "description":"CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "dir_name":"ClozeTesting-all" "name":"php" "parameters":{"language":"php"} "project_url":"https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "raw_url":"https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/php" "sizes":{"train":51930} } "python":{"class_name":"CodeXGlueCcClozeTestingAll" "dataset_type":"Code-Code" "description":"CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "dir_name":"ClozeTesting-all" "name":"python" "parameters":{"language":"python"} "project_url":"https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "raw_url":"https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/python" "sizes":{"train":40137} } "ruby":{"class_name":"CodeXGlueCcClozeTestingAll" "dataset_type":"Code-Code" "description":"CodeXGLUE ClozeTesting-all dataset, available at https://github.com/microsoft/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "dir_name":"ClozeTesting-all" "name":"ruby" "parameters":{"language":"ruby"} "project_url":"https://github.com/madlag/CodeXGLUE/tree/main/Code-Code/ClozeTesting-all" "raw_url":"https://raw.githubusercontent.com/madlag/CodeXGLUE/main/Code-Code/ClozeTesting-all/data/cloze-all/ruby" "sizes":{"train":4437} } }<line_sep>
<import_from_stmt>var_plots plot_forecast<line_sep>plot_forecast()<line_sep>
# # Copyright (c) 2022 salesforce.com, inc. # All rights reserved. # SPDX-License-Identifier: BSD-3-Clause # For full license text, see the LICENSE file in the repo root or https://opensource.org/licenses/BSD-3-Clause # <import_from_stmt>abc ABC<import_stmt>logging<import_stmt>os<import_from_stmt>os.path abspath dirname join<import_stmt>sys<import_stmt>unittest<import_stmt>torch<import_stmt>random<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>merlion.models.defaults DefaultDetector DefaultDetectorConfig<import_from_stmt>merlion.plot plot_anoms_plotly<import_from_stmt>merlion.post_process.threshold AggregateAlarms<import_from_stmt>merlion.utils TimeSeries<import_from_stmt>ts_datasets.anomaly *<line_sep>rootdir=dirname(dirname(dirname(abspath(__file__))))<line_sep>logger=logging.getLogger(__name__)<def_stmt>set_random_seeds <block_start>torch.manual_seed(12345)<line_sep>random.seed(12345)<line_sep>np.random.seed(12345)<block_end><def_stmt>get_train_test_splits df:pd.DataFrame metadata:pd.DataFrame n:int<arrow>(pd.DataFrame pd.DataFrame np.ndarray)<block_start>train_df=df[metadata.trainval]<line_sep>test_df=df[~metadata.trainval]<line_sep>test_labels=pd.DataFrame(metadata[~metadata.trainval].anomaly)<line_sep><return>train_df.tail(n) test_df.head(n) test_labels[:n]<block_end><class_stmt>Mixin(ABC)<block_start><def_stmt>test_score self<block_start>print("-"<times>80)<line_sep>logger.info("test_score\n"+"-"<times>80+"\n")<line_sep>self.run_init()<line_sep>logger.info("Training model...\n")<line_sep>train_ts=TimeSeries.from_pd(self.train_df)<line_sep>self.model.train(train_ts)<line_sep>test_ts=TimeSeries.from_pd(self.test_df)<line_sep>score_ts=self.model.get_anomaly_score(test_ts)<line_sep>scores=score_ts.to_pd().values.flatten()<line_sep>min_score,max_score,sum_score=min(scores) max(scores) sum(scores)<line_sep>logger.info(f"scores look like: {scores[:10]}")<line_sep>logger.info(f"min score = {min_score}")<line_sep>logger.info(f"max score = {max_score}")<line_sep>logger.info(f"sum score = {sum_score}")<block_end><def_stmt>test_save_load self<block_start>print("-"<times>80)<line_sep>logger.info("test_save_load\n"+"-"<times>80+"\n")<line_sep>self.run_init()<line_sep>logger.info("Training model...\n")<line_sep>train_ts=TimeSeries.from_pd(self.train_df)<line_sep>self.model.train(train_ts)<line_sep>multi=train_ts.dim<g>1<line_sep>path=join(rootdir "tmp" "default" "anom" "multi"<if>multi<else>"uni")<line_sep>self.model.save(dirname=path)<line_sep>loaded_model=DefaultDetector.load(dirname=path)<line_sep>test_ts=TimeSeries.from_pd(self.test_df)<line_sep>scores=self.model.get_anomaly_score(test_ts)<line_sep>scores_np=scores.to_pd().values.flatten()<line_sep>loaded_model_scores=loaded_model.get_anomaly_score(test_ts)<line_sep>loaded_model_scores=loaded_model_scores.to_pd().values.flatten()<line_sep>self.assertEqual(len(scores_np) len(loaded_model_scores))<line_sep>alarms=self.model.post_rule(scores)<line_sep>loaded_model_alarms=loaded_model.post_rule(scores)<line_sep>self.assertSequenceEqual(list(alarms) list(loaded_model_alarms))<block_end><def_stmt>test_plot self<block_start><try_stmt><block_start><import_stmt>plotly<line_sep>print("-"<times>80)<line_sep>logger.info("test_plot\n"+"-"<times>80+"\n")<line_sep>self.run_init()<line_sep>logger.info("Training model...\n")<line_sep>train_ts=TimeSeries.from_pd(self.train_df)<line_sep>self.model.train(train_ts)<line_sep>multi=train_ts.dim<g>1<line_sep>savedir=join(rootdir "tmp" "default" "anom")<line_sep>os.makedirs(savedir 
exist_ok=<true>)<line_sep>path=join(savedir ("multi"<if>multi<else>"uni")+".png")<line_sep>test_ts=TimeSeries.from_pd(self.test_df)<line_sep>fig=self.model.plot_anomaly_plotly(time_series=test_ts time_series_prev=train_ts plot_time_series_prev=<true>)<line_sep>plot_anoms_plotly(fig TimeSeries.from_pd(self.test_labels))<try_stmt><block_start><import_stmt>kaleido<line_sep>fig.write_image(path engine="kaleido")<block_end><except_stmt>ImportError<block_start>logger.info("kaleido not installed, not trying to save image")<block_end><block_end><except_stmt>ImportError<block_start>logger.info("plotly not installed, skipping test case")<block_end><block_end><block_end><class_stmt>TestUnivariate(unittest.TestCase Mixin)<block_start><def_stmt>run_init self<block_start>set_random_seeds()<line_sep>self.model=DefaultDetector(DefaultDetectorConfig(granularity="1h" threshold=AggregateAlarms(alm_threshold=1.5)))<line_sep># Time series with anomalies in both train split and test split df=pd.read_csv(join(rootdir "data" "synthetic_anomaly" "horizontal_spike_anomaly.csv"))<line_sep>df.timestamp=pd.to_datetime(df.timestamp unit="s")<line_sep>df=df.set_index("timestamp")<line_sep># Get training & testing splits self.train_df=df.iloc[:-len(df)<floordiv>2 :1]<line_sep>self.test_df=df.iloc[-len(df)<floordiv>2: :1]<line_sep>self.test_labels=df.iloc[-len(df)<floordiv>2: -1:]<block_end><block_end><class_stmt>TestMultivariate(unittest.TestCase Mixin)<block_start><def_stmt>run_init self<block_start>set_random_seeds()<line_sep>self.model=DefaultDetector(DefaultDetectorConfig(threshold=AggregateAlarms(alm_threshold=2)))<line_sep>self.dataset=MSL(rootdir=join(rootdir "data" "smap"))<line_sep>df,metadata=self.dataset[0]<line_sep>self.train_df,self.test_df,self.test_labels=get_train_test_splits(df metadata 2000)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>logging.basicConfig(format="%(asctime)s (%(module)s:%(lineno)d) %(levelname)s: %(message)s" stream=sys.stdout level=logging.INFO)<line_sep>unittest.main()<block_end>
<import_from_stmt>unicorn.arm_const *<def_stmt>stop uc<block_start>print_context(uc)<line_sep>input("...")<block_end><def_stmt>print_context uc<block_start>print("==== State ====")<line_sep>r0=uc.reg_read(UC_ARM_REG_R0)<line_sep>r1=uc.reg_read(UC_ARM_REG_R1)<line_sep>r2=uc.reg_read(UC_ARM_REG_R2)<line_sep>r3=uc.reg_read(UC_ARM_REG_R3)<line_sep>r4=uc.reg_read(UC_ARM_REG_R4)<line_sep>r5=uc.reg_read(UC_ARM_REG_R5)<line_sep>r7=uc.reg_read(UC_ARM_REG_R7)<line_sep>sp=uc.reg_read(UC_ARM_REG_SP)<line_sep>pc=uc.reg_read(UC_ARM_REG_PC)<line_sep>print("r0: 0x{:x}\nr1: 0x{:x}\nr2: 0x{:x}\nr3: 0x{:x}\nr4: 0x{:x}\nr5: 0x{:x}\nr7: 0x{:x}\npc: 0x{:x}\nsp: 0x{:x}".format(r0 r1 r2 r3 r4 r5 r7 pc sp))<block_end><def_stmt>breakpoint uc<block_start><import_stmt>ipdb<line_sep>ipdb.set_trace()<block_end>
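# Hypothetical wiring sketch, not part of the original helper module: hook print_context # into a minimal Unicorn ARM run; the mapped address and the code bytes (assumed to encode # 'mov r0, #0x37') are illustrative. <import_from_stmt>unicorn Uc UC_ARCH_ARM UC_MODE_ARM UC_HOOK_CODE<def_stmt>trace uc address size user_data# dump the register state via the helper above on every executed instruction <block_start>print_context(uc)<block_end><def_stmt>run_demo <block_start>code=b"\x37\x00\xa0\xe3"<line_sep>uc=Uc(UC_ARCH_ARM UC_MODE_ARM)<line_sep>uc.mem_map(0x10000 0x1000)<line_sep>uc.mem_write(0x10000 code)<line_sep>uc.hook_add(UC_HOOK_CODE trace)<line_sep>uc.emu_start(0x10000 0x10000+len(code))<block_end>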
# Copyright (c) 2019 Microsoft Corporation # Distributed under the MIT software license <import_stmt>sys<import_from_stmt>interpret.ext.extension_utils load_class_extensions<import_from_stmt>interpret.ext.extension GLASSBOX_EXTENSION_KEY _is_valid_glassbox_explainer<line_sep>load_class_extensions(sys.modules[__name__] GLASSBOX_EXTENSION_KEY _is_valid_glassbox_explainer)<line_sep>
# Copyright (c) 2012-2021, <NAME> <<EMAIL>> # All rights reserved. # # See LICENSE file for full license. <import_from_stmt>.aws Action<as>BaseAction<import_from_stmt>.aws BaseARN<line_sep>service_name="AWS Device Farm"<line_sep>prefix="devicefarm"<class_stmt>Action(BaseAction)<block_start><def_stmt>__init__ self action:str=<none><arrow><none><block_start>super().__init__(prefix action)<block_end><block_end><class_stmt>ARN(BaseARN)<block_start><def_stmt>__init__ self resource:str="" region:str="" account:str=""<arrow><none><block_start>super().__init__(service=prefix resource=resource region=region account=account)<block_end><block_end>CreateDevicePool=Action("CreateDevicePool")<line_sep>CreateInstanceProfile=Action("CreateInstanceProfile")<line_sep>CreateNetworkProfile=Action("CreateNetworkProfile")<line_sep>CreateProject=Action("CreateProject")<line_sep>CreateRemoteAccessSession=Action("CreateRemoteAccessSession")<line_sep>CreateTestGridProject=Action("CreateTestGridProject")<line_sep>CreateTestGridUrl=Action("CreateTestGridUrl")<line_sep>CreateUpload=Action("CreateUpload")<line_sep>CreateVPCEConfiguration=Action("CreateVPCEConfiguration")<line_sep>DeleteDevicePool=Action("DeleteDevicePool")<line_sep>DeleteInstanceProfile=Action("DeleteInstanceProfile")<line_sep>DeleteNetworkProfile=Action("DeleteNetworkProfile")<line_sep>DeleteProject=Action("DeleteProject")<line_sep>DeleteRemoteAccessSession=Action("DeleteRemoteAccessSession")<line_sep>DeleteRun=Action("DeleteRun")<line_sep>DeleteTestGridProject=Action("DeleteTestGridProject")<line_sep>DeleteUpload=Action("DeleteUpload")<line_sep>DeleteVPCEConfiguration=Action("DeleteVPCEConfiguration")<line_sep>GetAccountSettings=Action("GetAccountSettings")<line_sep>GetDevice=Action("GetDevice")<line_sep>GetDeviceInstance=Action("GetDeviceInstance")<line_sep>GetDevicePool=Action("GetDevicePool")<line_sep>GetDevicePoolCompatibility=Action("GetDevicePoolCompatibility")<line_sep>GetInstanceProfile=Action("GetInstanceProfile")<line_sep>GetJob=Action("GetJob")<line_sep>GetNetworkProfile=Action("GetNetworkProfile")<line_sep>GetOfferingStatus=Action("GetOfferingStatus")<line_sep>GetProject=Action("GetProject")<line_sep>GetRemoteAccessSession=Action("GetRemoteAccessSession")<line_sep>GetRun=Action("GetRun")<line_sep>GetSuite=Action("GetSuite")<line_sep>GetTest=Action("GetTest")<line_sep>GetTestGridProject=Action("GetTestGridProject")<line_sep>GetTestGridSession=Action("GetTestGridSession")<line_sep>GetUpload=Action("GetUpload")<line_sep>GetVPCEConfiguration=Action("GetVPCEConfiguration")<line_sep>InstallToRemoteAccessSession=Action("InstallToRemoteAccessSession")<line_sep>ListArtifacts=Action("ListArtifacts")<line_sep>ListDeviceInstances=Action("ListDeviceInstances")<line_sep>ListDevicePools=Action("ListDevicePools")<line_sep>ListDevices=Action("ListDevices")<line_sep>ListInstanceProfiles=Action("ListInstanceProfiles")<line_sep>ListJobs=Action("ListJobs")<line_sep>ListNetworkProfiles=Action("ListNetworkProfiles")<line_sep>ListOfferingPromotions=Action("ListOfferingPromotions")<line_sep>ListOfferingTransactions=Action("ListOfferingTransactions")<line_sep>ListOfferings=Action("ListOfferings")<line_sep>ListProjects=Action("ListProjects")<line_sep>ListRemoteAccessSessions=Action("ListRemoteAccessSessions")<line_sep>ListRuns=Action("ListRuns")<line_sep>ListSamples=Action("ListSamples")<line_sep>ListSuites=Action("ListSuites")<line_sep>ListTagsForResource=Action("ListTagsForResource")<line_sep>ListTestGridProjects=Action("ListTestGridProjects")<line_sep>ListTestGri
dSessionActions=Action("ListTestGridSessionActions")<line_sep>ListTestGridSessionArtifacts=Action("ListTestGridSessionArtifacts")<line_sep>ListTestGridSessions=Action("ListTestGridSessions")<line_sep>ListTests=Action("ListTests")<line_sep>ListUniqueProblems=Action("ListUniqueProblems")<line_sep>ListUploads=Action("ListUploads")<line_sep>ListVPCEConfigurations=Action("ListVPCEConfigurations")<line_sep>PurchaseOffering=Action("PurchaseOffering")<line_sep>RenewOffering=Action("RenewOffering")<line_sep>ScheduleRun=Action("ScheduleRun")<line_sep>StopJob=Action("StopJob")<line_sep>StopRemoteAccessSession=Action("StopRemoteAccessSession")<line_sep>StopRun=Action("StopRun")<line_sep>TagResource=Action("TagResource")<line_sep>UntagResource=Action("UntagResource")<line_sep>UpdateDeviceInstance=Action("UpdateDeviceInstance")<line_sep>UpdateDevicePool=Action("UpdateDevicePool")<line_sep>UpdateInstanceProfile=Action("UpdateInstanceProfile")<line_sep>UpdateNetworkProfile=Action("UpdateNetworkProfile")<line_sep>UpdateProject=Action("UpdateProject")<line_sep>UpdateTestGridProject=Action("UpdateTestGridProject")<line_sep>UpdateUpload=Action("UpdateUpload")<line_sep>UpdateVPCEConfiguration=Action("UpdateVPCEConfiguration")<line_sep>
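# Hypothetical usage sketch, not part of the generated action list: a read-only IAM policy # statement built from the actions above; the awacs.aws import and the wildcard resource # are assumptions for illustration. <import_from_stmt>.aws Allow PolicyDocument Statement<line_sep>read_only_policy=PolicyDocument(Version="2012-10-17" Statement=[Statement(Effect=Allow Action=[GetProject ListProjects ListDevices] Resource=["*"])])<line_sep>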
<import_stmt>collections<import_stmt>cPickle<as>pickle<import_stmt>glob<import_stmt>itertools<import_stmt>json<import_stmt>operator<import_stmt>os<import_stmt>re<import_stmt>sys<import_from_stmt>program_synthesis.karel.dataset dataset<import_from_stmt>program_synthesis.karel.dataset executor<import_from_stmt>program_synthesis.karel.dataset.karel_runtime KarelRuntime<import_from_stmt>program_synthesis.karel.models karel_model<import_from_stmt>program_synthesis.common.tools.saver restore_args<line_sep>BASE_DIR=""<with_stmt>open(BASE_DIR+"text2code-models/karel-sgd-cl1-lr1-lds100k-ldr0.5/report-dev-00100100.jsonl")<as>f<block_start>baseline_report=[]<line_sep>print(f.readline())<for_stmt>line f<block_start>baseline_report.append(json.loads(line))<block_end><block_end><class_stmt>Args(object)<block_start>model_dir=BASE_DIR+'program_synthesis-models/karel-lgrl-ref-m123-sgd-cl1-lr0.1-lds100k-ldr0.5'<line_sep>step=250100<block_end>args=Args()<line_sep>restore_args(args)<line_sep>args.word_vocab=',,/data/karel/word.vocab'<line_sep>m=karel_model.KarelLGRLRefineModel(args)<line_sep>batch_processor=m.batch_processor(for_eval=<true>)<line_sep>m.args.max_beam_trees=64<line_sep>m.args.max_eval_trials=64<line_sep>i=0<line_sep>result=[]<while_stmt>i<l>len(baseline_report)<block_start>batch=[]<while_stmt>len(batch)<l>32<and>i<l>len(baseline_report)<block_start><if_stmt>baseline_report[i]['code']['info']['trees_checked']<eq>1<block_start>i<augadd>1<line_sep><continue><block_end>e=dataset.KarelExample.from_dict(baseline_report[i]['example'])<line_sep>ref_code_sequence=baseline_report[i]['code']['info']['candidates'][0]<line_sep>e.ref_example=dataset.KarelExample(idx=<none> guid=<none> code_sequence=ref_code_sequence input_tests=e.input_tests tests=e.tests)<line_sep>batch.append(e)<line_sep>i<augadd>1<block_end>print("Starting batch (%d)..."%i)<line_sep>res=m.inference(batch_processor(batch))<for_stmt>example,infer zip(batch res)<block_start>result.append((example infer))<block_end><block_end># if i > 100: # break print(len(result) len(baseline_report))<line_sep>the_executor=executor.KarelExecutor()<line_sep>stats={'total':len(result) 'fixed':0}<line_sep>refinement_results=[]<for_stmt>example,infer result<block_start><if_stmt><not>infer.code_sequence<block_start><continue><block_end>correct=<true><for_stmt>test example.input_tests+example.tests<block_start><try_stmt><block_start>log=the_executor.execute(infer.code_sequence <none> test['input'])<if_stmt>log.result<ne>test['output']<block_start>correct=<false><line_sep><break><block_end><block_end><except_stmt>(executor.ExecutorRuntimeException executor.ExecutorSyntaxException)<as>e<block_start>correct=<false><line_sep><break><block_end><block_end>refinement_results.append(correct)<if_stmt>correct<block_start>stats['fixed']<augadd>1<block_end><block_end>print(float(stats['fixed'])/stats['total'] stats['fixed'] stats['total'])<line_sep>
<import_stmt>lxml.html<line_sep>html1=''' <!DOCTYPE html> <html> <head lang="en"> <meta charset="UTF-8"> <title></title> </head> <body> <div id="test-1-k">需要的内容1</div> <div id="test-2-k">需要的内容2</div> <div id="testfault-k">需要的内容3</div> <div id="useless">这是我不需要的内容</div> </body> </html> '''<line_sep># selector = lxml.html.fromstring(html1) # content = selector.xpath('//div[ends-with(@id, "-k")]/text()') # for each in content: # print(each) html2=''' <!DOCTYPE html> <html> <head lang="en"> <meta charset="UTF-8"> <title></title> </head> <body> <div id="abc-key-x">需要的内容1</div> <div id="123-key-000">需要的内容2</div> <div id="haha-key">需要的内容3</div> <div id="useless">这是我不需要的内容</div> </body> </html> '''<line_sep># selector = lxml.html.fromstring(html2) # content = selector.xpath('//div[contains(@id, "-key")]/text()') # for each in content: # print(each) html3=''' <!DOCTYPE html> <html> <head lang="en"> <meta charset="UTF-8"> <title></title> </head> <body> <div id="test3"> 我左青龙, <span id="tiger"> 右白虎, <ul>上朱雀, <li>下玄武。</li> </ul> 老牛在当中, </span> 龙头在胸口。 </div> </body> </html> '''<line_sep># With the usual approach, the extracted text comes back incomplete selector=lxml.html.fromstring(html3)<line_sep># content_1 = selector.xpath('//div[@id="test3"]/text()') # for each in content_1: # print(each) # Using string(.) retrieves the complete nested text data=selector.xpath('//div[@id="test3"]')[0]<line_sep>info=data.xpath('string(.)')<line_sep>print(info)<line_sep>
<import_stmt>random<class_stmt>StockMarket(object)<block_start><def_stmt>__init__ self marketname symbols<block_start>self.name=marketname<line_sep>self.symbolmeans={}<for_stmt>symbol symbols<block_start>self.symbolmeans[symbol]=random.uniform(20 200)<block_end>self.aggregators=[]<block_end><def_stmt>generate self<block_start>quotes={}<for_stmt>symbol,mean self.symbolmeans.items()<block_start><if_stmt>random.random()<l>0.2<block_start>quotes[symbol]=round(random.normalvariate(mean 20) 2)<block_end><block_end><for_stmt>aggregator self.aggregators<block_start>aggregator.quotes(self.name quotes)<block_end><block_end><def_stmt>listener self aggregator<block_start>self.aggregators.append(aggregator)<block_end><def_stmt>symbols self<block_start><return>self.symbolmeans.keys()<block_end><block_end>
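# Hypothetical usage sketch, not part of the original module: a minimal aggregator that # just prints incoming quotes from the StockMarket class above; the market name and # symbols are illustrative. <class_stmt>PrintingAggregator(object)<block_start><def_stmt>quotes self marketname quotes# called by StockMarket.generate with each batch of new quotes <block_start>print("%s: %s"%(marketname quotes))<block_end><block_end>market=StockMarket('DEMO' ['AAA' 'BBB' 'CCC'])<line_sep>market.listener(PrintingAggregator())<line_sep>market.generate()<line_sep>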
<import_stmt>json<import_from_stmt>dal autocomplete<import_from_stmt>django test<import_from_stmt>django.contrib.auth.models Group<class_stmt>Select2QuerySetSequenceViewTestCase(test.TestCase)<block_start><def_stmt>setUp self<block_start>self.expected={'pagination':{'more':<false>} 'results':[]}<block_end>@classmethod<def_stmt>setUpClass cls<block_start><for_stmt>i range(0 3)<block_start>Group.objects.create(name='ViewTestCase%s'%i)<block_end>cls.request=test.RequestFactory().get('?q=foo')<line_sep>super(Select2QuerySetSequenceViewTestCase cls).setUpClass()<block_end><def_stmt>get_view self **kwargs<block_start>view=autocomplete.Select2QuerySetSequenceView(queryset=autocomplete.QuerySetSequence(Group.objects.all() ) paginate_by=2 **kwargs)<line_sep>view.request=self.request<line_sep><return>view<block_end><def_stmt>get_view_response self **view_kwargs<block_start><return>self.get_view(**view_kwargs).dispatch(self.request)<block_end><def_stmt>get_view_response_json self **view_kwargs<block_start><return>json.loads(self.get_view_response(**view_kwargs).content)<block_end><def_stmt>test_get self<block_start>result=self.get_view_response_json()<assert_stmt>self.expected<eq>result<block_end><def_stmt>test_get_with_create_field self<block_start>self.expected['results'].append({'text':'Create "foo"' 'id':'foo' 'create_id':<true>})<line_sep>result=self.get_view_response_json(create_field='name')<assert_stmt>self.expected<eq>result<block_end><block_end>
# Copyright 2014 The Chromium Authors. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """IDL type handling. Classes: IdlTypeBase IdlType IdlUnionType IdlArrayOrSequenceType IdlSequenceType IdlFrozenArrayType IdlNullableType IdlAnnotatedType IdlTypes are picklable because we store them in interfaces_info. """<import_from_stmt>collections defaultdict<line_sep>################################################################################ # IDL types ################################################################################ INTEGER_TYPES=frozenset([# http://www.w3.org/TR/WebIDL/#dfn-integer-type 'byte' 'octet' 'short' 'unsigned short' # int and unsigned are not IDL types 'long' 'unsigned long' 'long long' 'unsigned long long' ])<line_sep>NUMERIC_TYPES=(INTEGER_TYPES|frozenset([# http://www.w3.org/TR/WebIDL/#dfn-numeric-type 'float' 'unrestricted float' 'double' 'unrestricted double' ]))<line_sep># http://www.w3.org/TR/WebIDL/#dfn-primitive-type PRIMITIVE_TYPES=(frozenset(['boolean'])|NUMERIC_TYPES)<line_sep>BASIC_TYPES=(PRIMITIVE_TYPES|frozenset([# Built-in, non-composite, non-object data types # http://heycam.github.io/webidl/#idl-types 'DOMString' 'ByteString' 'USVString' # http://heycam.github.io/webidl/#idl-types 'void' ]))<line_sep>TYPE_NAMES={# http://heycam.github.io/webidl/#dfn-type-name 'any':'Any' 'boolean':'Boolean' 'byte':'Byte' 'octet':'Octet' 'short':'Short' 'unsigned short':'UnsignedShort' 'long':'Long' 'unsigned long':'UnsignedLong' 'long long':'LongLong' 'unsigned long long':'UnsignedLongLong' 'float':'Float' 'unrestricted float':'UnrestrictedFloat' 'double':'Double' 'unrestricted double':'UnrestrictedDouble' 'DOMString':'String' 'ByteString':'ByteString' 'USVString':'USVString' 'object':'Object' }<line_sep>STRING_TYPES=frozenset([# http://heycam.github.io/webidl/#es-interface-call (step 10.11) # (Interface object [[Call]] method's string types.) 'String' 'ByteString' 'USVString' ])<line_sep>EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES=frozenset(['AllowShared' 'Clamp' 'EnforceRange' 'StringContext' 'TreatNullAs' ])<line_sep>################################################################################ # Inheritance ################################################################################ ancestors=defaultdict(list)# interface_name -> ancestors <def_stmt>inherits_interface interface_name ancestor_name<block_start><return>(interface_name<eq>ancestor_name<or>ancestor_name<in>ancestors[interface_name])<block_end><def_stmt>set_ancestors new_ancestors<block_start>ancestors.update(new_ancestors)<block_end><class_stmt>IdlTypeBase(object)<block_start>"""Base class for IdlType, IdlUnionType, IdlArrayOrSequenceType and IdlNullableType. """<def_stmt>__str__ self<block_start><raise>NotImplementedError('__str__() should be defined in subclasses')<block_end><def_stmt>__getattr__ self name# Default undefined attributes to None (analogous to Jinja variables). # This allows us to not define default properties in the base class, and # allows us to relay __getattr__ in IdlNullableType to the inner type. 
<block_start><return><none><block_end><def_stmt>resolve_typedefs self typedefs<block_start><raise>NotImplementedError('resolve_typedefs should be defined in subclasses')<block_end><def_stmt>idl_types self<block_start>"""A generator which yields IdlTypes which are referenced from |self|, including itself."""<line_sep><yield>self<block_end><block_end>################################################################################ # IdlType ################################################################################ <class_stmt>IdlType(IdlTypeBase)# FIXME: incorporate Nullable, etc. # to support types like short?[] vs. short[]?, instead of treating these # as orthogonal properties (via flags). <block_start>callback_functions={}<line_sep>callback_interfaces=set()<line_sep>dictionaries=set()<line_sep>enums={}# name -> values <def_stmt>__init__ self base_type is_unrestricted=<false><block_start>super(IdlType self).__init__()<if_stmt>is_unrestricted<block_start>self.base_type='unrestricted %s'%base_type<block_end><else_stmt><block_start>self.base_type=base_type<block_end><block_end><def_stmt>__str__ self<block_start><return>self.base_type<block_end><def_stmt>__getstate__ self<block_start><return>{'base_type':self.base_type }<block_end><def_stmt>__setstate__ self state<block_start>self.base_type=state['base_type']<block_end>@property<def_stmt>is_basic_type self<block_start><return>self.base_type<in>BASIC_TYPES<block_end>@property<def_stmt>is_callback_function self# pylint: disable=C0103 <block_start><return>self.base_type<in>IdlType.callback_functions<block_end>@property<def_stmt>is_custom_callback_function self<block_start>entry=IdlType.callback_functions.get(self.base_type)<line_sep>callback_function=entry.get('callback_function')<if_stmt><not>callback_function<block_start><return><false><block_end><return>'Custom'<in>callback_function.extended_attributes<block_end>@property<def_stmt>is_callback_interface self<block_start><return>self.base_type<in>IdlType.callback_interfaces<block_end>@property<def_stmt>is_dictionary self<block_start><return>self.base_type<in>IdlType.dictionaries<block_end>@property<def_stmt>is_enum self# FIXME: add an IdlEnumType class and a resolve_enums step # at end of IdlDefinitions constructor <block_start><return>self.name<in>IdlType.enums<block_end>@property<def_stmt>enum_values self<block_start><return>IdlType.enums.get(self.name)<block_end>@property<def_stmt>enum_type self<block_start><return>self.name<if>self.is_enum<else><none><block_end>@property<def_stmt>is_integer_type self<block_start><return>self.base_type<in>INTEGER_TYPES<block_end>@property<def_stmt>is_void self<block_start><return>self.base_type<eq>'void'<block_end>@property<def_stmt>is_numeric_type self<block_start><return>self.base_type<in>NUMERIC_TYPES<block_end>@property<def_stmt>is_primitive_type self<block_start><return>self.base_type<in>PRIMITIVE_TYPES<block_end>@property<def_stmt>is_interface_type self# Anything that is not another type is an interface type. # http://www.w3.org/TR/WebIDL/#idl-types # http://www.w3.org/TR/WebIDL/#idl-interface # In C++ these are RefPtr types. 
<block_start><return><not>(self.is_basic_type<or>self.is_callback_function<or>self.is_dictionary<or>self.is_enum<or>self.name<eq>'Any'<or>self.name<eq>'Object'<or>self.name<eq>'Promise')<block_end># Promise will be basic in future @property<def_stmt>is_string_type self<block_start><return>self.name<in>STRING_TYPES<block_end>@property<def_stmt>name self<block_start>"""Return type name http://heycam.github.io/webidl/#dfn-type-name """<line_sep>base_type=self.base_type<line_sep><return>TYPE_NAMES.get(base_type base_type)<block_end>@classmethod<def_stmt>set_callback_functions cls new_callback_functions<block_start>cls.callback_functions.update(new_callback_functions)<block_end>@classmethod<def_stmt>set_callback_interfaces cls new_callback_interfaces<block_start>cls.callback_interfaces.update(new_callback_interfaces)<block_end>@classmethod<def_stmt>set_dictionaries cls new_dictionaries<block_start>cls.dictionaries.update(new_dictionaries)<block_end>@classmethod<def_stmt>set_enums cls new_enums<block_start>cls.enums.update(new_enums)<block_end><def_stmt>resolve_typedefs self typedefs<block_start>base_type=self.base_type<if_stmt>base_type<in>typedefs<block_start>resolved_type=typedefs[base_type]<if_stmt>resolved_type.base_type<in>typedefs<block_start><raise>ValueError("We can't typedef a typedef'ed type.")<block_end># For the case that the resolved type contains other typedef'ed # type(s). <return>resolved_type.resolve_typedefs(typedefs)<block_end><return>self<block_end><block_end>################################################################################ # IdlUnionType ################################################################################ <class_stmt>IdlUnionType(IdlTypeBase)# http://heycam.github.io/webidl/#idl-union # IdlUnionType has __hash__() and __eq__() methods because they are stored # in sets. <block_start><def_stmt>__init__ self member_types<block_start>super(IdlUnionType self).__init__()<line_sep>self.member_types=member_types<block_end><def_stmt>__str__ self<block_start><return>'('+' or '.join(str(member_type)<for>member_type self.member_types)+')'<block_end><def_stmt>__hash__ self<block_start><return>hash(self.name)<block_end><def_stmt>__eq__ self rhs<block_start><return>self.name<eq>rhs.name<block_end><def_stmt>__getstate__ self<block_start><return>{'member_types':self.member_types }<block_end><def_stmt>__setstate__ self state<block_start>self.member_types=state['member_types']<block_end>@property<def_stmt>flattened_member_types self<block_start>"""Returns the set of the union's flattened member types. https://heycam.github.io/webidl/#dfn-flattened-union-member-types """<line_sep># We cannot use a set directly because each member is an # IdlTypeBase-derived class, and comparing two objects of the # same type is not the same as comparing their names. # In other words: # x = IdlType('ByteString') # y = IdlType('ByteString') # x == y # False # x.name == y.name # True # |flattened_members|'s keys are type names, the values are type # |objects|. # We assume we can use two IDL objects of the same type interchangeably. 
flattened_members={}<for_stmt>member self.member_types<block_start><if_stmt>member.is_nullable<block_start>member=member.inner_type<block_end><if_stmt>member.is_union_type<block_start><for_stmt>inner_member member.flattened_member_types<block_start>flattened_members[inner_member.name]=inner_member<block_end><block_end><else_stmt><block_start>flattened_members[member.name]=member<block_end><block_end><return>set(flattened_members.values())<block_end>@property<def_stmt>number_of_nullable_member_types self<block_start>"""Returns the union's number of nullable types. http://heycam.github.io/webidl/#dfn-number-of-nullable-member-types """<line_sep>count=0<for_stmt>member self.member_types<block_start><if_stmt>member.is_nullable<block_start>count<augadd>1<line_sep>member=member.inner_type<block_end><if_stmt>member.is_union_type<block_start>count<augadd>member.number_of_nullable_member_types<block_end><block_end><return>count<block_end>@property<def_stmt>is_union_type self<block_start><return><true><block_end><def_stmt>single_matching_member_type self predicate<block_start>matching_types=list(filter(predicate self.flattened_member_types))<if_stmt>len(matching_types)<g>1<block_start><raise>ValueError('%s is ambiguous.'%self.name)<block_end><return>matching_types[0]<if>matching_types<else><none><block_end>@property<def_stmt>string_member_type self<block_start><return>self.single_matching_member_type(<lambda>member_type:(member_type.is_string_type<or>member_type.is_enum))<block_end>@property<def_stmt>numeric_member_type self<block_start><return>self.single_matching_member_type(<lambda>member_type:member_type.is_numeric_type)<block_end>@property<def_stmt>boolean_member_type self<block_start><return>self.single_matching_member_type(<lambda>member_type:member_type.base_type<eq>'boolean')<block_end>@property<def_stmt>sequence_member_type self<block_start><return>self.single_matching_member_type(<lambda>member_type:member_type.is_sequence_type)<block_end>@property<def_stmt>dictionary_member_type self<block_start><return>self.single_matching_member_type(<lambda>member_type:member_type.is_dictionary)<block_end>@property<def_stmt>as_union_type self# Note: Use this to "look through" a possible IdlNullableType wrapper. <block_start><return>self<block_end>@property<def_stmt>name self<block_start>"""Return type name (or inner type name if nullable) http://heycam.github.io/webidl/#dfn-type-name """<line_sep><return>'Or'.join(member_type.name<for>member_type self.member_types)<block_end><def_stmt>resolve_typedefs self typedefs<block_start>self.member_types=[member_type.resolve_typedefs(typedefs)<for>member_type self.member_types]<line_sep><return>self<block_end><def_stmt>idl_types self<block_start><yield>self<for_stmt>member_type self.member_types<block_start><for_stmt>idl_type member_type.idl_types()<block_start><yield>idl_type<block_end><block_end><block_end><block_end>################################################################################ # IdlArrayOrSequenceType, IdlSequenceType, IdlFrozenArrayType ################################################################################ # TODO(bashi): Rename this like "IdlArrayTypeBase" or something. 
<class_stmt>IdlArrayOrSequenceType(IdlTypeBase)<block_start>"""Base class for array-like types."""<def_stmt>__init__ self element_type<block_start>super(IdlArrayOrSequenceType self).__init__()<line_sep>self.element_type=element_type<block_end><def_stmt>__getstate__ self<block_start><return>{'element_type':self.element_type }<block_end><def_stmt>__setstate__ self state<block_start>self.element_type=state['element_type']<block_end><def_stmt>resolve_typedefs self typedefs<block_start>self.element_type=self.element_type.resolve_typedefs(typedefs)<line_sep><return>self<block_end>@property<def_stmt>is_array_or_sequence_type self<block_start><return><true><block_end>@property<def_stmt>is_sequence_type self<block_start><return><false><block_end>@property<def_stmt>is_frozen_array self<block_start><return><false><block_end>@property<def_stmt>enum_values self<block_start><return>self.element_type.enum_values<block_end>@property<def_stmt>enum_type self<block_start><return>self.element_type.enum_type<block_end><def_stmt>idl_types self<block_start><yield>self<for_stmt>idl_type self.element_type.idl_types()<block_start><yield>idl_type<block_end><block_end><block_end><class_stmt>IdlSequenceType(IdlArrayOrSequenceType)<block_start><def_stmt>__init__ self element_type<block_start>super(IdlSequenceType self).__init__(element_type)<block_end><def_stmt>__str__ self<block_start><return>'sequence<%s>'%self.element_type<block_end>@property<def_stmt>name self<block_start><return>self.element_type.name+'Sequence'<block_end>@property<def_stmt>is_sequence_type self<block_start><return><true><block_end><block_end><class_stmt>IdlFrozenArrayType(IdlArrayOrSequenceType)<block_start><def_stmt>__init__ self element_type<block_start>super(IdlFrozenArrayType self).__init__(element_type)<block_end><def_stmt>__str__ self<block_start><return>'FrozenArray<%s>'%self.element_type<block_end>@property<def_stmt>name self<block_start><return>self.element_type.name+'Array'<block_end>@property<def_stmt>is_frozen_array self<block_start><return><true><block_end><block_end>################################################################################ # IdlRecordType ################################################################################ <class_stmt>IdlRecordType(IdlTypeBase)<block_start><def_stmt>__init__ self key_type value_type<block_start>super(IdlRecordType self).__init__()<line_sep>self.key_type=key_type<line_sep>self.value_type=value_type<block_end><def_stmt>__str__ self<block_start><return>'record<%s, %s>'%(self.key_type self.value_type)<block_end><def_stmt>__getstate__ self<block_start><return>{'key_type':self.key_type 'value_type':self.value_type }<block_end><def_stmt>__setstate__ self state<block_start>self.key_type=state['key_type']<line_sep>self.value_type=state['value_type']<block_end><def_stmt>idl_types self<block_start><yield>self<for_stmt>idl_type self.key_type.idl_types()<block_start><yield>idl_type<block_end><for_stmt>idl_type self.value_type.idl_types()<block_start><yield>idl_type<block_end><block_end><def_stmt>resolve_typedefs self typedefs<block_start>self.key_type=self.key_type.resolve_typedefs(typedefs)<line_sep>self.value_type=self.value_type.resolve_typedefs(typedefs)<line_sep><return>self<block_end>@property<def_stmt>is_record_type self<block_start><return><true><block_end>@property<def_stmt>name self<block_start><return>self.key_type.name+self.value_type.name+'Record'<block_end><block_end>################################################################################ # IdlNullableType 
################################################################################ # https://heycam.github.io/webidl/#idl-nullable-type <class_stmt>IdlNullableType(IdlTypeBase)<block_start><def_stmt>__init__ self inner_type<block_start>super(IdlNullableType self).__init__()<if_stmt>inner_type.name<eq>'Any'<block_start><raise>ValueError('Inner type of nullable type must not be any.')<block_end><if_stmt>inner_type.name<eq>'Promise'<block_start><raise>ValueError('Inner type of nullable type must not be a promise.')<block_end><if_stmt>inner_type.is_nullable<block_start><raise>ValueError('Inner type of nullable type must not be a nullable type.')<block_end><if_stmt>inner_type.is_union_type<block_start><if_stmt>inner_type.number_of_nullable_member_types<g>0<block_start><raise>ValueError('Inner type of nullable type must not be a union type that '<concat>'itself includes a nullable type.')<block_end><if_stmt>any(member.is_dictionary<for>member inner_type.flattened_member_types)<block_start><raise>ValueError('Inner type of nullable type must not be a union type that '<concat>'has a dictionary type as its members.')<block_end><block_end>self.inner_type=inner_type<block_end><def_stmt>__str__ self# FIXME: Dictionary::ConversionContext::setConversionType can't # handle the '?' in nullable types (passes nullability separately). # Update that function to handle nullability from the type name, # simplifying its signature. # return str(self.inner_type) + '?' <block_start><return>str(self.inner_type)<block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self.inner_type name)<block_end><def_stmt>__getstate__ self<block_start><return>{'inner_type':self.inner_type }<block_end><def_stmt>__setstate__ self state<block_start>self.inner_type=state['inner_type']<block_end>@property<def_stmt>is_nullable self<block_start><return><true><block_end>@property<def_stmt>name self<block_start><return>self.inner_type.name+'OrNull'<block_end>@property<def_stmt>enum_values self# Nullable enums are handled by prepending a None value to the list of # enum values. This None value is converted to nullptr on the C++ side, # which matches the JavaScript 'null' in the enum parsing code. <block_start>inner_values=self.inner_type.enum_values<if_stmt>inner_values<block_start><return>[<none>]+inner_values<block_end><return><none><block_end><def_stmt>resolve_typedefs self typedefs<block_start>self.inner_type=self.inner_type.resolve_typedefs(typedefs)<line_sep><return>self<block_end><def_stmt>idl_types self<block_start><yield>self<for_stmt>idl_type self.inner_type.idl_types()<block_start><yield>idl_type<block_end><block_end><block_end>################################################################################ # IdlAnnotatedType ################################################################################ <class_stmt>IdlAnnotatedType(IdlTypeBase)<block_start>"""IdlAnnotatedType represents an IDL type with extended attributes. [Clamp], [EnforceRange], [StringContext], and [TreatNullAs] are applicable to types. 
https://heycam.github.io/webidl/#idl-annotated-types """<def_stmt>__init__ self inner_type extended_attributes<block_start>super(IdlAnnotatedType self).__init__()<line_sep>self.inner_type=inner_type<line_sep>self.extended_attributes=extended_attributes<if_stmt>any(key<not><in>EXTENDED_ATTRIBUTES_APPLICABLE_TO_TYPES<for>key extended_attributes)<block_start><raise>ValueError('Extended attributes not applicable to types: %s'%self)<block_end><if_stmt>('StringContext'<in>extended_attributes<and>inner_type.base_type<not><in>['DOMString' 'USVString'])<block_start><raise>ValueError('StringContext is only applicable to string types.')<block_end><block_end><def_stmt>__str__ self<block_start>annotation=', '.join((key+(''<if>val<is><none><else>'='+val))<for>key,val self.extended_attributes.items())<line_sep><return>'[%s] %s'%(annotation str(self.inner_type))<block_end><def_stmt>__getattr__ self name<block_start><return>getattr(self.inner_type name)<block_end><def_stmt>__getstate__ self<block_start><return>{'inner_type':self.inner_type 'extended_attributes':self.extended_attributes }<block_end><def_stmt>__setstate__ self state<block_start>self.inner_type=state['inner_type']<line_sep>self.extended_attributes=state['extended_attributes']<block_end>@property<def_stmt>is_annotated_type self<block_start><return><true><block_end>@property<def_stmt>has_string_context self<block_start><return>'StringContext'<in>self.extended_attributes<block_end>@property<def_stmt>name self<block_start>annotation=''.join((key+(''<if>val<is><none><else>val))<for>key,val sorted(self.extended_attributes.items()))<line_sep><return>self.inner_type.name+annotation<block_end><def_stmt>resolve_typedefs self typedefs<block_start>self.inner_type=self.inner_type.resolve_typedefs(typedefs)<line_sep><return>self<block_end><def_stmt>idl_types self<block_start><yield>self<line_sep><yield>self.inner_type<block_end><block_end>
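# Hypothetical usage sketch, not part of the original module: type-name derivation for a # nullable sequence type, using only the classes defined above; .name should come out as # 'LongSequenceOrNull'. nullable_long_sequence=IdlNullableType(IdlSequenceType(IdlType('long')))<line_sep>print(nullable_long_sequence.name)<line_sep>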
<import_stmt>hashlib<import_stmt>json<import_stmt>logging<import_from_stmt>collections.abc Mapping Sequence<import_from_stmt>typing Any List Tuple<import_from_stmt>nested_lookup nested_lookup<import_from_stmt>ordered_set OrderedSet<import_from_stmt>.pointer fragment_decode fragment_encode<line_sep>LOG=logging.getLogger(__name__)<line_sep>NON_MERGABLE_KEYS=("uniqueItems" "insertionOrder")<line_sep>TYPE="type"<line_sep>REF="$ref"<line_sep>UNPACK_SEQUENCE_IDENTIFIER="*"<class_stmt>FlatteningError(Exception)<block_start><pass><block_end><def_stmt>item_hash item # assumption -> input is only json comparable type (dict/list/scalar) <block_start>"""MD5 hash for an item (Dictionary/Iterable/Scalar)"""<line_sep>dhash=hashlib.md5()# nosec <if_stmt>isinstance(item dict)<block_start>item={k:item_hash(v)<for>k,v item.items()}<block_end><if_stmt>isinstance(item list)<block_start>item=sorted([item_hash(i)<for>i item])# list.sort() returns None; sorted() keeps the hash order-insensitive <block_end>encoded=json.dumps(item sort_keys=<true>).encode()<line_sep>dhash.update(encoded)<line_sep><return>dhash.hexdigest()<block_end><def_stmt>to_set value:Any<arrow>OrderedSet<block_start><return>(OrderedSet(value)<if>isinstance(value (list OrderedSet))<else>OrderedSet([value]))<block_end><class_stmt>ConstraintError(FlatteningError ValueError)<block_start><def_stmt>__init__ self message path *args<block_start>self.path=fragment_encode(path)<line_sep>message=message.format(*args path=self.path)<line_sep>super().__init__(message)<block_end><block_end><class_stmt>BaseRefPlaceholder<block_start>"""A sentinel object representing a reference inside the base document."""<def_stmt>__repr__ self<block_start>"""Readable representation for debugging. >>> repr(BaseRefPlaceholder()) '<BASE>' """<line_sep><return>"<BASE>"<block_end><block_end>#: The sentinel instance representing a reference inside the base document. BASE=BaseRefPlaceholder()<def_stmt>rewrite_ref ref<block_start>"""Rewrite a reference to be inside of the base document. A relative JSON pointer is returned (in URI fragment identifier representation). If the reference is already inside the base document (:const:`BASE`), the parts are simply encoded into a pointer. If the reference is outside of the base document, a unique pointer inside the base document is made by namespacing the reference under the remote base name inside the remote section. >>> rewrite_ref((BASE, "foo", "bar")) '#/foo/bar' >>> rewrite_ref((BASE,)) '#' >>> rewrite_ref(("remote", "foo", "bar")) '#/remote/remote/foo/bar' >>> rewrite_ref(("remote",)) '#/remote/remote' """<line_sep>base,*parts=ref<if_stmt>base<is><not>BASE<block_start>parts=["remote" base]+parts<block_end><return>fragment_encode(parts)<block_end><def_stmt>traverse document path_parts<block_start>"""Traverse the document according to the reference. Since the document is presumed to be the reference's base, the base is discarded. There is no validation that the reference is valid. :raises ValueError, LookupError: the reference is invalid for this document >>> traverse({"foo": {"bar": [42]}}, tuple()) ({'foo': {'bar': [42]}}, (), None) >>> traverse({"foo": {"bar": [42]}}, ["foo"]) ({'bar': [42]}, ('foo',), {'foo': {'bar': [42]}}) >>> traverse({"foo": {"bar": [42]}}, ("foo", "bar")) ([42], ('foo', 'bar'), {'bar': [42]}) >>> traverse({"foo": {"bar": [42]}}, ("foo", "bar", "0")) (42, ('foo', 'bar', 0), [42]) >>> traverse({}, ["foo"]) Traceback (most recent call last): ... KeyError: 'foo' >>> traverse([], ["foo"]) Traceback (most recent call last): ... 
ValueError: invalid literal for int() with base 10: 'foo' >>> traverse([], [0]) Traceback (most recent call last): ... IndexError: list index out of range """<line_sep>parent=<none><line_sep>path=[]<for_stmt>part path_parts<block_start><if_stmt>isinstance(document Sequence)<block_start>part=int(part)<block_end>parent=document<line_sep>document=document[part]<line_sep>path.append(part)<block_end><return>document tuple(path) parent<block_end><def_stmt>_resolve_ref sub_schema:dict definitions:dict last_step:bool=<false># resolve $ref <block_start>ref=nested_lookup(REF sub_schema)# should be safe (always single value) # because sub_schema is always per parent property # (taken from definitions) <if_stmt>last_step<and>REF<not><in>sub_schema# don't traverse deeper than requested # check if $ref is used directly -> # means that we need to check definition # otherwise it's an array and return subschema <block_start><return>sub_schema<block_end><if_stmt>ref# [0] should be a single $ref in subschema on the top level # [-1] $ref must follow #/definitions/object <block_start>sub_schema=definitions[fragment_decode(ref[0])[-1]]<block_end># resolve properties properties=nested_lookup("properties" sub_schema)<if_stmt>properties<block_start>sub_schema=properties[0]<block_end><return>sub_schema<block_end># pylint: disable=C0301 <def_stmt>traverse_raw_schema schema:dict path:tuple<block_start>"""Traverse the raw json schema resolving $ref :raises TypeError: either schema is not of type dict :raises ConstraintError: the schema tries to override "type" or "$ref" >>> traverse_raw_schema({"properties": {"bar": [42]}}, tuple()) {'bar': [42]} >>> traverse_raw_schema({"properties": {"bar": [42]}}, ("bar",)) [42] >>> traverse_raw_schema({"definitions": {"bar": {"type": "boolean"}},"properties": {"bar": {"$ref": "#/definitions/bar"}}}, ("bar",)) {'type': 'boolean'} >>> traverse_raw_schema({"definitions":{"b":[1],"f":{"properties":{"b":{"$ref":"#/definitions/b"}}}},"properties":{"f":{"$ref":"#/definitions/f"}}},("f", "b")) # noqa: B950 [1] >>> traverse_raw_schema({}, ("foo")) {} >>> traverse_raw_schema([], ["foo"]) Traceback (most recent call last): ... TypeError: Schema must be a dictionary """<if_stmt><not>isinstance(schema Mapping)<block_start><raise>TypeError("Schema must be a dictionary")<block_end><try_stmt><block_start>properties=schema["properties"]<line_sep>definitions=schema.get("definitions" {})<line_sep>sub_properties=properties<line_sep>last_step=(len(path)-1)<line_sep># get number of steps to prevent deeper traversal than requested <for_stmt>step path<block_start>sub_properties=_resolve_ref(sub_properties[step] definitions last_step=path.index(step)<eq>last_step )<block_end><return>sub_properties<block_end><except_stmt>KeyError<as>e<block_start>LOG.debug("Malformed Schema or incorrect path provided\n%s\n%s" path e)<line_sep><return>{}<block_end><block_end><def_stmt>traverse_path_for_sequence_members document:dict path_parts:Sequence path:list=<none><arrow>Tuple[List[object] List[tuple]]<block_start>"""Traverse the paths for all sequence members in the document according to the reference. Since the document is presumed to be the reference's base, the base is discarded. There is no validation that the reference is valid. Differing from traverse, this returns a list of documents and a list of resolved paths. 
:parameter document: document to traverse (dict or list) :parameter path_parts: document paths to traverse :parameter path: traversed path so far :raises ValueError, LookupError: the reference is invalid for this document >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, tuple()) ([{'foo': {'bar': [42, 43, 44]}}], [()]) >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ["foo"]) ([{'bar': [42, 43, 44]}], [('foo',)]) >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar")) ([[42, 43, 44]], [('foo', 'bar')]) >>> traverse_path_for_sequence_members({"foo": {"bar": [42, 43, 44]}}, ("foo", "bar", "*")) ([42, 43, 44], [('foo', 'bar', 0), ('foo', 'bar', 1), ('foo', 'bar', 2)]) >>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*")) ([{'baz': 1, 'bin': 1}, {'baz': 2, 'bin': 2}], [('foo', 'bar', 0), ('foo', 'bar', 1)]) >>> traverse_path_for_sequence_members({"foo": {"bar": [{"baz": 1, "bin": 1}, {"baz": 2, "bin": 2}]}}, ("foo", "bar", "*", "baz")) ([1, 2], [('foo', 'bar', 0, 'baz'), ('foo', 'bar', 1, 'baz')]) >>> traverse_path_for_sequence_members({}, ["foo"]) Traceback (most recent call last): ... KeyError: 'foo' >>> traverse_path_for_sequence_members([], ["foo"]) Traceback (most recent call last): ... ValueError: invalid literal for int() with base 10: 'foo' >>> traverse_path_for_sequence_members([], [0]) Traceback (most recent call last): ... IndexError: list index out of range """<if_stmt>path<is><none><block_start>path=[]<block_end><if_stmt><not>path_parts<block_start><return>[document] [tuple(path)]<block_end>path_parts=list(path_parts)<if_stmt><not>isinstance(document Sequence)<block_start><return>_handle_non_sequence_for_traverse(document path_parts path)<block_end><return>_handle_sequence_for_traverse(document path_parts path)<block_end><def_stmt>_handle_non_sequence_for_traverse current_document:dict current_path_parts:list current_path:list<arrow>Tuple[List[object] List[tuple]]<block_start>""" Handling a non-sequence member for `traverse_path_for_sequence_members` is like the loop block in `traverse`: The next path part is the first part in the list of path parts. The new document is obtained from the current document using the new path part as the key. The next path part is added to the traversed path. The traversal continues by recursively calling `traverse_path_for_sequence_members` """<line_sep>part_to_handle=current_path_parts.pop(0)<line_sep>current_document=current_document[part_to_handle]<line_sep>current_path.append(part_to_handle)<line_sep><return>traverse_path_for_sequence_members(current_document current_path_parts current_path)<block_end><def_stmt>_handle_sequence_for_traverse current_document:Sequence current_path_parts:list current_path:list<arrow>Tuple[List[object] List[tuple]]<block_start>""" Check the new path part for the unpack sequence identifier (e.g. '*'), otherwise traverse index and continue: The new document is obtained from the current document (a sequence) using the new path part as the index. 
The next path part is added to the traversed path """<line_sep>sequence_part=current_path_parts.pop(0)<if_stmt>sequence_part<eq>UNPACK_SEQUENCE_IDENTIFIER<block_start><return>_handle_unpack_sequence_for_traverse(current_document current_path_parts current_path)<block_end># otherwise, sequence part should be a valid index current_sequence_part=int(sequence_part)<line_sep>current_document=current_document[current_sequence_part]<line_sep>current_path.append(current_sequence_part)<line_sep><return>[current_document] [tuple(current_path)]<block_end><def_stmt>_handle_unpack_sequence_for_traverse current_document:Sequence current_path_parts:list current_path:list<arrow>Tuple[List[object] List[tuple]]<block_start>""" When unpacking a sequence, we need to include multiple paths and multiple documents, one for each sequence member. For each sequence member: Append the traversed paths w/ the sequence index, and get the new document. The new document is obtained by traversing the current document using the sequence index. The new document is appended to the list of new documents. For each new document: The remaining document is traversed using the remaining path parts. The list of traversed documents and traversed paths are returned. """<line_sep>documents=[]<line_sep>resolved_paths=[]<line_sep>new_documents=[]<line_sep>new_paths=[]<for_stmt>sequence_index range(len(current_document))<block_start>new_paths.append(current_path.copy()+[sequence_index])<line_sep>new_document=traverse_path_for_sequence_members(current_document [sequence_index]+current_path_parts current_path.copy())[0]<line_sep>new_documents.extend(new_document)<block_end><for_stmt>i range(len(new_documents))# pylint: disable=consider-using-enumerate <block_start>new_document=new_documents[i]<line_sep>newer_documents,newer_paths=traverse_path_for_sequence_members(new_document current_path_parts new_paths[i])<line_sep>documents.extend(newer_documents)<line_sep>resolved_paths.extend(newer_paths)<block_end><return>documents resolved_paths<block_end><def_stmt>schema_merge target src path# noqa: C901 # pylint: disable=R0912 <block_start>"""Merges the src schema into the target schema in place. If there are duplicate keys, src will overwrite target. 
:raises TypeError: either schema is not of type dict :raises ConstraintError: the schema tries to override "type" or "$ref" >>> schema_merge({}, {}, ()) {} >>> schema_merge({'foo': 'a'}, {}, ()) {'foo': 'a'} >>> schema_merge({}, {'foo': 'a'}, ()) {'foo': 'a'} >>> schema_merge({'foo': 'a'}, {'foo': 'b'}, ()) {'foo': 'b'} >>> schema_merge({'required': 'a'}, {'required': 'b'}, ()) {'required': ['a', 'b']} >>> a, b = {'$ref': 'a'}, {'foo': 'b'} >>> schema_merge(a, b, ('foo',)) {'$ref': 'a', 'foo': 'b'} >>> a, b = {'$ref': 'a'}, {'type': 'b'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])} >>> a, b = {'$ref': 'a'}, {'$ref': 'b'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b'])} >>> a, b = {'$ref': 'a'}, {'type': ['b', 'c']} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'$ref': 'a'}, {'type': OrderedSet(['b', 'c'])} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'type': ['a', 'b']}, {'$ref': 'c'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'type': OrderedSet(['a', 'b'])}, {'$ref': 'c'} >>> schema_merge(a, b, ('foo',)) {'type': OrderedSet(['a', 'b', 'c'])} >>> a, b = {'Foo': {'$ref': 'a'}}, {'Foo': {'type': 'b'}} >>> schema_merge(a, b, ('foo',)) {'Foo': {'type': OrderedSet(['a', 'b'])}} >>> schema_merge({'type': 'a'}, {'type': 'b'}, ()) # doctest: +NORMALIZE_WHITESPACE {'type': OrderedSet(['a', 'b'])} >>> schema_merge({'type': 'string'}, {'type': 'integer'}, ()) {'type': OrderedSet(['string', 'integer'])} """<if_stmt><not>(isinstance(target Mapping)<and>isinstance(src Mapping))<block_start><raise>TypeError("Both schemas must be dictionaries")<block_end><for_stmt>key,src_schema src.items()<block_start><try_stmt><block_start><if_stmt>key<in>(REF TYPE )# $ref and type are treated similarly and unified <block_start>target_schema=target.get(key)<or>target.get(TYPE)<or>target[REF]<block_end><else_stmt><block_start>target_schema=target[key]# carry over existing properties <block_end><block_end><except_stmt>KeyError<block_start>target[key]=src_schema<block_end><else_stmt><block_start>next_path=path+(key )<try_stmt><block_start>target[key]=schema_merge(target_schema src_schema next_path)<block_end><except_stmt>TypeError<block_start><if_stmt>key<in>(TYPE REF)# combining multiple $ref and types <block_start>src_set=to_set(src_schema)<try_stmt><block_start>target[TYPE]=to_set(target[TYPE])<line_sep># casting to ordered set as lib # implicitly converts strings to sets target[TYPE]<augor>src_set<block_end><except_stmt>(TypeError KeyError)<block_start>target_set=to_set(target_schema)<line_sep>target[TYPE]=target_set|src_set<block_end><try_stmt># check if there are conflicting $ref and type # at the same sub schema. Conflicting $ref could only # happen on combiners because method merges two json # objects without losing any previous info: # e.g. 
"oneOf": [{"$ref": "..#1.."},{"$ref": "..#2.."}] -> # { "ref": "..#1..", "type": [{},{}] } <block_start>target.pop(REF)<block_end><except_stmt>KeyError<block_start><pass><block_end><block_end><elif_stmt>key<eq>"required"<block_start>target[key]=sorted(set(target_schema)|set(src_schema))<block_end><else_stmt><block_start><if_stmt>key<in>NON_MERGABLE_KEYS<and>target_schema<ne>src_schema<block_start>msg=("Object at path '{path}' declared multiple values "<concat>"for '{}': found '{}' and '{}'")<line_sep># pylint: disable=W0707 <raise>ConstraintError(msg path key target_schema src_schema)<block_end>target[key]=src_schema<block_end><block_end><block_end><block_end><return>target<block_end>
""" Tests for mail module. """<import_from_stmt>django.contrib.auth get_user_model<import_from_stmt>django.utils.translation ugettext_lazy<as>_<import_from_stmt>..mail translate_for_user<class_stmt>TestTransUser(object)<block_start><def_stmt>test self<block_start>User=get_user_model()<line_sep>user=User.objects.get(username='test')<line_sep>user.profile.language='de'<line_sep>msgs=[_('German') _('English')]<line_sep>msgs=translate_for_user(user *msgs)<assert_stmt>msgs<eq>['Deutsch' 'Englisch']<block_end><block_end>
"""Shared functions for the config module."""<def_stmt>to_dict value<block_start>"""Recursively transform a class from `config.models` to a dict."""<if_stmt>hasattr(value 'as_dict')<block_start><return>value.as_dict()<block_end><if_stmt>isinstance(value list)<block_start><return>[to_dict(v)<for>v value]<block_end><if_stmt>isinstance(value dict)<block_start><return>{k:to_dict(v)<for>k,v value.items()}<block_end><return>value<block_end><def_stmt>list_to_dict list_<block_start>"""Transform a list to a dictionary with its indices as keys."""<line_sep>dict_={str(i):element<for>i,element enumerate(list_)}<line_sep><return>dict_<block_end>
# Copyright 2020 The TensorFlow Authors # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # https://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """General helper functions."""<import_from_stmt>os path<import_stmt>numpy<as>np<import_from_stmt>skimage measure<import_stmt>tensorflow.compat.v1<as>tf<import_from_stmt>tensorflow_graphics.projects.cvxnet.lib.libmise mise<import_from_stmt>tensorflow_graphics.projects.nasa.lib datasets<import_from_stmt>tensorflow_graphics.projects.nasa.lib models<import_stmt>tensorflow_probability<as>tfp<import_from_stmt>tqdm trange<import_stmt>trimesh<line_sep>tf.disable_eager_execution()<line_sep>tfd=tfp.distributions<def_stmt>define_flags <block_start>"""Define command line flags."""<line_sep>flags=tf.app.flags<line_sep># Dataset Parameters flags.DEFINE_enum("dataset" "amass" list(k<for>k datasets.dataset_dict.keys()) "Name of the dataset.")<line_sep>flags.DEFINE_string("data_dir" <none> "Directory to load data from.")<line_sep>flags.mark_flag_as_required("data_dir")<line_sep>flags.DEFINE_integer("sample_bbox" 1024 "Number of bbox samples.")<line_sep>flags.DEFINE_integer("sample_surf" 1024 "Number of surface samples.")<line_sep>flags.DEFINE_integer("batch_size" 12 "Batch size.")<line_sep>flags.DEFINE_integer("motion" 0 "Index of the motion for evaluation.")<line_sep>flags.DEFINE_integer("subject" 0 "Index of the subject for training.")<line_sep># Model Parameters flags.DEFINE_enum("model" "nasa" list(k<for>k models.model_dict.keys()) "Name of the model.")<line_sep>flags.DEFINE_integer("n_parts" 24 "Number of parts.")<line_sep>flags.DEFINE_integer("total_dim" 960 "Dimension of the latent vector (in total).")<line_sep>flags.DEFINE_bool("shared_decoder" <false> "Whether to use shared decoder.")<line_sep>flags.DEFINE_float("soft_blend" 5. 
"The constant to blend parts.")<line_sep>flags.DEFINE_bool("projection" <true> "Whether to use projected shape features.")<line_sep>flags.DEFINE_float("level_set" 0.5 "The value of the level_set.")<line_sep>flags.DEFINE_integer("n_dims" 3 "The dimension of the query points.")<line_sep># Training Parameters flags.DEFINE_float("lr" 1e-4 "Learning rate")<line_sep>flags.DEFINE_string("train_dir" <none> "Training directory.")<line_sep>flags.mark_flag_as_required("train_dir")<line_sep>flags.DEFINE_integer("max_steps" 200000 "Number of optimization steps.")<line_sep>flags.DEFINE_integer("save_every" 5000 "Number of steps to save checkpoint.")<line_sep>flags.DEFINE_integer("summary_every" 500 "Number of steps to save checkpoint.")<line_sep>flags.DEFINE_float("label_w" 0.5 "Weight of labed vertices loss.")<line_sep>flags.DEFINE_float("minimal_w" 0.05 "Weight of minimal loss.")<line_sep>flags.DEFINE_bool("use_vert" <true> "Whether to use vertices on the mesh for training.")<line_sep>flags.DEFINE_bool("use_joint" <true> "Whether to use joint-based transformation.")<line_sep>flags.DEFINE_integer("sample_vert" 2048 "Number of vertex samples.")<line_sep># Evalulation Parameters flags.DEFINE_bool("gen_mesh_only" <false> "Whether to generate meshes only.")<line_sep># Tracking Parameters flags.DEFINE_float("theta_lr" 5e-4 "Learning rate")<line_sep>flags.DEFINE_integer("max_steps_per_frame" 1792 "Number of optimization steps for tracking each frame.")<line_sep>flags.DEFINE_enum("gradient_type" "reparam" ["vanilla" "reparam"] "Type of gradient to use in theta optimization.")<line_sep>flags.DEFINE_integer("sample_track_vert" 1024 "Number of vertex samples for tracking each frame.")<line_sep>flags.DEFINE_integer("n_noisy_samples" 8 "Number of noisy samples per vertex")<line_sep>flags.DEFINE_float("bandwidth" 1e-2 "Bandwidth of the gaussian noises.")<line_sep>flags.DEFINE_bool("left_trans" <false> "Whether to use left side transformation (True) or right side (False).")<line_sep>flags.DEFINE_string("joint_data" <none> "Path to load joint data.")<line_sep>flags.DEFINE_float("glue_w" 20. "Weight of length constraint loss.")<line_sep>flags.DEFINE_float("trans_range" 1. "The range of allowed translations.")<block_end><def_stmt>gen_mesh sess feed_dict latent_holder point_holder latent occ batch_val hparams idx=0<block_start>"""Generating meshes given a trained NASA model."""<line_sep>scale=1.1# Scale of the padded bbox regarding the tight one. level_set=hparams.level_set<line_sep>latent_val=sess.run(latent feed_dict)<line_sep>mesh_extractor=mise.MISE(32 3 level_set)<line_sep>points=mesh_extractor.query()<line_sep>gt_verts=batch_val["vert"].reshape([-1 3])<line_sep>gt_bbox=np.stack([gt_verts.min(axis=0) gt_verts.max(axis=0)] axis=0)<line_sep>gt_center=(gt_bbox[0]+gt_bbox[1])<times>0.5<line_sep>gt_scale=(gt_bbox[1]-gt_bbox[0]).max()<while_stmt>points.shape[0]<ne>0<block_start>orig_points=points<line_sep>points=points.astype(np.float32)<line_sep>points=(np.expand_dims(points axis=0)/mesh_extractor.resolution-0.5)<times>scale<line_sep>points=points<times>gt_scale+gt_center<line_sep>n_points=points.shape[1]<line_sep>values=[]<for_stmt>i range(0 n_points 100000)# Add this to prevent OOM due to points overload. 
<block_start>feed_dict[latent_holder]=latent_val<line_sep>feed_dict[point_holder]=np.expand_dims(points[: i:i+100000] axis=1)<line_sep>value=sess.run(occ[: idx] feed_dict)<line_sep>values.append(value)<block_end>values=np.concatenate(values axis=1)<line_sep>values=values[0 : 0].astype(np.float64)<line_sep>mesh_extractor.update(orig_points values)<line_sep>points=mesh_extractor.query()<block_end>value_grid=mesh_extractor.to_dense()<try_stmt><block_start>value_grid=np.pad(value_grid 1 "constant" constant_values=-1e6)<line_sep>verts,faces,normals,unused_var=measure.marching_cubes_lewiner(value_grid min(level_set value_grid.max()))<del_stmt>normals<line_sep>verts<augsub>1<line_sep>verts<augdiv>np.array([value_grid.shape[0]-3 value_grid.shape[1]-3 value_grid.shape[2]-3] dtype=np.float32)<line_sep>verts=scale<times>(verts-0.5)<line_sep>verts=verts<times>gt_scale+gt_center<line_sep>faces=np.stack([faces[<ellipsis> 1] faces[<ellipsis> 0] faces[<ellipsis> 2]] axis=-1)<line_sep>mesh=trimesh.Trimesh(vertices=verts faces=faces)<line_sep><return>mesh<block_end><except_stmt># pylint: disable=bare-except <block_start><return><none><block_end><block_end><def_stmt>save_mesh sess feed_dict latent_holder point_holder latent occ batch_val hparams pth="meshes"<block_start>"""Generate and save meshes to disk given a trained NASA model."""<line_sep>name=batch_val["name"][0].decode("utf-8")<line_sep>subject,motion,frame=amass_name_helper(name)<line_sep>pth=path.join(hparams.train_dir pth frame)<if_stmt><not>tf.io.gfile.isdir(pth)<block_start>tf.io.gfile.makedirs(pth)<block_end>start=hparams.n_parts<for_stmt>i range(start hparams.n_parts+1)<block_start>mesh_model=gen_mesh(sess feed_dict latent_holder point_holder latent occ batch_val hparams idx=i)<line_sep>mesh_name="full_pred.obj"<if_stmt>mesh_model<is><not><none><block_start><with_stmt>tf.io.gfile.GFile(path.join(pth mesh_name) "w")<as>fout<block_start>mesh_model.export(fout file_type="obj")<block_end><block_end><block_end><return>subject motion frame mesh_model<block_end><def_stmt>save_pointcloud data hparams pth="pointcloud"<block_start>"""Save pointcloud to disk."""<line_sep>name=data["name"][0].decode("utf-8")<line_sep>unused_subject,unused_motion,frame=amass_name_helper(name)<line_sep>pth=path.join(hparams.train_dir pth frame)<if_stmt><not>tf.io.gfile.isdir(pth)<block_start>tf.io.gfile.makedirs(pth)<block_end>mesh_name="pointcloud.obj"<with_stmt>tf.io.gfile.GFile(path.join(pth mesh_name) "w")<as>fout<block_start>pointcloud=data["vert"].reshape([-1 3])<for_stmt>v pointcloud<block_start>fout.write("v {0} {1} {2}\n".format(*v.tolist()))<block_end><block_end><block_end><def_stmt>amass_name_helper name<block_start>name,frame=name.split("-")<line_sep>subject=name[:5]<line_sep>motion=name[6:]<line_sep><return>subject motion frame<block_end><def_stmt>make_summary_feed_dict iou_hook iou best_hook best_iou <block_start>feed_dict={}<line_sep>feed_dict[iou_hook]=iou<line_sep>feed_dict[best_hook]=best_iou<line_sep><return>feed_dict<block_end><def_stmt>parse_global_step ckpt<block_start>basename=path.basename(ckpt)<line_sep><return>int(basename.split("-")[-1])<block_end><def_stmt>compute_iou sess feed_dict latent_holder point_holder latent occ point label hparams<block_start>"""Compute IoU."""<line_sep>iou=0.<line_sep>eps=1e-9<line_sep>latent_val=sess.run(latent feed_dict)<line_sep>n_points=point.shape[2]<line_sep>preds=[]<for_stmt>start range(0 n_points 100000)<block_start>feed_dict[point_holder]=point[: : 
start:start+100000]<line_sep>feed_dict[latent_holder]=latent_val<line_sep>pred=sess.run(occ feed_dict)<line_sep>preds.append(pred)<block_end>pred=np.concatenate(preds axis=2)<line_sep>pred=(pred<ge>hparams.level_set).astype(np.float32)<line_sep>label=(label[: :1]<ge>0.5).astype(np.float32).squeeze(axis=1)<line_sep>iou<augadd>np.sum(pred<times>label)/np.maximum(np.sum(np.maximum(pred label)) eps)<line_sep><return>iou<block_end><def_stmt>compute_glue_loss connect end_pts inv_transforms inv_first_frame_trans joints hparams<block_start>"""Compute the prior term as a glue loss."""<line_sep>n_dims=hparams.n_dims<line_sep># Invert the transformation r_inv=inv_transforms[<ellipsis> :n_dims :n_dims]<line_sep>t_inv=inv_transforms[<ellipsis> :n_dims -1:]<line_sep>r=tf.transpose(r_inv [0 2 1])<line_sep>t=-tf.matmul(r t_inv)<line_sep>transforms=tf.concat([tf.concat([r t] axis=-1) inv_transforms[<ellipsis> -1: :]] axis=-2)<line_sep>transforms=tf.matmul(transforms inv_first_frame_trans)<line_sep># Compute transformations of father joints and apply it to vectors from frame0 father_transforms=tf.reduce_sum(tf.expand_dims(transforms axis=1)<times>connect.reshape([hparams.n_parts hparams.n_parts 1 1]) axis=0)<line_sep>end_pts_homo=tf.expand_dims(tf.concat([end_pts tf.ones_like(end_pts[<ellipsis> :1])] axis=-1) axis=-1)<line_sep>end_pts_transformed=tf.matmul(father_transforms end_pts_homo)<line_sep>end_pts_transformed=tf.squeeze(end_pts_transformed axis=-1)[<ellipsis> :n_dims]<line_sep># Compute vectors in current configuration pred_links=tf.reshape(joints [hparams.n_parts n_dims])<line_sep># Compute distance between links and transformed vectors <return>tf.reduce_sum(tf.square(pred_links-end_pts_transformed))<block_end><def_stmt>vanilla_theta_gradient model_fn batch_holder hparams<block_start>"""A vanilla gradient estimator for the pose, theta."""<line_sep>latent_holder,latent,occ_eval=model_fn(batch_holder <none> <none> "gen_mesh")<if_stmt>hparams.sample_vert<g>0<block_start>points=batch_holder["point"]<line_sep>weights=batch_holder["weight"]<line_sep>n_vert=tf.shape(points)[2]<line_sep>sample_indices=tf.random.uniform([1 1 hparams.sample_vert] minval=0 maxval=n_vert dtype=tf.int32)<line_sep>points=tf.gather(points sample_indices axis=2 batch_dims=2)<line_sep>weights=tf.gather(weights sample_indices axis=2 batch_dims=2)<line_sep>batch_holder["point"]=points<line_sep>batch_holder["weight"]=weights<block_end>unused_var0,unused_var1,occ=model_fn(batch_holder <none> <none> "gen_mesh")<line_sep><return>latent_holder latent occ_eval tf.reduce_mean(tf.square(occ-hparams.level_set))<block_end><def_stmt>reparam_theta_gradient model_fn batch_holder hparams<block_start>"""A gradient estimaor for the pose, theta, using the reparam trick."""<line_sep>sigma=hparams.bandwidth<line_sep>n_samples=hparams.n_noisy_samples<line_sep>latent_holder,latent,occ_eval=model_fn(batch_holder <none> <none> "gen_mesh")<if_stmt>hparams.sample_vert<g>0<block_start>points=batch_holder["point"]<line_sep>weights=batch_holder["weight"]<line_sep>n_vert=tf.shape(points)[2]<line_sep>sample_indices=tf.random.uniform([1 1 hparams.sample_vert] minval=0 maxval=n_vert dtype=tf.int32)<line_sep>points=tf.gather(points sample_indices axis=2 batch_dims=2)<line_sep>weights=tf.gather(weights sample_indices axis=2 batch_dims=2)<line_sep>batch_holder["point"]=points<line_sep>batch_holder["weight"]=weights<block_end>dist=tfd.Normal(loc=0. 
scale=sigma)<line_sep>n_pts=hparams.sample_vert<if>hparams.sample_vert<g>0<else>hparams.n_vert<line_sep>noises=dist.sample((1 hparams.n_parts n_pts n_samples hparams.n_dims))<line_sep>unused_var0,unused_var1,occ=model_fn(batch_holder noises <none> "gen_mesh")<line_sep>occ=tf.reshape(occ [1 hparams.n_parts+1 -1 n_samples 1])<line_sep>occ=tf.reduce_mean(occ[: hparams.n_parts:] axis=3)<line_sep><return>latent_holder latent occ_eval tf.reduce_mean(tf.square(occ-hparams.level_set))<block_end><def_stmt>optimize_theta feed_dict loss reset_op train_op rec_loss glue_loss sess k hparams<block_start>"""Optimize the pose, theta, during tracking."""<line_sep>sess.run(reset_op)<line_sep>loss_val=0<line_sep>glue_val=0<with_stmt>trange(hparams.max_steps_per_frame)<as>t<block_start><for_stmt>unused_i t<block_start>loss_val,unused_var,rec_val,glue_val=sess.run([loss train_op rec_loss glue_loss] feed_dict)<line_sep>t.set_description("Frame_{0} {1:.4f}|{2:.4f}".format(k rec_val glue_val))<block_end><block_end><return>loss_val glue_val<block_end>
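# A hedged sketch exercising the two pure helpers above; the sample strings are invented but follow the subject_motion-frame and ckpt-step layouts those helpers assume. <def_stmt>_example_name_parsing <block_start><assert_stmt>amass_name_helper("50002_jumping-000123")<eq>("50002" "jumping" "000123")<line_sep><assert_stmt>parse_global_step("/tmp/train/model.ckpt-200000")<eq>200000<block_end>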
<import_stmt>logging<import_from_stmt>pathlib Path<import_from_stmt>scrapy Spider Request<import_from_stmt>scrapy.crawler CrawlerProcess<import_from_stmt>scrapy_playwright.page PageCoroutine<class_stmt>HandleTimeoutMiddleware<block_start><def_stmt>process_exception self request exception spider<block_start>logging.info("Caught exception: %s" exception.__class__)<line_sep><return>Request(url="https://httpbin.org/get" meta={"playwright":<true> "playwright_page_coroutines":[PageCoroutine("screenshot" path=Path(__file__).parent/"recovered.png" full_page=<true>) ] } )<block_end><block_end><class_stmt>HandleExceptionSpider(Spider)<block_start>""" Handle exceptions in the Playwright downloader, such as TimeoutError """<line_sep>name="awesome"<line_sep>custom_settings={"PLAYWRIGHT_DEFAULT_NAVIGATION_TIMEOUT":1000 "DOWNLOADER_MIDDLEWARES":{HandleTimeoutMiddleware:100 } }<def_stmt>start_requests self<block_start><yield>Request(url="https://httpbin.org/delay/300" meta={"playwright":<true>} )<block_end><def_stmt>parse self response<block_start><yield>{"url":response.url}<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>process=CrawlerProcess(settings={"TWISTED_REACTOR":"twisted.internet.asyncioreactor.AsyncioSelectorReactor" "DOWNLOAD_HANDLERS":{"https":"scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler" # "http": "scrapy_playwright.handler.ScrapyPlaywrightDownloadHandler", } "RETRY_TIMES":0 })<line_sep>process.crawl(HandleExceptionSpider)<line_sep>process.start()<block_end>
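# A descriptive reading of the example above (a note, not additional behaviour): the 1000 ms default navigation timeout (Playwright timeouts are in milliseconds) makes the initial request to https://httpbin.org/delay/300 fail, HandleTimeoutMiddleware catches the exception and returns a fallback request to https://httpbin.org/get that also takes a full-page screenshot, and parse then yields the fallback URL.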
# vim: ft=python fileencoding=utf-8 sw=4 et sts=4 """Test the opening of different file-types with vimiv."""<import_stmt>os<import_from_stmt>unittest main<import_from_stmt>vimiv_testcase VimivTestCase<class_stmt>OpeningTest(VimivTestCase)<block_start>"""Open with different file-types Test."""<line_sep>@classmethod<def_stmt>setUpClass cls<block_start>cls.init_test(cls)<block_end><def_stmt>test_opening_with_directory self<block_start>"""Opening with a directory."""<line_sep>expected_dir=os.path.abspath("vimiv/testimages")<line_sep>self.init_test(["vimiv/testimages"])<line_sep>self.assertEqual(expected_dir os.getcwd())<line_sep>expected_files=["animation" "arch-logo.png" "arch_001.jpg" "directory" "symlink_to_image" "vimiv.bmp" "vimiv.svg" "vimiv.tiff"]<line_sep>self.assertEqual(self.vimiv["library"].files expected_files)<line_sep>self.assertTrue(self.vimiv["library"].is_focus())<line_sep>self.assertTrue(self.vimiv["library"].grid.is_visible())<block_end><def_stmt>test_opening_with_image self<block_start>"""Open with an image."""<line_sep>expected_dir=os.path.abspath("vimiv/testimages")<line_sep>self.init_test(["vimiv/testimages/arch_001.jpg"])<line_sep># Check moving and image population self.assertEqual(expected_dir os.getcwd())<line_sep>expected_images=["arch_001.jpg" "symlink_to_image" "vimiv.bmp" "vimiv.svg" "vimiv.tiff" "arch-logo.png"]<for_stmt>image [os.path.abspath(im)<for>im expected_images]<block_start>self.assertIn(image self.vimiv.get_paths())<block_end><block_end><def_stmt>test_opening_with_symlink self<block_start>"""Open with a symlink to an image."""<line_sep>expected_dir=os.path.abspath("vimiv/testimages")<line_sep>self.init_test(["vimiv/testimages/symlink_to_image"])<line_sep># Check moving and image population self.assertEqual(expected_dir os.getcwd())<line_sep>expected_images=["symlink_to_image" "vimiv.bmp" "vimiv.svg" "vimiv.tiff" "arch-logo.png" "arch_001.jpg"]<line_sep>expected_images=[os.path.abspath(image)<for>image expected_images]<for_stmt>image [os.path.abspath(im)<for>im expected_images]<block_start>self.assertIn(image self.vimiv.get_paths())<block_end><block_end><def_stmt>test_opening_with_whitespace self<block_start>"""Open an image with whitespace and symlink in directory."""<line_sep>expected_dir=os.path.abspath("vimiv/testimages/directory/")<line_sep>self.init_test(["vimiv/testimages/directory/symlink with spaces .jpg"])<line_sep># Check moving and image population self.assertEqual(expected_dir os.getcwd())<line_sep>expected_images=["symlink with spaces .jpg"]<line_sep>expected_images=[os.path.abspath(image)<for>image expected_images]<line_sep>self.assertEqual(expected_images self.vimiv.get_paths())<block_end><def_stmt>test_opening_recursively self<block_start>"""Open all images recursively."""<line_sep># Need to backup because we init in the wrong directory here working_dir=self.working_directory<line_sep>os.chdir("vimiv/testimages")<line_sep>self.init_test(["."] to_set=["recursive"] values=["true"])<line_sep>self.assertEqual(8 len(self.vimiv.get_paths()))<line_sep>self.settings.reset()<line_sep>self.working_directory=working_dir<block_end><def_stmt>tearDown self<block_start>self.vimiv.quit()<line_sep>os.chdir(self.working_directory)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
<import_from_stmt>gpiozero Robot Motor MotionSensor<import_from_stmt>signal pause<line_sep>robot=Robot(left=Motor(4 14) right=Motor(17 18))<line_sep>pir=MotionSensor(5)<line_sep>pir.when_motion=robot.forward<line_sep>pir.when_no_motion=robot.stop<line_sep>pause()<line_sep>
<import_from_stmt>typing Any<import_from_stmt>gevent.monkey patch_thread# type: ignore <import_from_stmt>doge.common.doge Executer Request Response<import_from_stmt>doge.common.utils import_string<line_sep>patch_thread()<class_stmt>BaseFilter(Executer)<block_start><def_stmt>__init__ self context:Any _next:Executer<block_start>self.next=_next<block_end><def_stmt>execute self req:Request<arrow>Response<block_start><return>self.next.execute(req)<block_end><block_end><class_stmt>FilterChain<block_start><def_stmt>__init__ self context:Any<block_start>self.context=context<block_end><def_stmt>then self executer:Executer<arrow>Executer<block_start>filters=self.context.url.get_param("filters" [])<for_stmt>cls reversed([import_string(f)<for>f filters])<block_start>executer=cls(self.context executer)<block_end><return>executer<block_end><block_end>
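# A hedged usage sketch: LoggingFilter and the names in the trailing comment are hypothetical; only the wrapping order comes from FilterChain.then above (the first entry in the 'filters' url param ends up outermost, so requests pass through filters in list order). <class_stmt>LoggingFilter(BaseFilter)<block_start><def_stmt>execute self req:Request<arrow>Response<block_start>print("doge request" req)<line_sep>res=self.next.execute(req)<line_sep>print("doge response" res)<line_sep><return>res<block_end><block_end># e.g. FilterChain(context).then(endpoint_executer) wraps endpoint_executer so every call runs LoggingFilter.execute first, assuming context.url.get_param("filters", []) returns ["myapp.filters.LoggingFilter"].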
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("TEST")<line_sep>process.source=cms.Source("PoolSource" fileNames=cms.untracked.vstring('file:/afs/cern.ch/cms/CAF/CMSCOMM/COMM_GLOBAL/CRUZET3/CMSSW_2_1_2/src/DPGAnalysis/Skims/python/reco_50908_210_CRZT210_V1P.root'))<line_sep>process.configurationMetadata=cms.untracked.PSet(version=cms.untracked.string('$Revision: 1.5 $') name=cms.untracked.string('$Source: /cvs_server/repositories/CMSSW/CMSSW/DPGAnalysis/Skims/python/DoubleMuon_cfg.py,v $') annotation=cms.untracked.string('CRUZET4 DoubleMuon skim'))<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(-1))<line_sep>process.options=cms.untracked.PSet(wantSummary=cms.untracked.bool(<true>))<line_sep>process.load("Configuration.StandardSequences.MagneticField_cff")<line_sep>process.load("Configuration.StandardSequences.Geometry_cff")<line_sep>process.load("Configuration.StandardSequences.FrontierConditions_GlobalTag_cff")<line_sep>process.GlobalTag.globaltag='CRZT210_V1::All'<line_sep>process.prefer("GlobalTag")<line_sep>process.load("Configuration.StandardSequences.ReconstructionCosmics_cff")<line_sep>process.doubleMuonFilter=cms.EDFilter("TrackCountFilter" src=cms.InputTag('cosmicMuonsBarrelOnly') minNumber=cms.uint32(2))<line_sep>process.doubleMuonPath=cms.Path(process.doubleMuonFilter)<line_sep>process.out=cms.OutputModule("PoolOutputModule" SelectEvents=cms.untracked.PSet(SelectEvents=cms.vstring('doubleMuonPath')) dataset=cms.untracked.PSet(dataTier=cms.untracked.string('RECO') filterName=cms.untracked.string('doubleMuonPath')) fileName=cms.untracked.string('doubleMuon.root'))<line_sep>process.this_is_the_end=cms.EndPath(process.out)<line_sep>
<import_stmt>responses<import_stmt>pytest<import_from_stmt>binance.spot Spot<as>Client<import_from_stmt>tests.util mock_http_response<import_from_stmt>tests.util random_str<import_from_stmt>binance.lib.utils encoded_string<import_from_stmt>binance.error ParameterRequiredError<line_sep>mock_item={"key_1":"value_1" "key_2":"value_2"}<line_sep>key=random_str()<line_sep>secret=random_str()<line_sep>email="<EMAIL>"<line_sep>subAccountApiKey=random_str()<line_sep>complete_params={"email":email "subAccountApiKey":subAccountApiKey}<line_sep>parameterized_test_params=[({"email":<none> "subAccountApiKey":<none>}) ({"email":"" "subAccountApiKey":subAccountApiKey}) ({"email":email "subAccountApiKey":""}) ]<line_sep>client=Client(key secret)<line_sep>@pytest.mark.parametrize("params" parameterized_test_params)<def_stmt>test_sub_account_api_get_ip_restriction_without_missing_param params<block_start>"""Tests the API endpoint to get IP Restriction for a sub-account API key without subAccountApiKey"""<line_sep>client.sub_account_api_get_ip_restriction.when.called_with(**params).should.throw(ParameterRequiredError)<block_end>@mock_http_response(responses.GET "/sapi/v1/sub-account/subAccountApi/ipRestriction\\?"+encoded_string(complete_params) mock_item 200 )<def_stmt>test_sub_account_api_get_ip_restriction <block_start>"""Tests the API endpoint to get IP Restriction for a sub-account API key"""<line_sep>client.sub_account_api_get_ip_restriction(**complete_params).should.equal(mock_item)<block_end>
# Generated by Django 3.2.6 on 2021-09-06 01:58 <import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<import_stmt>django.db.models.deletion<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[('reviews' '0035_auto_20210829_0005') migrations.swappable_dependency(settings.AUTH_USER_MODEL) ('activities' '0002_alter_activity_id') ]<line_sep>operations=[migrations.AlterField(model_name='activity' name='activity_type' field=models.CharField(choices=[('F' 'Follow') ('C' 'Comment') ('S' 'Star')] max_length=1 verbose_name='type') ) migrations.AlterField(model_name='activity' name='content' field=models.CharField(blank=<true> max_length=500 verbose_name='content') ) migrations.AlterField(model_name='activity' name='date' field=models.DateTimeField(auto_now_add=<true> verbose_name='date') ) migrations.AlterField(model_name='activity' name='from_user' field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to=settings.AUTH_USER_MODEL verbose_name='from user') ) migrations.AlterField(model_name='activity' name='review' field=models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE to='reviews.review' verbose_name='review') ) migrations.AlterField(model_name='activity' name='to_user' field=models.ForeignKey(null=<true> on_delete=django.db.models.deletion.CASCADE related_name='+' to=settings.AUTH_USER_MODEL verbose_name='to user') ) ]<block_end>
<import_stmt>pandas<as>pd<import_from_stmt>pathlib Path<import_stmt>json<def_stmt>load_results_df folder='results'<block_start>folder=Path(folder)<line_sep>results_dicts=[]<for_stmt>p sorted(folder.glob('**/results.json'))<block_start><with_stmt>p.open('r')<as>f<block_start>results_dicts.append(json.load(f))<block_end><block_end><return>pd.DataFrame.from_dict(results_dicts)<block_end><if_stmt>__name__<eq>"__main__"<block_start>df=load_results_df().sort_values(by=['test_mae'] ascending=<true>)<line_sep>print(df.head(5))<block_end>
# Copyright 2017-2020 Fitbit, Inc # SPDX-License-Identifier: Apache-2.0 """ Invoke configuration for Golden Gate """<line_sep># First check that we are running in a Python >= 3.5 environment <import_from_future_stmt> print_function<import_stmt>sys<if_stmt><not>(sys.version_info.major<eq>3<and>sys.version_info.minor<ge>5)<block_start>print("""You are using 'invoke' in a Python 2.x environment, but Python >= 3.5 is required. You have probably not activated the 'gg' conda environment, please check the 'Getting Started' guide for more details on how to setup your environment""")<line_sep>sys.exit(1)<block_end># Imports <import_stmt>os<import_stmt>subprocess<import_from_stmt>invoke Collection Config task<import_from_stmt>. android<import_from_stmt>. apple<import_from_stmt>. pylon<import_from_stmt>. native<import_from_stmt>. clean<import_from_stmt>. wasm<import_from_stmt>. doc<import_from_stmt>. docker<line_sep># Assuming you haven't moved the default location of '.git', the .git/ folder (even for submodules) # will be at the root of the repo. Thus, find the folder .git/ is within and assume that's the root GIT_DIR=subprocess.check_output("git rev-parse --show-toplevel" shell=<true>).strip().decode("utf-8")<line_sep>ROOT_DIR=GIT_DIR<line_sep># Initialize constants that are common among all platforms/products <def_stmt>initialize_constants cfg<block_start>cfg.C={}<line_sep># We can't access the paths variable by using dot notation, since there is a paths() function # on a Config object. We must use Dictionary syntax. # http://docs.pyinvoke.org/en/0.15.0/api/config.html#module-invoke.config cfg.C.ROOT_DIR=ROOT_DIR<line_sep>cfg.C.BIN_DIR=os.path.join(cfg.C.ROOT_DIR "bin")<line_sep>cfg.C.BUILD_ROOT_DIR=os.path.join(cfg.C.ROOT_DIR "xp/build")<line_sep>cfg.C.BUILD_DIR=os.path.join(cfg.C.ROOT_DIR "xp/build/cmake")<line_sep>cfg.C.BUILD_DIR_NATIVE=os.path.join(cfg.C.BUILD_DIR "native")<line_sep>cfg.C.PLATFORM_DIR=os.path.join(cfg.C.ROOT_DIR "platform")<line_sep>cfg.C.APPS_DIR=os.path.join(cfg.C.BUILD_DIR_NATIVE "apps")<line_sep>cfg.C.APPLE_BUILD_TEMP_DIR=os.path.join(cfg.C.PLATFORM_DIR "apple/output")<line_sep>cfg.C.DOC_DIR=os.path.join(cfg.C.ROOT_DIR "docs")<block_end>config=Config(project_location=ROOT_DIR)<line_sep>initialize_constants(config)<line_sep># Add collections ns=Collection()<line_sep>ns.add_collection(android)<line_sep>ns.add_collection(apple)<line_sep>ns.add_collection(pylon)<line_sep>ns.add_collection(native)<line_sep>ns.add_collection(clean)<line_sep>ns.add_collection(wasm)<line_sep>ns.add_collection(doc)<line_sep>ns.add_collection(docker)<line_sep># After collections are set up, set the config. ns.configure(config)<line_sep>ns.configure(android.config)<line_sep>ns.configure(apple.config)<line_sep>ns.configure(pylon.config)<line_sep>