content (string, lengths 0 to 1.55M) |
---|
<import_from_stmt>typing List<import_from_stmt>backend.common.cache_clearing get_affected_queries<import_from_stmt>backend.common.manipulators.manipulator_base ManipulatorBase<import_from_stmt>backend.common.models.cached_model TAffectedReferences<import_from_stmt>backend.common.models.match Match<class_stmt>MatchManipulator(ManipulatorBase[Match])<block_start>"""
Handle Match database writes.
"""<line_sep>@classmethod<def_stmt>getCacheKeysAndQueries cls affected_refs:TAffectedReferences<arrow>List[get_affected_queries.TCacheKeyAndQuery]<block_start><return>get_affected_queries.match_updated(affected_refs)<block_end>"""
@classmethod
def postDeleteHook(cls, matches):
'''
To run after the match has been deleted.
'''
for match in matches:
try:
FirebasePusher.delete_match(match)
except Exception:
logging.warning("Firebase delete_match failed!")
"""<line_sep>"""
@classmethod
def postUpdateHook(cls, matches, updated_attr_list, is_new_list):
'''
To run after the match has been updated.
Send push notifications to subscribed users
Only if the match is part of an active event
'''
unplayed_match_events = []
for (match, updated_attrs, is_new) in zip(matches, updated_attr_list, is_new_list):
event = match.event.get()
# Only continue if the event is currently happening
if event.now:
if match.has_been_played:
if is_new or 'alliances_json' in updated_attrs:
# There is a score update for this match, push a notification
logging.info("Sending push notifications for {}".format(match.key_name))
try:
NotificationHelper.send_match_score_update(match)
except Exception as exception:
logging.error("Error sending match updates: {}".format(exception))
logging.error(traceback.format_exc())
try:
TBANSHelper.match_score(match)
except Exception as exception:
logging.error("Error sending match {} updates: {}".format(match.key_name, exception))
logging.error(traceback.format_exc())
else:
if is_new or (set(['alliances_json', 'time', 'time_string']).intersection(set(updated_attrs)) != set()):
# The match has not been played and we're changing a property that affects the event's schedule
# So send a schedule update notification for the parent event
if event not in unplayed_match_events:
unplayed_match_events.append(event)
# Try to send video notifications
if '_video_added' in updated_attrs:
try:
NotificationHelper.send_match_video(match)
except Exception as exception:
logging.error("Error sending match video updates: {}".format(exception))
logging.error(traceback.format_exc())
try:
TBANSHelper.match_video(match)
except Exception as exception:
logging.error("Error sending match video updates: {}".format(exception))
logging.error(traceback.format_exc())
'''
If we have an unplayed match during an event within a day, send out a schedule update notification
'''
for event in unplayed_match_events:
try:
logging.info("Sending schedule updates for: {}".format(event.key_name))
NotificationHelper.send_schedule_update(event)
except Exception as exception:
logging.error("Error sending schedule updates for: {}".format(event.key_name))
try:
TBANSHelper.event_schedule(event)
except Exception as exception:
logging.error("Error sending schedule updates for: {}".format(event.key_name))
logging.error(traceback.format_exc())
try:
# When an event gets a new schedule, we should schedule `match_upcoming` notifications for the first matches for the event
TBANSHelper.schedule_upcoming_matches(event)
except Exception as exception:
logging.error("Error scheduling match_upcoming for: {}".format(event.key_name))
logging.error(traceback.format_exc())
'''
Enqueue firebase push
'''
affected_stats_event_keys = set()
for (match, updated_attrs, is_new) in zip(matches, updated_attr_list, is_new_list):
# Only attrs that affect stats
if is_new or set(['alliances_json', 'score_breakdown_json']).intersection(set(updated_attrs)) != set():
affected_stats_event_keys.add(match.event.id())
try:
FirebasePusher.update_match(match, updated_attrs)
except Exception:
logging.warning("Firebase update_match failed!")
logging.warning(traceback.format_exc())
# Enqueue statistics
for event_key in affected_stats_event_keys:
# Enqueue task to calculate matchstats
try:
taskqueue.add(
url='/tasks/math/do/event_matchstats/' + event_key,
method='GET')
except Exception:
logging.error("Error enqueuing event_matchstats for {}".format(event_key))
logging.error(traceback.format_exc())
# Enqueue task to calculate district points
try:
taskqueue.add(
url='/tasks/math/do/district_points_calc/{}'.format(event_key),
method='GET')
except Exception:
logging.error("Error enqueuing district_points_calc for {}".format(event_key))
logging.error(traceback.format_exc())
# Enqueue task to calculate event team status
try:
taskqueue.add(
url='/tasks/math/do/event_team_status/{}'.format(event_key),
method='GET')
except Exception:
logging.error("Error enqueuing event_team_status for {}".format(event_key))
logging.error(traceback.format_exc())
# Enqueue updating playoff advancement
try:
taskqueue.add(
url='/tasks/math/do/playoff_advancement_update/{}'.format(event_key),
method='GET')
except Exception:
logging.error("Error enqueuing advancement update for {}".format(event_key))
logging.error(traceback.format_exc())
"""<line_sep>@classmethod<def_stmt>updateMerge cls new_model:Match old_model:Match auto_union:bool=<true><arrow>Match# Lets postUpdateHook know if videos went from 0 to >0
<block_start>added_video=<not>old_model.has_video<and>new_model.has_video<line_sep>cls._update_attrs(new_model old_model auto_union)<if_stmt>added_video<block_start>old_model._updated_attrs.add("_video_added")<block_end><return>old_model<block_end><block_end>
|
<import_from_stmt>nose.tools *<import_stmt>scipy.stats<import_stmt>torch<import_stmt>numpy<as>np<import_from_stmt>stable_nalu.dataset SimpleFunctionStaticDataset<def_stmt>test_solveable_by_linear_algebra <block_start>dataset=SimpleFunctionStaticDataset(operation='add' seed=0)<line_sep>dataset_test=iter(dataset.fork(input_range=1).dataloader(batch_size=100))<line_sep>x_batch,t_batch=next(dataset_test)<line_sep>x_batch_np=np.stack(x_batch)<line_sep>t_batch_np=np.stack(t_batch)<line_sep>w_merged_np=np.linalg.solve(x_batch_np t_batch_np.ravel())<line_sep>w_merged_np_int=np.round(w_merged_np 0).astype('int8')<line_sep># W is whole numbers
np.testing.assert_almost_equal(w_merged_np-w_merged_np_int np.zeros(100) decimal=4)<line_sep># W is either 0, 1, 2
# NOTE: a different seed might not result in an overlap, thus {2} might
# not be present.
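# (since the 'add' target sums the inputs in the a range and in the b range, an
# index in neither range gets weight 0, in one range weight 1, and in the overlap 2)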
assert_equal(set(w_merged_np_int.tolist()) {0 1 2})<line_sep># Compute a, b range parameters
# For seed=0, the b subset is a subset of the a subset, which is assumed
# by the following algorithm.
a_start=<none><line_sep>a_end=<none><line_sep>b_start=<none><line_sep>b_end=<none><line_sep>previous_w_value=0<for_stmt>w_index,w_value enumerate(w_merged_np_int.tolist())<block_start><if_stmt>w_value<eq>1<and>previous_w_value<eq>0<block_start>a_start=w_index<block_end><elif_stmt>w_value<eq>0<and>previous_w_value<eq>1<block_start>a_end=w_index<block_end><elif_stmt>w_value<eq>2<and>previous_w_value<eq>1<block_start>b_start=w_index<block_end><elif_stmt>w_value<eq>1<and>previous_w_value<eq>2<block_start>b_end=w_index<block_end>previous_w_value=w_value<block_end># Compare a and b range parameters
assert_equal(a_start dataset.a_start)<line_sep>assert_equal(a_end dataset.a_end)<line_sep>assert_equal(b_start dataset.b_start)<line_sep>assert_equal(b_end dataset.b_end)<block_end><def_stmt>test_input_range <block_start>dataset=SimpleFunctionStaticDataset(operation='add' vector_size=10000 seed=0)<line_sep>x,t=dataset.fork(input_range=5)[0]<line_sep>_,p=scipy.stats.kstest(x scipy.stats.uniform(loc=0 scale=5).cdf)<assert_stmt>p<g>0.5<block_end><def_stmt>test_output_shape <block_start>dataset=SimpleFunctionStaticDataset(operation='add' seed=0)<line_sep>x,t=dataset.fork(input_range=5)[0]<line_sep>assert_equal(x.shape (100 ))<line_sep># Note, t.shape should be a 1-long vector, not a scalar. Otherwise
# the loss function gets confused about what the observation dimension
# is.
assert_equal(t.shape (1 ))<block_end>
|
<import_from_stmt>.. *<def_stmt>run image<block_start>scene=Scene()<line_sep>scene.add(Triangle([Vector(0.5 0.5) Vector(0.8 0.5) Vector(0.5 0.8)] Color(1 0 0 1)))<line_sep>scene.draw(image)<block_end>
|
<import_stmt>json<import_stmt>os<import_from_stmt>. constants<as>Constants<class_stmt>Logger<block_start><def_stmt>__init__ self dirname config=<none> overwrite=<false> logging=<true><block_start>self.logging=logging<if_stmt>os.path.exists(dirname)<block_start><if_stmt><not>overwrite<block_start><raise>Exception("Directory already exists: {}".format(dirname))<block_end><block_end><else_stmt><block_start>os.makedirs(dirname)<block_end><if_stmt>config<is><not><none><block_start>self.log_json(config os.path.join(dirname Constants._CONFIG_FILE))<block_end><if_stmt>logging<block_start>self.fout=open(os.path.join(dirname Constants._SAVED_METRICS_FILE) "a")<block_end><block_end><def_stmt>log_json self data filename mode="w"<block_start><with_stmt>open(filename mode)<as>outfile<block_start>outfile.write(json.dumps(data indent=4 ensure_ascii=<false>))<block_end><block_end><def_stmt>write self text<block_start><if_stmt>self.logging<block_start>self.fout.writelines(text+"\n")<line_sep>self.fout.flush()<block_end><block_end><def_stmt>close self<block_start><if_stmt>self.logging<block_start>self.fout.close()<block_end><block_end><block_end>
|
<import_from_stmt>django.core.urlresolvers reverse<import_from_stmt>djangorestframework.compat View<import_from_stmt>djangorestframework.mixins ResponseMixin<import_from_stmt>djangorestframework.renderers JSONRenderer<import_from_stmt>django.contrib.auth.models User<import_from_stmt>djangorestframework.resources ModelResource<import_from_stmt>djangorestframework.response Response<import_from_stmt>accounts.models UserProfile<import_from_stmt>videos.models Video Channel<def_stmt>is_allowed user<block_start>"""
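Return True when the user may use the API: a paid account, an active trial, or an API key on the profile.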
"""<line_sep><return>(user.userprofile.is_paid)<or>(user.userprofile.is_using_trial)<or>(user.userprofile.api_key)<block_end><class_stmt>VideoResource(ResponseMixin View)<block_start>"""
Returns all videos under the account of the api_key provided.
The format of the response is in JSON.
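A successful (200) response body looks like {'success': True, 'videos': [{'id': ..., 'channel': ..., 'url': ..., 'title': ..., 'embed_code': ...}]}; 401 is returned for an invalid or unauthorized api_key, and 400 when no api_key is supplied.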
"""<line_sep>renderers=[JSONRenderer ]<line_sep>csrf_exempt=<true><def_stmt>get self request<block_start>key=request.GET.get('api_key' <none>)<line_sep>account_id=-1<if_stmt>key<block_start>userprofile=<none><try_stmt><block_start>userprofile=UserProfile.objects.get(api_key=key)<line_sep>account_id=userprofile.user_id<block_end><except_stmt><block_start><pass><block_end><if_stmt>userprofile<block_start>user=<none><try_stmt><block_start>user=User.objects.get(pk=account_id)<block_end><except_stmt><block_start><pass><block_end><if_stmt>user<block_start><if_stmt>is_allowed(user)<block_start>videos=Video.objects.filter(uploader__id=account_id)<line_sep>json_videos=[]<for_stmt>video videos<block_start>channel_name=<none><if_stmt>video.channel<block_start>channel_name=video.channel.name<block_end>json_videos.append({'id':video.id 'channel':channel_name 'url':video.get_absolute_url() 'title':video.title 'embed_code':video.get_embed_code()})<block_end>response=Response(200 {'success':<true> 'videos':json_videos})<block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(400)<block_end><return>self.render(response)<block_end><block_end><class_stmt>ChannelVideoResource(ResponseMixin View)<block_start>"""
Returns all videos under the channel of an account of the api_key provided.
The format of the response is in JSON.
"""<line_sep>renderers=[JSONRenderer ]<line_sep>csrf_exempt=<true><def_stmt>get self request<block_start>channel_link=request.GET.get('channel_link' <none>)<line_sep>key=request.GET.get('api_key' <none>)<line_sep>account_id=-1<if_stmt>key<and>channel_link<block_start>userprofile=<none><try_stmt><block_start>userprofile=UserProfile.objects.get(api_key=key)<line_sep>account_id=userprofile.user_id<block_end><except_stmt><block_start><pass><block_end>channel=<none><try_stmt><block_start>channel=Channel.objects.get(api_link=channel_link)<block_end><except_stmt><block_start><pass><block_end><if_stmt>channel<block_start><if_stmt>(channel.owner.id<eq>account_id)<and>is_allowed(channel.owner)<block_start>videos=Video.objects.filter(channel=channel)<line_sep>json_videos=[]<for_stmt>video videos<block_start>channel_name=<none><if_stmt>video.channel<block_start>channel_name=video.channel.name<block_end>json_videos.append({'id':video.id 'channel':channel_name 'url':video.get_absolute_url() 'title':video.title 'embed_code':video.get_embed_code()})<block_end>response=Response(200 {'success':<true> 'videos':json_videos})<block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(401)<block_end><return>self.render(response)<block_end><block_end><class_stmt>LatestVideoResource(ResponseMixin View)<block_start>"""
Returns the latest video under the account of the api_key provided.
The format of the response is in JSON.
"""<line_sep>renderers=[JSONRenderer ]<line_sep>csrf_exempt=<true><def_stmt>get self request<block_start>key=request.GET.get('api_key' <none>)<line_sep>account_id=-1<if_stmt>key<block_start>userprofile=<none><try_stmt><block_start>userprofile=UserProfile.objects.get(api_key=key)<line_sep>account_id=userprofile.user_id<block_end><except_stmt><block_start><pass><block_end><if_stmt>userprofile<block_start>user=<none><try_stmt><block_start>user=User.objects.get(pk=account_id)<block_end><except_stmt><block_start><pass><block_end><if_stmt>user<block_start><if_stmt>is_allowed(user)<block_start>videos=Video.objects.filter(uploader__id=account_id).order_by('-created')[:1:]<line_sep>json_videos=[]<for_stmt>video videos<block_start>channel_name=<none><if_stmt>video.channel<block_start>channel_name=video.channel.name<block_end>json_videos.append({'id':video.id 'channel':channel_name 'url':video.get_absolute_url() 'title':video.title 'embed_code':video.get_embed_code()})<block_end>response=Response(200 {'success':<true> 'videos':json_videos})<block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(400)<block_end><return>self.render(response)<block_end><block_end><class_stmt>LatestChannelVideoResource(ResponseMixin View)<block_start>renderers=[JSONRenderer ]<line_sep>csrf_exempt=<true><def_stmt>get self request<block_start>key=request.GET.get('api_key' <none>)<line_sep>channel_link=request.GET.get('channel_link' <none>)<line_sep>account_id=-1<if_stmt>key<and>channel_link<block_start>userprofile=<none><try_stmt><block_start>userprofile=UserProfile.objects.get(api_key=key)<line_sep>account_id=userprofile.user_id<block_end><except_stmt><block_start><pass><block_end>channel=<none><try_stmt><block_start>channel=Channel.objects.get(api_link=channel_link)<block_end><except_stmt><block_start><pass><block_end><if_stmt>channel<block_start><if_stmt>(channel.owner.id<eq>account_id)<and>is_allowed(channel.owner)<block_start>videos=Video.objects.filter(channel=channel).order_by('-created')[:1:]<line_sep>json_videos=[]<for_stmt>video videos<block_start>channel_name=<none><if_stmt>video.channel<block_start>channel_name=video.channel.name<block_end>json_videos.append({'id':video.id 'channel':channel_name 'url':video.get_absolute_url() 'title':video.title 'embed_code':video.get_embed_code()})<block_end>response=Response(200 {'success':<true> 'videos':json_videos})<block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(401)<block_end><block_end><else_stmt><block_start>response=Response(401)<block_end><return>self.render(response)<block_end><block_end>
|
"""Utility to add editor syntax highlighting to literal code strings.
Example:
from google.colab import syntax
query = syntax.sql('''
SELECT * from tablename
''')
"""<def_stmt>html s<block_start>"""Noop function to enable HTML highlighting for its argument."""<line_sep><return>s<block_end><def_stmt>javascript s<block_start>"""Noop function to enable JavaScript highlighting for its argument."""<line_sep><return>s<block_end><def_stmt>sql s<block_start>"""Noop function to enable SQL highlighting for its argument."""<line_sep><return>s<block_end><def_stmt>css s<block_start>"""Noop function to enable CSS highlighting for its argument."""<line_sep><return>s<block_end>
|
# from the paper `using cython to speedup numerical python programs'
#pythran export timeloop(float, float, float, float, float, float list list, float list list, float list list)
#pythran export timeloop(float, float, float, float, float, int list list, int list list, int list list)
#bench A=[list(range(70)) for i in range(100)] ; B=[list(range(70)) for i in range(100)] ; C=[list(range(70)) for i in range(100)] ; timeloop(1.,2.,.01,.1,.18, A,B,C )
#runas A=[list(range(10)) for i in range(5)] ; B=[list(range(10)) for i in range(5)] ; C=[list(range(10)) for i in range(5)] ; timeloop(1.,2.,.1,.1,.2, A,B,C )
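# timeloop/calculate_u appear to implement a leapfrog finite-difference update for the
# 2D wave equation with a spatially varying coefficient k (5-point stencil in space).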
<def_stmt>timeloop t t_stop dt dx dy u um k<block_start><while_stmt>t<le>t_stop<block_start>t<augadd>dt<line_sep>new_u=calculate_u(dt dx dy u um k)<line_sep>um=u<line_sep>u=new_u<block_end><return>u<block_end><def_stmt>calculate_u dt dx dy u um k<block_start>up=[[0.]<times>len(u[0])<for>i range(len(u))]<line_sep>"omp parallel for"<for_stmt>i range(1 len(u)-1)<block_start><for_stmt>j range(1 len(u[0])-1)<block_start>up[i][j]=2<times>u[i][j]-um[i][j]+(dt/dx)<power>2<times>((0.5<times>(k[i+1][j]+k[i][j])<times>(u[i+1][j]-u[i][j])-0.5<times>(k[i][j]+k[i-1][j])<times>(u[i][j]-u[i-1][j])))+(dt/dy)<power>2<times>((0.5<times>(k[i][j+1]+k[i][j])<times>(u[i][j+1]-u[i][j])-0.5<times>(k[i][j]+k[i][j-1])<times>(u[i][j]-u[i][j-1])))<block_end><block_end><return>up<block_end>
|
# coding=utf-8
# Copyright 2020 HuggingFace Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
"""Introduction to AMTTL CWS Dataset"""<import_stmt>datasets<line_sep>logger=datasets.logging.get_logger(__name__)<line_sep>_CITATION="""\
@inproceedings{xing2018adaptive,
title={Adaptive multi-task transfer learning for Chinese word segmentation in medical text},
author={<NAME> and <NAME> and <NAME>},
booktitle={Proceedings of the 27th International Conference on Computational Linguistics},
pages={3619--3630},
year={2018}
}
"""<line_sep>_DESCRIPTION="""\
Chinese word segmentation (CWS) trained from open source corpus faces dramatic performance drop
when dealing with domain text, especially for a domain with lots of special terms and diverse
writing styles, such as the biomedical domain. However, building domain-specific CWS requires
extremely high annotation cost. In this paper, we propose an approach by exploiting domain-invariant
knowledge from high resource to low resource domains. Extensive experiments show that our model
achieves consistently higher accuracy than the single-task CWS and other transfer learning
baselines, especially when there is a large disparity between source and target domains.
This dataset is the accompanied medical Chinese word segmentation (CWS) dataset.
The tags follow the BIES scheme (B = begin, I = inside, E = end of a multi-character word, S = single-character word).
For more details see https://www.aclweb.org/anthology/C18-1307/
"""<line_sep>_URL="https://raw.githubusercontent.com/adapt-sjtu/AMTTL/master/medical_data/"<line_sep>_TRAINING_FILE="forum_train.txt"<line_sep>_DEV_FILE="forum_dev.txt"<line_sep>_TEST_FILE="forum_test.txt"<class_stmt>AmttlConfig(datasets.BuilderConfig)<block_start>"""BuilderConfig for AMTTL"""<def_stmt>__init__ self **kwargs<block_start>"""BuilderConfig for AMTTL.
Args:
**kwargs: keyword arguments forwarded to super.
"""<line_sep>super(AmttlConfig self).__init__(**kwargs)<block_end><block_end><class_stmt>Amttl(datasets.GeneratorBasedBuilder)<block_start>"""AMTTL Chinese Word Segmentation dataset."""<line_sep>BUILDER_CONFIGS=[AmttlConfig(name="amttl" version=datasets.Version("1.0.0") description="AMTTL medical Chinese word segmentation dataset" ) ]<def_stmt>_info self<block_start><return>datasets.DatasetInfo(description=_DESCRIPTION features=datasets.Features({"id":datasets.Value("string") "tokens":datasets.Sequence(datasets.Value("string")) "tags":datasets.Sequence(datasets.features.ClassLabel(names=["B" "I" "E" "S" ])) }) supervised_keys=<none> homepage="https://www.aclweb.org/anthology/C18-1307/" citation=_CITATION )<block_end><def_stmt>_split_generators self dl_manager<block_start>"""Returns SplitGenerators."""<line_sep>urls_to_download={"train":f"{_URL}{_TRAINING_FILE}" "dev":f"{_URL}{_DEV_FILE}" "test":f"{_URL}{_TEST_FILE}" }<line_sep>downloaded_files=dl_manager.download_and_extract(urls_to_download)<line_sep><return>[datasets.SplitGenerator(name=datasets.Split.TRAIN gen_kwargs={"filepath":downloaded_files["train"]}) datasets.SplitGenerator(name=datasets.Split.VALIDATION gen_kwargs={"filepath":downloaded_files["dev"]}) datasets.SplitGenerator(name=datasets.Split.TEST gen_kwargs={"filepath":downloaded_files["test"]}) ]<block_end><def_stmt>_generate_examples self filepath<block_start>logger.info("⏳ Generating examples from = %s" filepath)<with_stmt>open(filepath encoding="utf-8")<as>f<block_start>guid=0<line_sep>tokens=[]<line_sep>tags=[]<for_stmt>line f<block_start>line_stripped=line.strip()<if_stmt>line_stripped<eq>""<block_start><if_stmt>tokens<block_start><yield>guid {"id":str(guid) "tokens":tokens "tags":tags }<line_sep>guid<augadd>1<line_sep>tokens=[]<line_sep>tags=[]<block_end><block_end><else_stmt><block_start>splits=line_stripped.split("\t")<if_stmt>len(splits)<eq>1<block_start>splits.append("O")<block_end>tokens.append(splits[0])<line_sep>tags.append(splits[1])<block_end><block_end># last example
<yield>guid {"id":str(guid) "tokens":tokens "tags":tags }<block_end><block_end><block_end>
|
# -*- coding: ascii -*-
"""
Calculates the stellar lifetime in a range of masses between
Mmax and Mmin using SSE (or another stellar evolution code)
and an analytic expression.
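The analytic fit used below is t(M) = 2 + 1.0e4 / (M/MSun)**2.5 Myr
(see power_law_fit_to_main_sequence_lifetime).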
"""<import_from_future_stmt> print_function<import_stmt>numpy<import_from_stmt>optparse OptionParser<import_from_stmt>amuse.units units<import_from_stmt>amuse.datamodel Particle<import_from_stmt>amuse.plot plot<import_from_stmt>matplotlib pyplot<as>plt<import_from_stmt>amuse.community.sse.interface SSE<line_sep>se=<none><def_stmt>stellar_remnant_state star<block_start><return>10<le>star.stellar_type.value_in(units.stellar_type)<l>16<block_end><def_stmt>stellar_lifetime mZAMS z=0.02<block_start><global>se<if_stmt>se<is><none><block_start>se=SSE()<line_sep>se.parameters.metallicity=z<block_end>se.particles.add_particle(Particle(mass=mZAMS))<while_stmt><not>stellar_remnant_state(se.particles[0])<block_start>se.evolve_model()<block_end>t_end=se.particles[0].age<line_sep># tpe = se.particles[0].stellar_type
se.particles.remove_particle(se.particles[0])<line_sep><return>t_end<block_end><def_stmt>power_law_fit_to_main_sequence_lifetime mZAMS<block_start><return>2+1.0E+4/pow(mZAMS.value_in(units.MSun) 2.5)|units.Myr<block_end><def_stmt>main n=10 mmin=1.0 mmax=100 z=0.02<block_start>dm=(mmax-mmin)/n<line_sep>mZAMS=numpy.arange(mmin mmax dm)|units.MSun<line_sep>mmin=mmin|units.MSun<line_sep>mmax=mmax|units.MSun<line_sep>print(mZAMS)<line_sep>t_sse=[]|units.Myr<line_sep>t_analytic=[]|units.Myr<for_stmt>mi mZAMS<block_start>t_sse.append(stellar_lifetime(mi z))<line_sep>t_analytic.append(power_law_fit_to_main_sequence_lifetime(mi))<block_end>plot(mZAMS t_sse label="sse")<line_sep>plot(mZAMS t_analytic label="analytic")<line_sep>plt.loglog()<line_sep>plt.legend()<line_sep>plt.title("comparison between SSE and analytic with z="+str(z))<line_sep>plt.show()<block_end><def_stmt>new_option_parser <block_start>result=OptionParser()<line_sep>result.add_option("-n" dest="n" type="int" default=10 help="number of stars")<line_sep>result.add_option("-m" dest="mmin" type="float" default=1.0 help="Minimal mass [1.0] MSun")<line_sep>result.add_option("-M" dest="mmax" type="float" default=100.0 help="Maximal mass [100] MSun")<line_sep>result.add_option("-z" dest="z" type="float" default=0.02 help="metalicity [0.02]")<line_sep><return>result<block_end><if_stmt>__name__<eq>"__main__"<block_start>o,arguments=new_option_parser().parse_args()<line_sep>main(**o.__dict__)<block_end>
|
<import_stmt>mistune<import_from_stmt>common.markdown.club_renderer ClubRenderer<import_from_stmt>common.markdown.email_renderer EmailRenderer<import_from_stmt>common.markdown.plain_renderer PlainRenderer<def_stmt>markdown_text text renderer=ClubRenderer<block_start>markdown=mistune.create_markdown(escape=<true> renderer=renderer() plugins=["strikethrough" "url"])<line_sep><return>(markdown(text)<or>"").strip()<block_end><def_stmt>markdown_plain text<block_start><return>markdown_text(text renderer=PlainRenderer)<block_end><def_stmt>markdown_email text<block_start><return>markdown_text(text renderer=EmailRenderer)<block_end>
|
<import_stmt>pytest<import_from_stmt>..protocol Packet Message FiniteStream<import_from_stmt>..exception DeserializationException<class_stmt>TestPacket(object)# test data definitions for outgoing packet types
<block_start>cmd_request=b"\x00\x0c"<concat>b"command_type"<line_sep>cmd_request_msg=b"\x00\x07"<concat>b"command"<concat>b"payload"<line_sep>event_register=b"\x03\x0a"<concat>b"event_type"<line_sep>event_unregister=b"\x04\x0a"<concat>b"event_type"<line_sep># test data definitions for incoming packet types
cmd_response=b"\x01"<concat>b"reply"<line_sep>cmd_unknown=b"\x02"<line_sep>event_confirm=b"\x05"<line_sep>event_unknown=b"\x06"<line_sep>event=b"\x07\x03"<concat>b"log"<concat>b"message"<def_stmt>test_request self<block_start><assert_stmt>Packet.request("command_type")<eq>self.cmd_request<assert_stmt>Packet.request("command" b"payload")<eq>self.cmd_request_msg<block_end><def_stmt>test_register_event self<block_start><assert_stmt>Packet.register_event("event_type")<eq>self.event_register<block_end><def_stmt>test_unregister_event self<block_start><assert_stmt>Packet.unregister_event("event_type")<eq>self.event_unregister<block_end><def_stmt>test_parse self<block_start>parsed_cmd_response=Packet.parse(self.cmd_response)<assert_stmt>parsed_cmd_response.response_type<eq>Packet.CMD_RESPONSE<assert_stmt>parsed_cmd_response.payload.getvalue()<eq>self.cmd_response<line_sep>parsed_cmd_unknown=Packet.parse(self.cmd_unknown)<assert_stmt>parsed_cmd_unknown.response_type<eq>Packet.CMD_UNKNOWN<assert_stmt>parsed_cmd_unknown.payload.getvalue()<eq>self.cmd_unknown<line_sep>parsed_event_confirm=Packet.parse(self.event_confirm)<assert_stmt>parsed_event_confirm.response_type<eq>Packet.EVENT_CONFIRM<assert_stmt>parsed_event_confirm.payload.getvalue()<eq>self.event_confirm<line_sep>parsed_event_unknown=Packet.parse(self.event_unknown)<assert_stmt>parsed_event_unknown.response_type<eq>Packet.EVENT_UNKNOWN<assert_stmt>parsed_event_unknown.payload.getvalue()<eq>self.event_unknown<line_sep>parsed_event=Packet.parse(self.event)<assert_stmt>parsed_event.response_type<eq>Packet.EVENT<assert_stmt>parsed_event.payload.getvalue()<eq>self.event<block_end><block_end><class_stmt>TestMessage(object)<block_start>"""Message (de)serialization test."""<line_sep># data definitions for test of de(serialization)
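# (apparent element encoding in the byte strings below: 0x01 section start, 0x02 section end,
# 0x03 key/value, 0x04 list start, 0x05 list item, 0x06 list end; names carry a 1-byte length,
# values and list items a 2-byte length)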
# serialized messages holding a section
ser_sec_unclosed=b"\x01\x08unclosed"<line_sep>ser_sec_single=b"\x01\x07section\x02"<line_sep>ser_sec_nested=b"\x01\x05outer\x01\x0asubsection\x02\x02"<line_sep># serialized messages holding a list
ser_list_invalid=b"\x04\x07invalid\x05\x00\x02e1\x02\x03sec\x06"<line_sep>ser_list_0_item=b"\x04\x05empty\x06"<line_sep>ser_list_1_item=b"\x04\x01l\x05\x00\x02e1\x06"<line_sep>ser_list_2_item=b"\x04\x01l\x05\x00\x02e1\x05\x00\x02e2\x06"<line_sep># serialized messages with key value pairs
ser_kv_pair=b"\<KEY>"<line_sep>ser_kv_zero=b"\x03\x0azerolength\x00\x00"<line_sep># deserialized messages holding a section
des_sec_single={"section":{}}<line_sep>des_sec_nested={"outer":{"subsection":{}}}<line_sep># deserialized messages holding a list
des_list_0_item={"empty":[]}<line_sep>des_list_1_item={"l":[b"e1"]}<line_sep>des_list_2_item={"l":[b"e1" b"e2"]}<line_sep># deserialized messages with key value pairs
des_kv_pair={"key":b"value"}<line_sep>des_kv_zero={"zerolength":b""}<def_stmt>test_section_serialization self<block_start><assert_stmt>Message.serialize(self.des_sec_single)<eq>self.ser_sec_single<assert_stmt>Message.serialize(self.des_sec_nested)<eq>self.ser_sec_nested<block_end><def_stmt>test_list_serialization self<block_start><assert_stmt>Message.serialize(self.des_list_0_item)<eq>self.ser_list_0_item<assert_stmt>Message.serialize(self.des_list_1_item)<eq>self.ser_list_1_item<assert_stmt>Message.serialize(self.des_list_2_item)<eq>self.ser_list_2_item<block_end><def_stmt>test_key_serialization self<block_start><assert_stmt>Message.serialize(self.des_kv_pair)<eq>self.ser_kv_pair<assert_stmt>Message.serialize(self.des_kv_zero)<eq>self.ser_kv_zero<block_end><def_stmt>test_section_deserialization self<block_start>single=Message.deserialize(FiniteStream(self.ser_sec_single))<line_sep>nested=Message.deserialize(FiniteStream(self.ser_sec_nested))<assert_stmt>single<eq>self.des_sec_single<assert_stmt>nested<eq>self.des_sec_nested<with_stmt>pytest.raises(DeserializationException)<block_start>Message.deserialize(FiniteStream(self.ser_sec_unclosed))<block_end><block_end><def_stmt>test_list_deserialization self<block_start>l0=Message.deserialize(FiniteStream(self.ser_list_0_item))<line_sep>l1=Message.deserialize(FiniteStream(self.ser_list_1_item))<line_sep>l2=Message.deserialize(FiniteStream(self.ser_list_2_item))<assert_stmt>l0<eq>self.des_list_0_item<assert_stmt>l1<eq>self.des_list_1_item<assert_stmt>l2<eq>self.des_list_2_item<with_stmt>pytest.raises(DeserializationException)<block_start>Message.deserialize(FiniteStream(self.ser_list_invalid))<block_end><block_end><def_stmt>test_key_deserialization self<block_start>pair=Message.deserialize(FiniteStream(self.ser_kv_pair))<line_sep>zerolength=Message.deserialize(FiniteStream(self.ser_kv_zero))<assert_stmt>pair<eq>self.des_kv_pair<assert_stmt>zerolength<eq>self.des_kv_zero<block_end><def_stmt>test_roundtrip self<block_start>message={"key1":"value1" "section1":{"sub-section":{"key2":b"value2" } "list1":["item1" "item2"] } }<line_sep>serialized_message=FiniteStream(Message.serialize(message))<line_sep>deserialized_message=Message.deserialize(serialized_message)<line_sep># ensure that list items and key values remain as undecoded bytes
deserialized_section=deserialized_message["section1"]<assert_stmt>deserialized_message["key1"]<eq>b"value1"<assert_stmt>deserialized_section["sub-section"]["key2"]<eq>b"value2"<assert_stmt>deserialized_section["list1"]<eq>[b"item1" b"item2"]<block_end><block_end>
|
# -*- coding: utf-8 -*-
<import_stmt>factory<import_from_stmt>factory.django DjangoModelFactory<import_from_stmt>ralph.reports.models Report ReportLanguage ReportTemplate<class_stmt>ReportFactory(DjangoModelFactory)<block_start>name=factory.Sequence(<lambda>n:'Report {}'.format(n))<class_stmt>Meta<block_start>model=Report<block_end><block_end><class_stmt>ReportLanguageFactory(DjangoModelFactory)<block_start>name=factory.Sequence(<lambda>n:'Report-lang {}'.format(n))<line_sep>default=<false><class_stmt>Meta<block_start>model=ReportLanguage<block_end><block_end><class_stmt>ReportTemplateFactory(DjangoModelFactory)<block_start>template=factory.django.FileField(filename='the_file.dat')<line_sep>language=factory.SubFactory(ReportLanguageFactory)<line_sep>default=<false><line_sep>report=factory.SubFactory(ReportFactory)<class_stmt>Meta<block_start>model=ReportTemplate<block_end><block_end>
|
<import_stmt>pytest<import_from_stmt>vyper.utils annotate_source_code indent<line_sep>TEST_TEXT="""
test
lines
to
indent
"""[1:-1]<def_stmt>test_indent_indents_text <block_start><assert_stmt>(indent(TEST_TEXT indent_chars="-" level=1)<eq>"""
-test
-lines
-to
-indent
"""[1:-1])<assert_stmt>(indent(TEST_TEXT indent_chars=" " level=4)<eq>"""
test
lines
to
indent
"""[1:-1])<assert_stmt>(indent(TEST_TEXT indent_chars=[" " "*" "-" "="] level=4)<eq>"""
test
****lines
----to
====indent
"""[1:-1])<block_end><def_stmt>test_indent_raises_value_errors <block_start><with_stmt>pytest.raises(ValueError match="Must provide indentation chars for each line" )<block_start>indent(TEST_TEXT indent_chars=[" "] level=1)<block_end><with_stmt>pytest.raises(ValueError match="Unrecognized indentation characters value" )<block_start>indent(TEST_TEXT indent_chars=<none> level=1)<block_end><block_end># type: ignore
TEST_SOURCE_CODE=r"""
# Attempts to display the line and column of violating code.
class ParserException(Exception):
def __init__(self, message='Error Message not found.', item=None):
self.message = message
self.lineno = None
self.col_offset = None
if isinstance(item, tuple): # is a position.
self.lineno, self.col_offset = item
elif item and hasattr(item, 'lineno'):
self.set_err_pos(item.lineno, item.col_offset)
if hasattr(item, 'source_code'):
self.source_code = item.source_code.splitlines()
def set_err_pos(self, lineno, col_offset):
if not self.lineno:
self.lineno = lineno
if not self.col_offset:
self.col_offset = col_offset
def __str__(self):
output = self.message
if self.lineno and hasattr(self, 'source_code'):
output = f'line {self.lineno}: {output}\n{self.source_code[self.lineno -1]}'
if self.col_offset:
col = '-' * self.col_offset + '^'
output += '\n' + col
elif self.lineno is not None and self.col_offset is not None:
output = f'line {self.lineno}:{self.col_offset} {output}'
return output
"""[1:-1]<def_stmt>test_annotate_source_code_marks_positions_in_source_code <block_start>annotation=annotate_source_code(TEST_SOURCE_CODE 22 col_offset=16 context_lines=0 line_numbers=<false> )<assert_stmt>(annotation<eq>r"""
def __str__(self):
----------------^
"""[1:-1])<line_sep>annotation=annotate_source_code(TEST_SOURCE_CODE 22 col_offset=15 context_lines=1 line_numbers=<false> )<assert_stmt>(annotation<eq>r"""
def __str__(self):
---------------^
output = self.message
"""[1:-1])<line_sep>annotation=annotate_source_code(TEST_SOURCE_CODE 22 col_offset=20 context_lines=2 line_numbers=<false> )<assert_stmt>(annotation<eq>r"""
self.col_offset = col_offset
def __str__(self):
--------------------^
output = self.message
"""[1:-1])<line_sep>annotation=annotate_source_code(TEST_SOURCE_CODE 1 col_offset=5 context_lines=3 line_numbers=<true> )<assert_stmt>(annotation<eq>r"""
---> 1 # Attempts to display the line and column of violating code.
------------^
2 class ParserException(Exception):
3 def __init__(self, message='Error Message not found.', item=None):
4 self.message = message
"""[1:-1])<line_sep>annotation=annotate_source_code(TEST_SOURCE_CODE 36 col_offset=8 context_lines=4 line_numbers=<true> )<assert_stmt>(annotation<eq>r"""
32
33 elif self.lineno is not None and self.col_offset is not None:
34 output = f'line {self.lineno}:{self.col_offset} {output}'
35
---> 36 return output
----------------^
"""[1:-1])<line_sep>annotation=annotate_source_code(TEST_SOURCE_CODE 15 col_offset=8 context_lines=11 line_numbers=<true> )<assert_stmt>(annotation<eq>r"""
4 self.message = message
5 self.lineno = None
6 self.col_offset = None
7
8 if isinstance(item, tuple): # is a position.
9 self.lineno, self.col_offset = item
10 elif item and hasattr(item, 'lineno'):
11 self.set_err_pos(item.lineno, item.col_offset)
12 if hasattr(item, 'source_code'):
13 self.source_code = item.source_code.splitlines()
14
---> 15 def set_err_pos(self, lineno, col_offset):
----------------^
16 if not self.lineno:
17 self.lineno = lineno
18
19 if not self.col_offset:
20 self.col_offset = col_offset
21
22 def __str__(self):
23 output = self.message
24
25 if self.lineno and hasattr(self, 'source_code'):
26
"""[1:-1])<line_sep>annotation=annotate_source_code(TEST_SOURCE_CODE 15 col_offset=<none> context_lines=3 line_numbers=<true> )<assert_stmt>(annotation<eq>r"""
12 if hasattr(item, 'source_code'):
13 self.source_code = item.source_code.splitlines()
14
---> 15 def set_err_pos(self, lineno, col_offset):
16 if not self.lineno:
17 self.lineno = lineno
18
"""[1:-1])<line_sep>annotation=annotate_source_code(TEST_SOURCE_CODE 15 col_offset=<none> context_lines=2 line_numbers=<false> )<assert_stmt>(annotation<eq>r"""
self.source_code = item.source_code.splitlines()
def set_err_pos(self, lineno, col_offset):
if not self.lineno:
self.lineno = lineno
"""[1:-1])<block_end>@pytest.mark.parametrize("bad_lineno" (-100 -1 0 45 1000) )<def_stmt>test_annotate_source_code_raises_value_errors bad_lineno<block_start><with_stmt>pytest.raises(ValueError match="Line number is out of range" )<block_start>annotate_source_code(TEST_SOURCE_CODE bad_lineno)<block_end><block_end>
|
<import_stmt>io<import_stmt>os<import_stmt>unittest<import_stmt>zstandard<as>zstd<import_from_stmt>.common CustomBytesIO <class_stmt>TestDecompressor_stream_reader(unittest.TestCase)<block_start><def_stmt>test_context_manager self<block_start>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(b"foo")<as>reader<block_start><with_stmt>self.assertRaisesRegex(ValueError "cannot __enter__ multiple times")<block_start><with_stmt>reader<as>reader2<block_start><pass><block_end><block_end><block_end><block_end><def_stmt>test_not_implemented self<block_start>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(b"foo")<as>reader<block_start><with_stmt>self.assertRaises(io.UnsupportedOperation)<block_start>reader.readline()<block_end><with_stmt>self.assertRaises(io.UnsupportedOperation)<block_start>reader.readlines()<block_end><with_stmt>self.assertRaises(io.UnsupportedOperation)<block_start>iter(reader)<block_end><with_stmt>self.assertRaises(io.UnsupportedOperation)<block_start>next(reader)<block_end><with_stmt>self.assertRaises(io.UnsupportedOperation)<block_start>reader.write(b"foo")<block_end><with_stmt>self.assertRaises(io.UnsupportedOperation)<block_start>reader.writelines([])<block_end><block_end><block_end><def_stmt>test_constant_methods self<block_start>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(b"foo")<as>reader<block_start>self.assertFalse(reader.closed)<line_sep>self.assertTrue(reader.readable())<line_sep>self.assertFalse(reader.writable())<line_sep>self.assertFalse(reader.seekable())<line_sep>self.assertFalse(reader.isatty())<line_sep>self.assertFalse(reader.closed)<line_sep>self.assertIsNone(reader.flush())<line_sep>self.assertFalse(reader.closed)<block_end>self.assertTrue(reader.closed)<block_end><def_stmt>test_read_closed self<block_start>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(b"foo")<as>reader<block_start>reader.close()<line_sep>self.assertTrue(reader.closed)<with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start>reader.read(1)<block_end><block_end><block_end><def_stmt>test_read_sizes self<block_start>cctx=zstd.ZstdCompressor()<line_sep>foo=cctx.compress(b"foo")<line_sep>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(foo)<as>reader<block_start><with_stmt>self.assertRaisesRegex(ValueError "cannot read negative amounts less than -1")<block_start>reader.read(-2)<block_end>self.assertEqual(reader.read(0) b"")<line_sep>self.assertEqual(reader.read() b"foo")<block_end><block_end><def_stmt>test_read_buffer self<block_start>cctx=zstd.ZstdCompressor()<line_sep>source=b"".join([b"foo"<times>60 b"bar"<times>60 b"baz"<times>60])<line_sep>frame=cctx.compress(source)<line_sep>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(frame)<as>reader<block_start>self.assertEqual(reader.tell() 0)<line_sep># We should get entire frame in one read.
result=reader.read(8192)<line_sep>self.assertEqual(result source)<line_sep>self.assertEqual(reader.tell() len(source))<line_sep># Read after EOF should return empty bytes.
self.assertEqual(reader.read(1) b"")<line_sep>self.assertEqual(reader.tell() len(result))<block_end>self.assertTrue(reader.closed)<block_end><def_stmt>test_read_buffer_small_chunks self<block_start>cctx=zstd.ZstdCompressor()<line_sep>source=b"".join([b"foo"<times>60 b"bar"<times>60 b"baz"<times>60])<line_sep>frame=cctx.compress(source)<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>chunks=[]<with_stmt>dctx.stream_reader(frame read_size=1)<as>reader<block_start><while_stmt><true><block_start>chunk=reader.read(1)<if_stmt><not>chunk<block_start><break><block_end>chunks.append(chunk)<line_sep>self.assertEqual(reader.tell() sum(map(len chunks)))<block_end><block_end>self.assertEqual(b"".join(chunks) source)<block_end><def_stmt>test_read_stream self<block_start>cctx=zstd.ZstdCompressor()<line_sep>source=b"".join([b"foo"<times>60 b"bar"<times>60 b"baz"<times>60])<line_sep>frame=cctx.compress(source)<line_sep>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(io.BytesIO(frame))<as>reader<block_start>self.assertEqual(reader.tell() 0)<line_sep>chunk=reader.read(8192)<line_sep>self.assertEqual(chunk source)<line_sep>self.assertEqual(reader.tell() len(source))<line_sep>self.assertEqual(reader.read(1) b"")<line_sep>self.assertEqual(reader.tell() len(source))<line_sep>self.assertFalse(reader.closed)<block_end>self.assertTrue(reader.closed)<block_end><def_stmt>test_read_stream_small_chunks self<block_start>cctx=zstd.ZstdCompressor()<line_sep>source=b"".join([b"foo"<times>60 b"bar"<times>60 b"baz"<times>60])<line_sep>frame=cctx.compress(source)<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>chunks=[]<with_stmt>dctx.stream_reader(io.BytesIO(frame) read_size=1)<as>reader<block_start><while_stmt><true><block_start>chunk=reader.read(1)<if_stmt><not>chunk<block_start><break><block_end>chunks.append(chunk)<line_sep>self.assertEqual(reader.tell() sum(map(len chunks)))<block_end><block_end>self.assertEqual(b"".join(chunks) source)<block_end><def_stmt>test_close self<block_start>foo=zstd.ZstdCompressor().compress(b"foo"<times>1024)<line_sep>buffer=io.BytesIO(foo)<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(buffer)<line_sep>reader.read(3)<line_sep>self.assertFalse(reader.closed)<line_sep>self.assertFalse(buffer.closed)<line_sep>reader.close()<line_sep>self.assertTrue(reader.closed)<line_sep>self.assertTrue(buffer.closed)<with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start>reader.read()<block_end><with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start><with_stmt>reader<block_start><pass><block_end><block_end># Context manager exit should not close stream.
buffer=io.BytesIO(foo)<line_sep>reader=dctx.stream_reader(buffer)<with_stmt>reader<block_start>reader.read(3)<block_end>self.assertTrue(reader.closed)<line_sep>self.assertTrue(buffer.closed)<line_sep># Context manager exit should close stream if an exception raised.
buffer=io.BytesIO(foo)<line_sep>reader=dctx.stream_reader(buffer)<with_stmt>self.assertRaisesRegex(Exception "ignore")<block_start><with_stmt>reader<block_start>reader.read(3)<line_sep><raise>Exception("ignore")<block_end><block_end>self.assertTrue(reader.closed)<line_sep>self.assertTrue(buffer.closed)<line_sep># Test with non-file source variant.
<with_stmt>dctx.stream_reader(foo)<as>reader<block_start>reader.read(3)<line_sep>self.assertFalse(reader.closed)<block_end>self.assertTrue(reader.closed)<block_end><def_stmt>test_close_closefd_false self<block_start>foo=zstd.ZstdCompressor().compress(b"foo"<times>1024)<line_sep>buffer=io.BytesIO(foo)<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(buffer closefd=<false>)<line_sep>reader.read(3)<line_sep>self.assertFalse(reader.closed)<line_sep>self.assertFalse(buffer.closed)<line_sep>reader.close()<line_sep>self.assertTrue(reader.closed)<line_sep>self.assertFalse(buffer.closed)<with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start>reader.read()<block_end><with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start><with_stmt>reader<block_start><pass><block_end><block_end># Context manager exit should not close stream.
buffer=io.BytesIO(foo)<line_sep>reader=dctx.stream_reader(buffer closefd=<false>)<with_stmt>reader<block_start>reader.read(3)<block_end>self.assertTrue(reader.closed)<line_sep>self.assertFalse(buffer.closed)<line_sep># Context manager exit should close stream if an exception raised.
buffer=io.BytesIO(foo)<line_sep>reader=dctx.stream_reader(buffer closefd=<false>)<with_stmt>self.assertRaisesRegex(Exception "ignore")<block_start><with_stmt>reader<block_start>reader.read(3)<line_sep><raise>Exception("ignore")<block_end><block_end>self.assertTrue(reader.closed)<line_sep>self.assertFalse(buffer.closed)<line_sep># Test with non-file source variant.
<with_stmt>dctx.stream_reader(foo closefd=<false>)<as>reader<block_start>reader.read(3)<line_sep>self.assertFalse(reader.closed)<block_end>self.assertTrue(reader.closed)<block_end><def_stmt>test_read_after_exit self<block_start>cctx=zstd.ZstdCompressor()<line_sep>frame=cctx.compress(b"foo"<times>60)<line_sep>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(frame)<as>reader<block_start><while_stmt>reader.read(16)<block_start><pass><block_end><block_end>self.assertTrue(reader.closed)<with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start>reader.read(10)<block_end><block_end><def_stmt>test_illegal_seeks self<block_start>cctx=zstd.ZstdCompressor()<line_sep>frame=cctx.compress(b"foo"<times>60)<line_sep>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(frame)<as>reader<block_start><with_stmt>self.assertRaisesRegex(OSError "cannot seek to negative position")<block_start>reader.seek(-1 os.SEEK_SET)<block_end>reader.read(1)<with_stmt>self.assertRaisesRegex(OSError "cannot seek zstd decompression stream backwards")<block_start>reader.seek(0 os.SEEK_SET)<block_end><with_stmt>self.assertRaisesRegex(OSError "cannot seek zstd decompression stream backwards")<block_start>reader.seek(-1 os.SEEK_CUR)<block_end><with_stmt>self.assertRaisesRegex(OSError "zstd decompression streams cannot be seeked with SEEK_END" )<block_start>reader.seek(0 os.SEEK_END)<block_end>reader.close()<with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start>reader.seek(4 os.SEEK_SET)<block_end><block_end><with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start>reader.seek(0)<block_end><block_end><def_stmt>test_seek self<block_start>source=b"foobar"<times>60<line_sep>cctx=zstd.ZstdCompressor()<line_sep>frame=cctx.compress(source)<line_sep>dctx=zstd.ZstdDecompressor()<with_stmt>dctx.stream_reader(frame)<as>reader<block_start>reader.seek(3)<line_sep>self.assertEqual(reader.read(3) b"bar")<line_sep>reader.seek(4 os.SEEK_CUR)<line_sep>self.assertEqual(reader.read(2) b"ar")<block_end><block_end><def_stmt>test_no_context_manager self<block_start>source=b"foobar"<times>60<line_sep>cctx=zstd.ZstdCompressor()<line_sep>frame=cctx.compress(source)<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(frame)<line_sep>self.assertEqual(reader.read(6) b"foobar")<line_sep>self.assertEqual(reader.read(18) b"foobar"<times>3)<line_sep>self.assertFalse(reader.closed)<line_sep># Calling close prevents subsequent use.
reader.close()<line_sep>self.assertTrue(reader.closed)<with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start>reader.read(6)<block_end><block_end><def_stmt>test_read_after_error self<block_start>source=io.BytesIO(b"")<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(source)<with_stmt>reader<block_start>reader.read(0)<block_end><with_stmt>self.assertRaisesRegex(ValueError "stream is closed")<block_start><with_stmt>reader<block_start><pass><block_end><block_end><block_end><def_stmt>test_partial_read self# Inspired by https://github.com/indygreg/python-zstandard/issues/71.
<block_start>buffer=io.BytesIO()<line_sep>cctx=zstd.ZstdCompressor()<line_sep>writer=cctx.stream_writer(buffer)<line_sep>writer.write(bytearray(os.urandom(1000000)))<line_sep>writer.flush(zstd.FLUSH_FRAME)<line_sep>buffer.seek(0)<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(buffer)<while_stmt><true><block_start>chunk=reader.read(8192)<if_stmt><not>chunk<block_start><break><block_end><block_end><block_end><def_stmt>test_read_multiple_frames self<block_start>cctx=zstd.ZstdCompressor()<line_sep>source=io.BytesIO()<line_sep>writer=cctx.stream_writer(source)<line_sep>writer.write(b"foo")<line_sep>writer.flush(zstd.FLUSH_FRAME)<line_sep>writer.write(b"bar")<line_sep>writer.flush(zstd.FLUSH_FRAME)<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(source.getvalue())<line_sep>self.assertEqual(reader.read(2) b"fo")<line_sep>self.assertEqual(reader.read(2) b"o")<line_sep>self.assertEqual(reader.read(2) b"ba")<line_sep>self.assertEqual(reader.read(2) b"r")<line_sep>source.seek(0)<line_sep>reader=dctx.stream_reader(source)<line_sep>self.assertEqual(reader.read(2) b"fo")<line_sep>self.assertEqual(reader.read(2) b"o")<line_sep>self.assertEqual(reader.read(2) b"ba")<line_sep>self.assertEqual(reader.read(2) b"r")<line_sep>reader=dctx.stream_reader(source.getvalue())<line_sep>self.assertEqual(reader.read(3) b"foo")<line_sep>self.assertEqual(reader.read(3) b"bar")<line_sep>source.seek(0)<line_sep>reader=dctx.stream_reader(source)<line_sep>self.assertEqual(reader.read(3) b"foo")<line_sep>self.assertEqual(reader.read(3) b"bar")<line_sep>reader=dctx.stream_reader(source.getvalue())<line_sep>self.assertEqual(reader.read(4) b"foo")<line_sep>self.assertEqual(reader.read(4) b"bar")<line_sep>source.seek(0)<line_sep>reader=dctx.stream_reader(source)<line_sep>self.assertEqual(reader.read(4) b"foo")<line_sep>self.assertEqual(reader.read(4) b"bar")<line_sep>reader=dctx.stream_reader(source.getvalue())<line_sep>self.assertEqual(reader.read(128) b"foo")<line_sep>self.assertEqual(reader.read(128) b"bar")<line_sep>source.seek(0)<line_sep>reader=dctx.stream_reader(source)<line_sep>self.assertEqual(reader.read(128) b"foo")<line_sep>self.assertEqual(reader.read(128) b"bar")<line_sep># Now tests for reads spanning frames.
reader=dctx.stream_reader(source.getvalue() read_across_frames=<true>)<line_sep>self.assertEqual(reader.read(3) b"foo")<line_sep>self.assertEqual(reader.read(3) b"bar")<line_sep>source.seek(0)<line_sep>reader=dctx.stream_reader(source read_across_frames=<true>)<line_sep>self.assertEqual(reader.read(3) b"foo")<line_sep>self.assertEqual(reader.read(3) b"bar")<line_sep>reader=dctx.stream_reader(source.getvalue() read_across_frames=<true>)<line_sep>self.assertEqual(reader.read(6) b"foobar")<line_sep>source.seek(0)<line_sep>reader=dctx.stream_reader(source read_across_frames=<true>)<line_sep>self.assertEqual(reader.read(6) b"foobar")<line_sep>reader=dctx.stream_reader(source.getvalue() read_across_frames=<true>)<line_sep>self.assertEqual(reader.read(7) b"foobar")<line_sep>source.seek(0)<line_sep>reader=dctx.stream_reader(source read_across_frames=<true>)<line_sep>self.assertEqual(reader.read(7) b"foobar")<line_sep>reader=dctx.stream_reader(source.getvalue() read_across_frames=<true>)<line_sep>self.assertEqual(reader.read(128) b"foobar")<line_sep>source.seek(0)<line_sep>reader=dctx.stream_reader(source read_across_frames=<true>)<line_sep>self.assertEqual(reader.read(128) b"foobar")<block_end><def_stmt>test_readinto self<block_start>cctx=zstd.ZstdCompressor()<line_sep>foo=cctx.compress(b"foo")<line_sep>dctx=zstd.ZstdDecompressor()<line_sep># Attempting to readinto() a non-writable buffer fails.
# The exact exception varies based on the backend.
reader=dctx.stream_reader(foo)<with_stmt>self.assertRaises(Exception)<block_start>reader.readinto(b"foobar")<block_end># readinto() with sufficiently large destination.
b=bytearray(1024)<line_sep>reader=dctx.stream_reader(foo)<line_sep>self.assertEqual(reader.readinto(b) 3)<line_sep>self.assertEqual(b[0:3] b"foo")<line_sep>self.assertEqual(reader.readinto(b) 0)<line_sep>self.assertEqual(b[0:3] b"foo")<line_sep># readinto() with small reads.
b=bytearray(1024)<line_sep>reader=dctx.stream_reader(foo read_size=1)<line_sep>self.assertEqual(reader.readinto(b) 3)<line_sep>self.assertEqual(b[0:3] b"foo")<line_sep># Too small destination buffer.
b=bytearray(2)<line_sep>reader=dctx.stream_reader(foo)<line_sep>self.assertEqual(reader.readinto(b) 2)<line_sep>self.assertEqual(b[:] b"fo")<block_end><def_stmt>test_readinto1 self<block_start>cctx=zstd.ZstdCompressor()<line_sep>foo=cctx.compress(b"foo")<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(foo)<with_stmt>self.assertRaises(Exception)<block_start>reader.readinto1(b"foobar")<block_end># Sufficiently large destination.
b=bytearray(1024)<line_sep>reader=dctx.stream_reader(foo)<line_sep>self.assertEqual(reader.readinto1(b) 3)<line_sep>self.assertEqual(b[0:3] b"foo")<line_sep>self.assertEqual(reader.readinto1(b) 0)<line_sep>self.assertEqual(b[0:3] b"foo")<line_sep># readinto() with small reads.
b=bytearray(1024)<line_sep>reader=dctx.stream_reader(foo read_size=1)<line_sep>self.assertEqual(reader.readinto1(b) 3)<line_sep>self.assertEqual(b[0:3] b"foo")<line_sep># Too small destination buffer.
b=bytearray(2)<line_sep>reader=dctx.stream_reader(foo)<line_sep>self.assertEqual(reader.readinto1(b) 2)<line_sep>self.assertEqual(b[:] b"fo")<block_end><def_stmt>test_readall self<block_start>cctx=zstd.ZstdCompressor()<line_sep>foo=cctx.compress(b"foo")<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(foo)<line_sep>self.assertEqual(reader.readall() b"foo")<block_end><def_stmt>test_read1 self<block_start>cctx=zstd.ZstdCompressor()<line_sep>foo=cctx.compress(b"foo")<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>b=CustomBytesIO(foo)<line_sep>reader=dctx.stream_reader(b)<line_sep>self.assertEqual(reader.read1() b"foo")<line_sep>self.assertEqual(b._read_count 1)<line_sep>b=CustomBytesIO(foo)<line_sep>reader=dctx.stream_reader(b)<line_sep>self.assertEqual(reader.read1(0) b"")<line_sep>self.assertEqual(reader.read1(2) b"fo")<line_sep>self.assertEqual(b._read_count 1)<line_sep>self.assertEqual(reader.read1(1) b"o")<line_sep>self.assertEqual(b._read_count 1)<line_sep>self.assertEqual(reader.read1(1) b"")<line_sep>self.assertEqual(b._read_count 2)<block_end><def_stmt>test_read_lines self<block_start>cctx=zstd.ZstdCompressor()<line_sep>source=b"\n".join(("line %d"%i).encode("ascii")<for>i range(1024))<line_sep>frame=cctx.compress(source)<line_sep>dctx=zstd.ZstdDecompressor()<line_sep>reader=dctx.stream_reader(frame)<line_sep>tr=io.TextIOWrapper(reader encoding="utf-8")<line_sep>lines=[]<for_stmt>line tr<block_start>lines.append(line.encode("utf-8"))<block_end>self.assertEqual(len(lines) 1024)<line_sep>self.assertEqual(b"".join(lines) source)<line_sep>reader=dctx.stream_reader(frame)<line_sep>tr=io.TextIOWrapper(reader encoding="utf-8")<line_sep>lines=tr.readlines()<line_sep>self.assertEqual(len(lines) 1024)<line_sep>self.assertEqual("".join(lines).encode("utf-8") source)<line_sep>reader=dctx.stream_reader(frame)<line_sep>tr=io.TextIOWrapper(reader encoding="utf-8")<line_sep>lines=[]<while_stmt><true><block_start>line=tr.readline()<if_stmt><not>line<block_start><break><block_end>lines.append(line.encode("utf-8"))<block_end>self.assertEqual(len(lines) 1024)<line_sep>self.assertEqual(b"".join(lines) source)<block_end><block_end>
|
# License: BSD 3 clause
<import_from_stmt>.model Model<import_from_stmt>.model_first_order ModelFirstOrder<import_from_stmt>.model_labels_features ModelLabelsFeatures<import_from_stmt>.model_second_order ModelSecondOrder<import_from_stmt>.model_self_concordant ModelSelfConcordant<import_from_stmt>.model_lipschitz ModelLipschitz<import_from_stmt>.model_generalized_linear ModelGeneralizedLinear<import_from_stmt>.model LOSS<import_from_stmt>.model GRAD<import_from_stmt>.model LOSS_AND_GRAD<import_from_stmt>.model HESSIAN_NORM<import_from_stmt>.model N_CALLS_LOSS<import_from_stmt>.model N_CALLS_GRAD<import_from_stmt>.model N_CALLS_LOSS_AND_GRAD<import_from_stmt>.model N_CALLS_HESSIAN_NORM<import_from_stmt>.model PASS_OVER_DATA<line_sep>__all__=["Model" "ModelFirstOrder" "ModelSecondOrder" "ModelLabelsFeatures" "ModelSelfConcordant" "ModelGeneralizedLinear" "ModelLipschitz" ]<line_sep>
|
<import_from_future_stmt> division<import_stmt>numpy<as>np<import_stmt>torch.backends.cudnn<as>cudnn<import_from_stmt>torch.distributions kl_divergence<import_from_stmt>torch.distributions.normal Normal<import_from_stmt>carla log<import_from_stmt>carla.recourse_methods.catalog.clue.library.clue_ml.src.gauss_cat *<import_from_stmt>carla.recourse_methods.catalog.clue.library.clue_ml.src.probability normal_parse_params <import_from_stmt>carla.recourse_methods.catalog.clue.library.clue_ml.src.radam RAdam<import_from_stmt>carla.recourse_methods.catalog.clue.library.clue_ml.src.utils BaseNet to_variable <import_from_stmt>.models MLP_preact_generator_net MLP_preact_recognition_net<line_sep># TODO: implement for std changeable gaussian instead of rms
<class_stmt>VAE_gauss_cat(nn.Module)<block_start><def_stmt>__init__ self input_dim_vec width depth latent_dim pred_sig=<false><block_start>super(VAE_gauss_cat self).__init__()<line_sep>input_dim=0<line_sep>self.input_dim_vec=input_dim_vec<for_stmt>e input_dim_vec<block_start>input_dim<augadd>e<block_end>self.encoder=MLP_preact_recognition_net(input_dim width depth latent_dim)<if_stmt>pred_sig<block_start><raise>NotImplementedError()<block_end><else_stmt><block_start>self.decoder=MLP_preact_generator_net(input_dim width depth latent_dim)<line_sep>self.rec_loglike=rms_cat_loglike(self.input_dim_vec reduction="none")<block_end>self.pred_sig=pred_sig<block_end><def_stmt>encode self x<block_start>"""Works with flattened representATION"""<line_sep>approx_post_params=self.encoder(x)<line_sep>approx_post=normal_parse_params(approx_post_params 1e-3)<line_sep><return>approx_post<block_end><def_stmt>decode self z_sample<block_start>"""Works with flattened representATION"""<line_sep>rec_params=self.decoder(z_sample)<line_sep><return>rec_params<block_end><def_stmt>vlb self prior approx_post x rec_params<block_start>"""Works with flattened representATION"""<if_stmt>self.pred_sig<block_start><pass><block_end><else_stmt><block_start>rec=self.rec_loglike(rec_params x).view(x.shape[0] -1).sum(-1)<block_end>kl=kl_divergence(approx_post prior).view(x.shape[0] -1).sum(-1)<line_sep><return>rec-kl<block_end><def_stmt>iwlb self prior approx_post x K=50<block_start>estimates=[]<for_stmt>i range(K)<block_start>latent=approx_post.rsample()<line_sep>rec_params=self.decode(latent)<if_stmt>self.pred_sig<block_start><pass><block_end><else_stmt><block_start>rec_loglike=(self.rec_loglike(rec_params x).view(x.shape[0] -1).sum(-1))<block_end>prior_log_prob=prior.log_prob(latent)<line_sep>prior_log_prob=prior_log_prob.view(x.shape[0] -1)<line_sep>prior_log_prob=prior_log_prob.sum(-1)<line_sep>proposal_log_prob=approx_post.log_prob(latent)<line_sep>proposal_log_prob=proposal_log_prob.view(x.shape[0] -1)<line_sep>proposal_log_prob=proposal_log_prob.sum(-1)<line_sep>estimate=rec_loglike+prior_log_prob-proposal_log_prob<line_sep>estimates.append(estimate[: <none>])<block_end><return>torch.logsumexp(torch.cat(estimates 1) 1)-np.log(K)<block_end><block_end><class_stmt>VAE_gauss_cat_net(BaseNet)<block_start><def_stmt>__init__ self input_dim_vec width depth latent_dim pred_sig=<false> lr=1e-3 cuda=<true> flatten=<true> <block_start>super(VAE_gauss_cat_net self).__init__()<line_sep>log.info("VAE_gauss_net")<line_sep>self.cuda=cuda<line_sep>self.input_dim=0<line_sep>self.input_dim_vec=input_dim_vec<for_stmt>e self.input_dim_vec<block_start>self.input_dim<augadd>e<block_end>self.flatten=flatten<if_stmt><not>self.flatten<block_start><pass><block_end>self.width=width<line_sep>self.depth=depth<line_sep>self.latent_dim=latent_dim<line_sep>self.lr=lr<line_sep>self.pred_sig=pred_sig<line_sep>self.create_net()<line_sep>self.create_opt()<line_sep>self.epoch=0<line_sep>self.schedule=<none><if_stmt>self.cuda<block_start>self.prior=self.prior=Normal(loc=torch.zeros(latent_dim).cuda() scale=torch.ones(latent_dim).cuda())<block_end><else_stmt><block_start>self.prior=Normal(loc=torch.zeros(latent_dim) scale=torch.ones(latent_dim))<block_end>self.vlb_scale=1/len(self.input_dim_vec)<block_end># scale for dimensions of input so we can use same LR always
<def_stmt>create_net self<block_start>torch.manual_seed(42)<line_sep>torch.cuda.manual_seed(42)<line_sep>self.model=VAE_gauss_cat(self.input_dim_vec self.width self.depth self.latent_dim self.pred_sig)<if_stmt>self.cuda<block_start>self.model=self.model.cuda()<line_sep>cudnn.benchmark=<true><block_end>log.info("Total params: %.2fM"%(self.get_nb_parameters()/1000000.0))<block_end><def_stmt>create_opt self<block_start>self.optimizer=RAdam(self.model.parameters() lr=self.lr)<block_end><def_stmt>fit self x<block_start>self.set_mode_train(train=<true>)<if_stmt>self.flatten<block_start>x_flat=gauss_cat_to_flat(x self.input_dim_vec)<block_end><else_stmt><block_start>x_flat=x<line_sep>x=flat_to_gauss_cat(x self.input_dim_vec)<block_end>x,x_flat=to_variable(var=(x x_flat) cuda=self.cuda)<line_sep>self.optimizer.zero_grad()<line_sep>approx_post=self.model.encode(x_flat)<line_sep>z_sample=approx_post.rsample()<line_sep>rec_params=self.model.decode(z_sample)<line_sep>vlb=self.model.vlb(self.prior approx_post x rec_params)<line_sep>loss=(-vlb<times>self.vlb_scale).mean()<line_sep>loss.backward()<line_sep>self.optimizer.step()<line_sep><return>vlb.mean().item() rec_params<block_end><def_stmt>eval self x sample=<false><block_start>self.set_mode_train(train=<false>)<if_stmt>self.flatten<block_start>x_flat=gauss_cat_to_flat(x self.input_dim_vec)<block_end><else_stmt><block_start>x_flat=x<line_sep>x=flat_to_gauss_cat(x self.input_dim_vec)<block_end>x,x_flat=to_variable(var=(x x_flat) cuda=self.cuda)<line_sep>approx_post=self.model.encode(x_flat)<if_stmt>sample<block_start>z_sample=approx_post.sample()<block_end><else_stmt><block_start>z_sample=approx_post.loc<block_end>rec_params=self.model.decode(z_sample)<line_sep>vlb=self.model.vlb(self.prior approx_post x rec_params)<line_sep><return>vlb.mean().item() rec_params<block_end><def_stmt>eval_iw self x k=50<block_start>self.set_mode_train(train=<false>)<if_stmt>self.flatten<block_start>x_flat=gauss_cat_to_flat(x self.input_dim_vec)<block_end><else_stmt><block_start>x_flat=x<line_sep>x=flat_to_gauss_cat(x self.input_dim_vec)<block_end>x,x_flat=to_variable(var=(x x_flat) cuda=self.cuda)<line_sep>approx_post=self.model.recognition_encode(x)<line_sep>iw_lb=self.model.iwlb(self.prior approx_post x k)<line_sep><return>iw_lb.mean().item()<block_end><def_stmt>recongnition self x grad=<false> flatten=<none><block_start><if_stmt>flatten<is><none><block_start>flatten=self.flatten<block_end><if_stmt>flatten<and>grad<block_start><raise>Exception("flatten and grad options are not compatible")<block_end>self.set_mode_train(train=<false>)<if_stmt>flatten<block_start>x=gauss_cat_to_flat(x self.input_dim_vec)<block_end><if_stmt>grad<block_start><if_stmt><not>x.requires_grad<block_start>x.requires_grad=<true><block_end><block_end><else_stmt><block_start>(x )=to_variable(var=(x ) volatile=<true> cuda=self.cuda)<block_end>approx_post=self.model.encode(x)<line_sep><return>approx_post<block_end><def_stmt>regenerate self z grad=<false> unflatten=<false><block_start><if_stmt>unflatten<and>grad<block_start><raise>Exception("flatten and grad options are not compatible")<block_end>self.set_mode_train(train=<false>)<if_stmt>grad<block_start><if_stmt><not>z.requires_grad<block_start>z.requires_grad=<true><block_end><block_end><else_stmt><block_start>(z )=to_variable(var=(z ) volatile=<true> cuda=self.cuda)<block_end>out=self.model.decode(z)<if_stmt>unflatten<block_start>out=flat_to_gauss_cat(out self.input_dim_vec)<block_end><else_stmt><block_start>out=selective_softmax(out 
self.input_dim_vec grad=grad)<block_end><if_stmt>self.pred_sig<block_start><raise>Exception("Not implemented")<block_end><else_stmt><block_start><return>out<block_end><block_end><block_end>
|
# Copyright (c) 2021, Xilinx
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# * Neither the name of FINN nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
<import_stmt>pytest<import_stmt>os<import_stmt>numpy<as>np<import_stmt>torch<import_stmt>brevitas.onnx<as>bo<import_from_stmt>brevitas.nn QuantLinear<import_from_stmt>brevitas.core.quant QuantType<import_from_stmt>finn.core.modelwrapper ModelWrapper<import_from_stmt>finn.core.datatype DataType<import_stmt>finn.core.onnx_exec<as>oxe<import_from_stmt>finn.transformation.infer_shapes InferShapes<import_from_stmt>finn.util.basic gen_finn_dt_tensor<line_sep>export_onnx_path="test_brevitas_qlinear.onnx"<line_sep>@pytest.mark.parametrize("bias" [<false> <true>])@pytest.mark.parametrize("out_features" [4])@pytest.mark.parametrize("in_features" [3])@pytest.mark.parametrize("w_bits" [4])@pytest.mark.parametrize("i_dtype" [DataType.UINT4])<def_stmt>test_brevitas_qlinear bias out_features in_features w_bits i_dtype<block_start>i_shape=(1 in_features)<line_sep>w_shape=(out_features in_features)<line_sep>b_linear=QuantLinear(out_features=out_features in_features=in_features bias=bias bias_quant_type=QuantType.FP weight_bit_width=w_bits weight_quant_type=QuantType.INT weight_scaling_per_output_channel=<true> )<line_sep>weight_tensor_fp=np.random.uniform(low=-1.0 high=1.0 size=w_shape).astype(np.float32)<line_sep>b_linear.weight.data=torch.from_numpy(weight_tensor_fp)<line_sep>b_linear.eval()<line_sep>bo.export_finn_onnx(b_linear i_shape export_onnx_path)<line_sep>model=ModelWrapper(export_onnx_path)<line_sep>model=model.transform(InferShapes())<line_sep>inp_tensor=gen_finn_dt_tensor(i_dtype i_shape)<line_sep>idict={model.graph.input[0].name:inp_tensor}<line_sep>odict=oxe.execute_onnx(model idict <true>)<line_sep>produced=odict[model.graph.output[0].name]<line_sep>inp_tensor=torch.from_numpy(inp_tensor).float()<line_sep>expected=b_linear.forward(inp_tensor).detach().numpy()<assert_stmt>np.isclose(produced expected atol=1e-3).all()<line_sep>os.remove(export_onnx_path)<block_end>
|
# Copyright 2019,2020,2021 Sony Corporation.
# Copyright 2021 Sony Group Corporation.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>os<import_stmt>numpy<as>np<import_stmt>nnabla<as>nn<import_stmt>nnabla.logger<as>logger<import_stmt>nnabla.functions<as>F<import_stmt>nnabla.parametric_functions<as>PF<import_stmt>nnabla.solvers<as>S<import_from_stmt>nnabla.monitor Monitor MonitorSeries MonitorTimeElapsed MonitorImageTile<import_stmt>nnabla.utils.save<as>save<import_from_stmt>nnabla.ext_utils get_extension_context<import_from_stmt>args get_args save_args<import_from_stmt>helpers denormalize<import_from_stmt>models generator discriminator gan_loss<import_from_stmt>cifar10_data data_iterator_cifar10<def_stmt>train args# Context
<block_start>ctx=get_extension_context(args.context device_id=args.device_id type_config=args.type_config)<line_sep>nn.set_default_context(ctx)<line_sep># Args
latent=args.latent<line_sep>maps=args.maps<line_sep>batch_size=args.batch_size<line_sep>image_size=args.image_size<line_sep>lambda_=args.lambda_<line_sep># Model
# generator loss
z=nn.Variable([batch_size latent])<line_sep>x_fake=generator(z maps=maps up=args.up).apply(persistent=<true>)<line_sep>p_fake=discriminator(x_fake maps=maps)<line_sep>loss_gen=gan_loss(p_fake).apply(persistent=<true>)<line_sep># discriminator loss
p_fake=discriminator(x_fake maps=maps)<line_sep>x_real=nn.Variable([batch_size 3 image_size image_size])<line_sep>p_real=discriminator(x_real maps=maps)<line_sep>loss_dis=gan_loss(p_fake p_real).apply(persistent=<true>)<line_sep># gradient penalty
eps=F.rand(shape=[batch_size 1 1 1])<line_sep>x_rmix=eps<times>x_real+(1.0-eps)<times>x_fake<line_sep>p_rmix=discriminator(x_rmix maps=maps)<line_sep>x_rmix.need_grad=<true># Enabling gradient computation for double backward
grads=nn.grad([p_rmix] [x_rmix])<line_sep>l2norms=[F.sum(g<power>2.0 [1 2 3])<power>0.5<for>g grads]<line_sep>gp=sum([F.mean((l-1.0)<power>2.0)<for>l l2norms])<line_sep>loss_dis<augadd>lambda_<times>gp<line_sep># generator with fixed value for test
z_test=nn.Variable.from_numpy_array(np.random.randn(batch_size latent))<line_sep>x_test=generator(z_test maps=maps test=<true> up=args.up).apply(persistent=<true>)<line_sep># Solver
solver_gen=S.Adam(args.lrg args.beta1 args.beta2)<line_sep>solver_dis=S.Adam(args.lrd args.beta1 args.beta2)<with_stmt>nn.parameter_scope("generator")<block_start>params_gen=nn.get_parameters()<line_sep>solver_gen.set_parameters(params_gen)<block_end><with_stmt>nn.parameter_scope("discriminator")<block_start>params_dis=nn.get_parameters()<line_sep>solver_dis.set_parameters(params_dis)<block_end># Monitor
monitor=Monitor(args.monitor_path)<line_sep>monitor_loss_gen=MonitorSeries("Generator Loss" monitor interval=10)<line_sep>monitor_loss_cri=MonitorSeries("Negative Critic Loss" monitor interval=10)<line_sep>monitor_time=MonitorTimeElapsed("Training Time" monitor interval=10)<line_sep>monitor_image_tile_train=MonitorImageTile("Image Tile Train" monitor num_images=batch_size interval=1 normalize_method=denormalize)<line_sep>monitor_image_tile_test=MonitorImageTile("Image Tile Test" monitor num_images=batch_size interval=1 normalize_method=denormalize)<line_sep># Data Iterator
di=data_iterator_cifar10(batch_size <true>)<line_sep># Train loop
<for_stmt>i range(args.max_iter)# Train discriminator
<block_start>x_fake.need_grad=<false># no need to backprop into the generator
<for_stmt>_ range(args.n_critic)<block_start>solver_dis.zero_grad()<line_sep>x_real.d=di.next()[0]/127.5-1.0<line_sep>z.d=np.random.randn(batch_size latent)<line_sep>loss_dis.forward(clear_no_need_grad=<true>)<line_sep>loss_dis.backward(clear_buffer=<true>)<line_sep>solver_dis.update()<block_end># Train generator
x_fake.need_grad=<true># re-enable backprop into the generator
solver_gen.zero_grad()<line_sep>z.d=np.random.randn(batch_size latent)<line_sep>loss_gen.forward(clear_no_need_grad=<true>)<line_sep>loss_gen.backward(clear_buffer=<true>)<line_sep>solver_gen.update()<line_sep># Monitor
monitor_loss_gen.add(i loss_gen.d)<line_sep>monitor_loss_cri.add(i -loss_dis.d)<line_sep>monitor_time.add(i)<line_sep># Save
<if_stmt>i%args.save_interval<eq>0<block_start>monitor_image_tile_train.add(i x_fake)<line_sep>monitor_image_tile_test.add(i x_test)<line_sep>nn.save_parameters(os.path.join(args.monitor_path "params_{}.h5".format(i)))<block_end><block_end># Last
x_test.forward(clear_buffer=<true>)<line_sep>nn.save_parameters(os.path.join(args.monitor_path "params_{}.h5".format(i)))<line_sep>monitor_image_tile_train.add(i x_fake)<line_sep>monitor_image_tile_test.add(i x_test)<block_end><def_stmt>main <block_start>args=get_args()<line_sep>save_args(args "train")<line_sep>train(args)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
# *****************************************************************************
#
# Copyright (c) 2020, the pyEX authors.
#
# This file is part of the pyEX library, distributed under the terms of
# the Apache License 2.0. The full license can be found in the LICENSE file.
#
<import_from_stmt>functools wraps<import_stmt>pandas<as>pd<import_from_stmt>deprecation deprecated<import_from_stmt>..common _get _raiseIfNotStr _toDatetime _timeseriesWrapper<import_from_stmt>..timeseries timeSeries<def_stmt>optionExpirations symbol token="" version="stable" filter="" format="json"<block_start>"""Returns the available option expiration dates for a symbol
https://iexcloud.io/docs/api/#options
9:30am-5pm ET Mon-Fri
Args:
symbol (str): Ticker to request
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""<line_sep>_raiseIfNotStr(symbol)<line_sep><return>_get("stock/"+symbol+"/options" token=token version=version filter=filter format=format )<block_end>@deprecated(details="Deprecated: Migrate to `options`")<def_stmt>stockOptions symbol expiration side="" token="" version="stable" filter="" format="json" <block_start>"""Returns end of day options data
https://iexcloud.io/docs/api/#options
9:30am-5pm ET Mon-Fri
Args:
symbol (str): Ticker to request
expiration (str): Expiration date
side (str): Side (optional)
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Returns:
dict or DataFrame: result
"""<line_sep>_raiseIfNotStr(symbol)<if_stmt>side<block_start><return>_get("stock/{symbol}/options/{expiration}/{side}".format(symbol=symbol expiration=expiration side=side) token=token version=version filter=filter format=format )<block_end><return>_get("stock/{symbol}/options/{expiration}/".format(symbol=symbol expiration=expiration) token=token version=version filter=filter format=format )<block_end>@wraps(stockOptions)<def_stmt>stockOptionsDF *args **kwargs<block_start><return>_toDatetime(pd.DataFrame(stockOptions(*args **kwargs)) tcols=["date"])<block_end><def_stmt>options contract token="" version="stable" filter="" format="json" **timeseries_kwargs<block_start>"""Options EOD prices
Args:
contract (str): Specific dated option contract, e.g. SPY20210714C00475000
token (str): Access token
version (str): API version
filter (str): filters: https://iexcloud.io/docs/api/#filter-results
format (str): return format, defaults to json
Supports all kwargs from `pyEX.timeseries.timeSeries`
Returns:
dict or DataFrame: result
"""<line_sep>_raiseIfNotStr(contract)<line_sep>_timeseriesWrapper(timeseries_kwargs)<line_sep><return>timeSeries(id=contract key="chart" token=token version=version overrideBase="options" filter=filter format=format **timeseries_kwargs)<block_end>@wraps(options)<def_stmt>optionsDF *args **kwargs<block_start><return>_toDatetime(pd.DataFrame(options(*args **kwargs)) reformatcols=["datetime" "date" "updated"] )<block_end>
|
#
# Copyright (C) 2019-2021 Intel Corporation
# SPDX-License-Identifier: Apache-2.0
#
# This file is based in part on deepspeech_openvino_0.5.py by <NAME> at
# https://github.com/openvinotoolkit/open_model_zoo/pull/419, commit 529805d011d9b405f142b2b40f4d202bd403a4f1 on Sep 19, 2019.
#
<import_from_stmt>copy deepcopy<import_stmt>numpy<as>np<import_from_stmt>asr_utils.pipelines BlockedSeqPipelineStage<class_stmt>RnnSeqPipelineStage(BlockedSeqPipelineStage)<block_start><def_stmt>__init__ self profile ie model device='CPU'<block_start>"""
Load/compile to the target device the IE IR file with the network and initialize the pipeline stage.
profile (dict), a dict with pre/post-processing parameters, see profiles.py
ie (IECore), IECore object for model loading/compilation/inference
model (str), filename of .xml IR file
device (str), inference device
"""<line_sep>self.p=deepcopy(profile)<assert_stmt>self.p['num_context_frames']%2<eq>1 "num_context_frames must be odd"<line_sep>padding_len=self.p['num_context_frames']<floordiv>2<line_sep>super().__init__(block_len=16 context_len=self.p['num_context_frames']-1 left_padding_len=padding_len right_padding_len=padding_len padding_shape=(self.p['num_mfcc_dct_coefs'] ) cut_alignment=<true>)<line_sep>net=ie.read_network(model=model)<line_sep>self.exec_net=ie.load_network(network=net device_name=device)<block_end><def_stmt>_reset_state self<block_start>super()._reset_state()<line_sep>self._rnn_state=<none><block_end><def_stmt>process_data self data finish=<false><block_start><if_stmt>data<is><not><none><block_start><assert_stmt>len(data.shape)<eq>2<block_end><return>super().process_data(data finish=finish)<block_end><def_stmt>_process_blocks self buffer finish=<false><block_start><assert_stmt>buffer.shape[0]<ge>self._block_len+self._context_len<line_sep>processed=[]<for_stmt>start_pos range(self._context_len buffer.shape[0]-self._block_len+1 self._block_len)<block_start>block=buffer[start_pos-self._context_len:start_pos+self._block_len]<line_sep>processed.append(self._process_block(block finish=finish<and>start_pos+self._block_len<ge>buffer.shape[0]))<block_end><assert_stmt><not>self._cut_alignment<or>processed[-1].shape[0]<eq>self._block_len "Networks with stride != 1 are not supported"<line_sep># Here start_pos is its value on the last iteration of the loop
buffer_skip_len=start_pos+self._block_len-self._context_len<line_sep><return>processed buffer_skip_len<block_end><def_stmt>_process_block self mfcc_features finish=<false><block_start><assert_stmt>mfcc_features.shape[0]<eq>self._block_len+self._context_len "Wrong data length: _process_block() accepts a single block of data"<line_sep># Create a view into the array with overlapping strides to simulate convolution with FC.
# NB: Replacing this and the first FC layer with conv1d may improve speed a little.
mfcc_features=np.lib.stride_tricks.as_strided(mfcc_features (self._block_len self._context_len+1 self.p['num_mfcc_dct_coefs']) (mfcc_features.strides[0] mfcc_features.strides[0] mfcc_features.strides[1]) writeable=<false> )<if_stmt>self._rnn_state<is><none><block_start>state_h=np.zeros(self.exec_net.input_info[self.p['in_state_h']].input_data.shape)<line_sep>state_c=np.zeros(self.exec_net.input_info[self.p['in_state_c']].input_data.shape)<block_end><else_stmt><block_start>state_h,state_c=self._rnn_state<block_end>infer_res=self.exec_net.infer(inputs={self.p['in_state_c']:state_c self.p['in_state_h']:state_h self.p['in_data']:[mfcc_features] })<line_sep>state_c=infer_res[self.p['out_state_c']]<line_sep>state_h=infer_res[self.p['out_state_h']]<line_sep>self._rnn_state=(state_h state_c)<line_sep>probs=infer_res[self.p['out_data']].squeeze(1)<line_sep><return>probs<block_end><block_end>
|
<import_stmt>math<import_from_stmt>unittest TestCase<import_stmt>simplejson<as>json<class_stmt>TestFloat(TestCase)<block_start><def_stmt>test_floats self<block_start><for_stmt>num [1617161771.7650001 math.pi math.pi<power>100 math.pi<power>-100 3.1]<block_start>self.assertEquals(float(json.dumps(num)) num)<line_sep>self.assertEquals(json.loads(json.dumps(num)) num)<block_end><block_end><def_stmt>test_ints self<block_start><for_stmt>num [1 1L 1<lshift>32 1<lshift>64]<block_start>self.assertEquals(json.dumps(num) str(num))<line_sep>self.assertEquals(int(json.dumps(num)) num)<block_end><block_end><block_end>
|
<import_from_stmt>abc ABC abstractmethod<import_stmt>numpy<as>np<import_from_stmt>enum Enum<import_from_stmt>learning.normalizer Normalizer<class_stmt>Env(ABC)<block_start><class_stmt>Terminate(Enum)<block_start>Null=0<line_sep>Fail=1<line_sep>Succ=2<block_end><def_stmt>__init__ self args enable_draw<block_start>self.enable_draw=enable_draw<line_sep><return><block_end>@abstractmethod<def_stmt>update self timestep<block_start><pass><block_end>@abstractmethod<def_stmt>reset self<block_start><pass><block_end>@abstractmethod<def_stmt>get_time self<block_start><pass><block_end>@abstractmethod<def_stmt>get_name self<block_start><pass><block_end># rendering and UI interface
<def_stmt>draw self<block_start><pass><block_end><def_stmt>keyboard self key x y<block_start><pass><block_end><def_stmt>mouse_click self button state x y<block_start><pass><block_end><def_stmt>mouse_move self x y<block_start><pass><block_end><def_stmt>reshape self w h<block_start><pass><block_end><def_stmt>shutdown self<block_start><pass><block_end><def_stmt>is_done self<block_start><return><false><block_end><def_stmt>set_playback_speed self speed<block_start><pass><block_end><def_stmt>set_updates_per_sec self updates_per_sec<block_start><pass><block_end>@abstractmethod<def_stmt>get_win_width self<block_start><pass><block_end>@abstractmethod<def_stmt>get_win_height self<block_start><pass><block_end><def_stmt>get_num_update_substeps self<block_start><return>1<block_end># rl interface
@abstractmethod<def_stmt>is_rl_scene self<block_start><return><false><block_end>@abstractmethod<def_stmt>get_num_agents self<block_start><return>0<block_end>@abstractmethod<def_stmt>need_new_action self agent_id<block_start><return><false><block_end>@abstractmethod<def_stmt>record_state self agent_id<block_start><pass><block_end>@abstractmethod<def_stmt>record_goal self agent_id<block_start><pass><block_end>@abstractmethod<def_stmt>set_action self agent_id<block_start><pass><block_end>@abstractmethod<def_stmt>get_action_space self agent_id<block_start><pass><block_end>@abstractmethod<def_stmt>get_state_size self agent_id<block_start><pass><block_end>@abstractmethod<def_stmt>get_goal_size self agent_id<block_start><pass><block_end>@abstractmethod<def_stmt>get_action_size self agent_id<block_start><pass><block_end>@abstractmethod<def_stmt>get_num_actions self agent_id<block_start><pass><block_end>@abstractmethod<def_stmt>log_val self agent_id val<block_start><pass><block_end><def_stmt>build_state_offset self agent_id<block_start>state_size=self.get_state_size(agent_id)<line_sep><return>np.zeros(state_size)<block_end><def_stmt>build_state_scale self agent_id<block_start>state_size=self.get_state_size(agent_id)<line_sep><return>np.ones(state_size)<block_end><def_stmt>build_goal_offset self agent_id<block_start>goal_size=self.get_goal_size(agent_id)<line_sep><return>np.zeros(goal_size)<block_end><def_stmt>build_goal_scale self agent_id<block_start>goal_size=self.get_goal_size(agent_id)<line_sep><return>np.ones(goal_size)<block_end><def_stmt>build_action_offset self agent_id<block_start>action_size=self.get_action_size()<line_sep><return>np.zeros(action_size)<block_end><def_stmt>build_action_scale self agent_id<block_start>action_size=self.get_action_size()<line_sep><return>np.ones(action_size)<block_end><def_stmt>build_action_bound_min self agent_id<block_start>action_size=self.get_action_size()<line_sep><return>-inf<times>np.ones(action_size)<block_end><def_stmt>build_action_bound_max self agent_id<block_start>action_size=self.get_action_size()<line_sep><return>inf<times>np.ones(action_size)<block_end><def_stmt>build_state_norm_groups self agent_id<block_start>state_size=self.get_state_size(agent_id)<line_sep><return>Normalizer.NORM_GROUP_SINGLE<times>np.ones(state_size dtype=np.int32)<block_end><def_stmt>build_goal_norm_groups self agent_id<block_start>goal_size=self.get_goal_size(agent_id)<line_sep><return>Normalizer.NORM_GROUP_SINGLE<times>np.ones(goal_size dtype=np.int32)<block_end>@abstractmethod<def_stmt>calc_reward self agent_id<block_start><return>0<block_end>@abstractmethod<def_stmt>get_reward_min self agent_id<block_start><return>0<block_end>@abstractmethod<def_stmt>get_reward_max self agent_id<block_start><return>1<block_end>@abstractmethod<def_stmt>get_reward_fail self agent_id<block_start><return>self.get_reward_min(agent_id)<block_end>@abstractmethod<def_stmt>get_reward_succ self agent_id<block_start><return>self.get_reward_max(agent_id)<block_end>@abstractmethod<def_stmt>is_episode_end self<block_start><return><false><block_end>@abstractmethod<def_stmt>check_terminate self agent_id<block_start><return>Terminate.Null<block_end>@abstractmethod<def_stmt>check_valid_episode self<block_start><return><true><block_end>@abstractmethod<def_stmt>set_sample_count self count<block_start><pass><block_end>@abstractmethod<def_stmt>set_mode self mode<block_start><pass><block_end><block_end>
|
# Confidence intervals from the statsmodels results summary
thresh=0.05<line_sep>intervals=results.conf_int(alpha=thresh)<line_sep># Renaming column names
first_col=str(thresh/2<times>100)+"%"<line_sep>second_col=str((1-thresh/2)<times>100)+"%"<line_sep>intervals=intervals.rename(columns={0:first_col 1:second_col})<line_sep>display(intervals)<line_sep>
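# A minimal sketch of where `results` could come from, assuming an ordinary
# least squares fit with statsmodels on synthetic data (all names below are
# illustrative and not part of the original snippet).
import numpy as np
import pandas as pd
import statsmodels.api as sm

X = pd.DataFrame({"x1": np.random.rand(100), "x2": np.random.rand(100)})
X = sm.add_constant(X)
y = 1.0 + 2.0 * X["x1"] + 3.0 * X["x2"] + 0.1 * np.random.randn(100)
results = sm.OLS(y, X).fit()

# With pandas inputs, conf_int() returns a DataFrame whose integer columns
# 0 (lower bound) and 1 (upper bound) are what the renaming above relies on.
print(results.conf_int(alpha=0.05))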
|
<import_from_stmt>typing_extensions Literal<import_stmt>torch<import_from_stmt>abc abstractmethod<import_from_stmt>math sqrt<import_from_stmt>torch nn<import_from_stmt>torch_scatter scatter scatter_log_softmax scatter_sum<import_from_stmt>typing NamedTuple Union<class_stmt>ElementsToSummaryRepresentationInput(NamedTuple)<block_start>"""Input to AbstractVarSizedElementReduce layers."""<line_sep>element_embeddings:torch.Tensor# float tensor of shape [num_elements, D], the representation of each node in all graphs.
element_to_sample_map:torch.Tensor# int tensor of shape [num_elements] with values in range [0, num_samples-1], mapping each node to a sample ID.
num_samples:Union[torch.Tensor int]<block_end># scalar, specifying the number of sets.
<class_stmt>AbstractVarSizedElementReduce(nn.Module)<block_start>"""Interface for computing summary representations from multiple variable-sized sets of representations."""<line_sep>@abstractmethod<def_stmt>forward self inputs:ElementsToSummaryRepresentationInput<arrow>torch.Tensor<block_start>"""Returns: float tensor of shape [num_samples, D']"""<block_end><block_end><class_stmt>SimpleVarSizedElementReduce(AbstractVarSizedElementReduce)<block_start><def_stmt>__init__ self summarization_type:Literal["sum" "mean" "max" "min"]<block_start>super().__init__()<assert_stmt>summarization_type<in>{"sum" "mean" "max" "min"}<line_sep>self.__summarization_type=summarization_type<block_end><def_stmt>forward self inputs:ElementsToSummaryRepresentationInput<arrow>torch.Tensor<block_start><return>scatter(src=inputs.element_embeddings index=inputs.element_to_sample_map dim=0 dim_size=inputs.num_samples reduce=self.__summarization_type )<block_end><block_end><class_stmt>NormalizedWeightsVarSizedElementReduce(AbstractVarSizedElementReduce)<block_start><def_stmt>__init__ self input_representation_size:int output_representation_size:int<block_start>super().__init__()<line_sep>self.__attention_layer=nn.Linear(input_representation_size 1 bias=<false>)<line_sep>self.__output_layer=nn.Linear(input_representation_size output_representation_size bias=<false>)<block_end><def_stmt>forward self inputs:ElementsToSummaryRepresentationInput<arrow>torch.Tensor<block_start>attention_scores=self.__attention_layer(inputs.element_embeddings).squeeze(-1)<line_sep># [num_elements]
attention_probs=torch.exp(scatter_log_softmax(attention_scores index=inputs.element_to_sample_map dim=0 eps=0))<line_sep># [num_elements]
<return>scatter_sum(self.__output_layer(inputs.element_embeddings)<times>attention_probs.unsqueeze(-1) index=inputs.element_to_sample_map dim=0 dim_size=inputs.num_samples )<block_end><block_end># [num_samples, D']
<class_stmt>WeightedSumVarSizedElementReduce(AbstractVarSizedElementReduce)<block_start><def_stmt>__init__ self representation_size:int<block_start>super().__init__()<line_sep>self.__weights_layer=nn.Linear(representation_size 1 bias=<false>)<block_end><def_stmt>forward self inputs:ElementsToSummaryRepresentationInput<arrow>torch.Tensor<block_start>weights=torch.sigmoid(self.__weights_layer(inputs.element_embeddings).squeeze(-1))<line_sep># [num_elements]
<return>scatter_sum(inputs.element_embeddings<times>weights.unsqueeze(-1) index=inputs.element_to_sample_map dim=0 dim_size=inputs.num_samples )<block_end><block_end># [num_samples, D']
<class_stmt>SelfAttentionVarSizedElementReduce(AbstractVarSizedElementReduce)<block_start><def_stmt>__init__ self input_representation_size:int hidden_size:int output_representation_size:int query_representation_summarizer:AbstractVarSizedElementReduce <block_start>super().__init__()<line_sep>self.__query_layer=query_representation_summarizer<line_sep>self.__key_layer=nn.Linear(input_representation_size hidden_size bias=<false>)<line_sep>self.__output_layer=nn.Linear(input_representation_size output_representation_size bias=<false>)<block_end><def_stmt>forward self inputs:ElementsToSummaryRepresentationInput<arrow>torch.Tensor<block_start>queries=self.__query_layer(inputs)# [num_samples, H]
queries_all=queries[inputs.element_to_sample_map]# [num_elements, H]
keys=self.__key_layer(inputs.element_embeddings)# [num_elements, H]
attention_scores=torch.einsum("vh,vh->v" queries_all keys)# [num_elements]
attention_probs=torch.exp(scatter_log_softmax(attention_scores index=inputs.element_to_sample_map dim=0 eps=0))<line_sep># [num_elements]
<return>scatter_sum(self.__output_layer(inputs.element_embeddings)<times>attention_probs.unsqueeze(-1) index=inputs.element_to_sample_map dim=0 dim_size=inputs.num_samples )<block_end><block_end># [num_samples, D']
<class_stmt>MultiheadSelfAttentionVarSizedElementReduce(AbstractVarSizedElementReduce)<block_start><def_stmt>__init__ self input_representation_size:int hidden_size:int output_representation_size:int num_heads:int query_representation_summarizer:AbstractVarSizedElementReduce use_value_layer:bool=<false> <block_start>super().__init__()<line_sep>self.__query_layer=query_representation_summarizer<line_sep>self.__key_layer=nn.Linear(input_representation_size hidden_size bias=<false>)<assert_stmt>hidden_size%num_heads<eq>0 "Hidden size must be divisible by the number of heads."<line_sep>self.__use_value_layer=use_value_layer<if_stmt>use_value_layer<block_start>self.__value_layer=nn.Linear(input_representation_size hidden_size bias=<false>)<line_sep>self.__output_layer=nn.Linear(hidden_size output_representation_size bias=<false>)<block_end><else_stmt><block_start>self.__output_layer=nn.Linear(input_representation_size<times>num_heads output_representation_size bias=<false>)<block_end>self.__num_heads=num_heads<block_end><def_stmt>forward self inputs:ElementsToSummaryRepresentationInput<arrow>torch.Tensor<block_start>queries=self.__query_layer(inputs)# [num_samples, H]
queries_per_element=queries[inputs.element_to_sample_map]# [num_elements, H]
queries_per_element=queries_per_element.reshape((queries_per_element.shape[0] self.__num_heads queries_per_element.shape[1]<floordiv>self.__num_heads ))<line_sep>keys=self.__key_layer(inputs.element_embeddings)# [num_elements, H]
keys=keys.reshape((keys.shape[0] self.__num_heads keys.shape[1]<floordiv>self.__num_heads))<line_sep>attention_scores=torch.einsum("bhk,bhk->bh" queries_per_element keys)/sqrt(keys.shape[-1])<line_sep># [num_elements, num_heads]
attention_probs=torch.exp(scatter_log_softmax(attention_scores index=inputs.element_to_sample_map dim=0 eps=0))<line_sep># [num_elements, num_heads]
<if_stmt>self.__use_value_layer<block_start>values=self.__value_layer(inputs.element_embeddings)# [num_elements, hidden_size]
values=values.reshape((values.shape[0] self.__num_heads values.shape[1]<floordiv>self.__num_heads))<line_sep>outputs=attention_probs.unsqueeze(-1)<times>values<block_end><else_stmt><block_start>outputs=attention_probs.unsqueeze(-1)<times>inputs.element_embeddings.unsqueeze(1)<block_end># [num_elements, num_heads, D']
outputs=outputs.reshape((outputs.shape[0] -1))# [num_elements, num_heads * D']
per_sample_outputs=scatter_sum(outputs index=inputs.element_to_sample_map dim=0 dim_size=inputs.num_samples)<line_sep># [num_samples, num_heads, D']
<return>self.__output_layer(per_sample_outputs)<block_end><block_end># [num_samples, D']
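# A minimal sketch exercising the simplest reducer above on two toy "samples"
# (three and two elements respectively); the tensors are arbitrary examples.
if __name__ == "__main__":
    element_embeddings = torch.randn(5, 8)
    element_to_sample_map = torch.tensor([0, 0, 0, 1, 1])
    batch = ElementsToSummaryRepresentationInput(
        element_embeddings=element_embeddings,
        element_to_sample_map=element_to_sample_map,
        num_samples=2,
    )
    # Mean-pool each sample's elements into one summary vector per sample.
    pooled = SimpleVarSizedElementReduce("mean")(batch)
    print(pooled.shape)  # expected: torch.Size([2, 8])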
|
<import_from_stmt>.base TestCase<import_stmt>os<import_from_stmt>os.path join isdir<import_stmt>rrdtool<import_stmt>shutil<import_stmt>six<import_stmt>time<import_from_stmt>django.conf settings<import_from_stmt>graphite.readers RRDReader<import_from_stmt>graphite.wsgi application# NOQA makes sure we have a working WSGI app
<class_stmt>RRDReaderTests(TestCase)<block_start>test_dir=join(settings.RRD_DIR)<line_sep>start_ts=0<line_sep>step=60<line_sep>points=100<line_sep># Create/wipe test RRD files
hostcpu=os.path.join(test_dir 'hosts/worker1/cpu.rrd')<line_sep># TODO Fix this!
<def_stmt>create_rrd self<block_start><if_stmt><not>isdir(self.test_dir)<block_start>os.makedirs(self.test_dir)<block_end><try_stmt><block_start>os.makedirs(self.hostcpu.replace('cpu.rrd' ''))<block_end><except_stmt>OSError<block_start><pass><block_end>self.start_ts=int(time.time())<line_sep>rrdtool.create(self.hostcpu '--start' str(self.start_ts) '--step' str(self.step) 'RRA:AVERAGE:0.5:1:{}'.format(self.points) 'DS:cpu:GAUGE:60:U:U')<block_end><def_stmt>wipe_rrd self<block_start><try_stmt><block_start>shutil.rmtree(self.test_dir)<block_end><except_stmt>OSError<block_start><pass><block_end><block_end># Confirm the reader object is not none
<def_stmt>test_RRDReader_init self<block_start>self.create_rrd()<line_sep>self.addCleanup(self.wipe_rrd)<line_sep>reader=RRDReader(self.hostcpu 'cpu')<line_sep>self.assertIsNotNone(reader)<block_end># must return a 'str' object on both py2 and py3 independent of the type of
# the argument (which is a 'unicode' on py2)
<def_stmt>test_RRDReader_convert_fs_path self<block_start>path=RRDReader._convert_fs_path(six.u(self.hostcpu))<line_sep>self.assertIsInstance(path str)<block_end># Confirm the intervals
<def_stmt>test_RRDReader_get_intervals self<block_start>self.create_rrd()<line_sep>self.addCleanup(self.wipe_rrd)<line_sep>reader=RRDReader(self.hostcpu 'cpu')<line_sep># Intervals are calculated on the actual time so tolerate a 2 second
# deviation for delays caused between file creation and test.
<for_stmt>interval reader.get_intervals()<block_start>self.assertAlmostEqual(interval.start self.start_ts-self.points<times>self.step delta=2)<line_sep>self.assertAlmostEqual(interval.end self.start_ts delta=2)<block_end><block_end># Confirm fetch works.
<def_stmt>test_RRDReader_fetch self<block_start>self.create_rrd()<line_sep>self.addCleanup(self.wipe_rrd)<line_sep># insert some data
<for_stmt>ts range(self.start_ts+60 self.start_ts+10<times>self.step self.step)<block_start>rrdtool.update(self.hostcpu '{}:42'.format(ts))<block_end>reader=RRDReader(self.hostcpu 'cpu')<line_sep>(time_info values)=reader.fetch(self.start_ts+self.step self.start_ts+self.step<times>2)<line_sep>self.assertEqual(list(values) [42.0])<block_end><def_stmt>test_RRDReader_get_datasources self<block_start>self.create_rrd()<line_sep>self.addCleanup(self.wipe_rrd)<line_sep>datasource=RRDReader.get_datasources(self.hostcpu)<line_sep>self.assertEqual(datasource ['cpu'])<block_end><def_stmt>test_RRDReader_get_retention self<block_start>self.create_rrd()<line_sep>self.addCleanup(self.wipe_rrd)<line_sep>retentions=RRDReader.get_retention(self.hostcpu)<line_sep>self.assertEqual(retentions self.points<times>self.step)<block_end><block_end>
|
<import_stmt>streamlit<as>st<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_from_stmt>st_aggrid AgGrid DataReturnMode GridUpdateMode GridOptionsBuilder<line_sep>@st.cache()<def_stmt>get_data_ex4 <block_start>df=pd.DataFrame(np.random.randint(0 100 50).reshape(-1 5) columns=list("abcde"))<line_sep><return>df<block_end>df=get_data_ex4()<line_sep>st.markdown("""
### Two grids
As in other streamlit components, it is possible to render two components for the same data using distinct ```key``` parameters.
""")<line_sep>st.subheader("Input data")<line_sep>st.dataframe(df)<line_sep>st.subheader("Editable Grids")<line_sep>c1,c2=st.beta_columns(2)<with_stmt>c1<block_start>grid_return1=AgGrid(df key='grid1' editable=<true>)<line_sep>st.text("Grid 1 Return")<line_sep>st.write(grid_return1['data'])<block_end><with_stmt>c2<block_start>grid_return2=AgGrid(df key='grid2' editable=<true>)<line_sep>st.text("Grid 2 Return")<line_sep>st.write(grid_return2['data'])<block_end>
|
# Copyright (c) 2021 Graphcore Ltd. All rights reserved.
<import_from_stmt>filelock FileLock<import_from_stmt>contextlib contextmanager<line_sep>@contextmanager<def_stmt>lock lock_path<block_start><with_stmt>FileLock(lock_path)<block_start><yield><block_end><block_end>
|
# flake8: noqa
<import_from_stmt>samtranslator.model.resource_policies ResourcePolicies PolicyTypes<line_sep>
|
<import_stmt>numpy<as>np<import_from_stmt>scipy.optimize _lbfgsb<def_stmt>objfun x<block_start>"""simplified objective func to test lbfgsb bound violation"""<line_sep>x0=[0.8750000000000278 0.7500000000000153 0.9499999999999722 0.8214285714285992 0.6363636363636085]<line_sep>x1=[1.0 0.0 1.0 0.0 0.0]<line_sep>x2=[1.0 0.0 0.9889733043149325 0.0 0.026353554421041155]<line_sep>x3=[1.0 0.0 0.9889917442915558 0.0 0.020341986743231205]<line_sep>f0=5163.647901211178<line_sep>f1=5149.8181642072905<line_sep>f2=5149.379332309634<line_sep>f3=5149.374490771297<line_sep>g0=np.array([-0.5934820547965749 1.6251549718258351 -71.99168459202559 5.346636965797545 37.10732723092604])<line_sep>g1=np.array([-0.43295349282641515 1.008607936794592 18.223666726602975 31.927010036981997 -19.667512518739386])<line_sep>g2=np.array([-0.4699874455100256 0.9466285353668347 -0.016874360242016825 48.44999161133457 5.819631620590712])<line_sep>g3=np.array([-0.46970678696829116 0.9612719312174818 0.006129809488833699 48.43557729419473 6.005481418498221])<if_stmt>np.allclose(x x0)<block_start>f=f0<line_sep>g=g0<block_end><elif_stmt>np.allclose(x x1)<block_start>f=f1<line_sep>g=g1<block_end><elif_stmt>np.allclose(x x2)<block_start>f=f2<line_sep>g=g2<block_end><elif_stmt>np.allclose(x x3)<block_start>f=f3<line_sep>g=g3<block_end><else_stmt><block_start><raise>ValueError('Simplified objective function not defined '<concat>'at requested point')<block_end><return>(np.copy(f) np.copy(g))<block_end><def_stmt>test_setulb_floatround <block_start>"""test if setulb() violates bounds
checks for violation due to floating point rounding error
"""<line_sep>n=5<line_sep>m=10<line_sep>factr=1e7<line_sep>pgtol=1e-5<line_sep>maxls=20<line_sep>iprint=-1<line_sep>nbd=np.full((n ) 2)<line_sep>low_bnd=np.zeros(n np.float64)<line_sep>upper_bnd=np.ones(n np.float64)<line_sep>x0=np.array([0.8750000000000278 0.7500000000000153 0.9499999999999722 0.8214285714285992 0.6363636363636085])<line_sep>x=np.copy(x0)<line_sep>f=np.array(0.0 np.float64)<line_sep>g=np.zeros(n np.float64)<line_sep>fortran_int=_lbfgsb.types.intvar.dtype<line_sep>wa=np.zeros(2<times>m<times>n+5<times>n+11<times>m<times>m+8<times>m np.float64)<line_sep>iwa=np.zeros(3<times>n fortran_int)<line_sep>task=np.zeros(1 'S60')<line_sep>csave=np.zeros(1 'S60')<line_sep>lsave=np.zeros(4 fortran_int)<line_sep>isave=np.zeros(44 fortran_int)<line_sep>dsave=np.zeros(29 np.float64)<line_sep>task[:]=b'START'<for_stmt>n_iter range(7)# 7 steps required to reproduce error
<block_start>f,g=objfun(x)<line_sep>_lbfgsb.setulb(m x low_bnd upper_bnd nbd f g factr pgtol wa iwa task iprint csave lsave isave dsave maxls)<assert_stmt>(x<le>upper_bnd).all()<and>(x<ge>low_bnd).all() ("_lbfgsb.setulb() stepped to a point outside of the bounds")<block_end><block_end>
|
"""
Most of the stuff from the stdlib will be tested via test_parser3. That
will mostly test if the implementation is correct. This module does some
meta tests.
"""<import_stmt>sys<import_from_stmt>pscript.testing run_tests_if_main raises<import_from_stmt>pscript py2js evaljs evalpy Parser3 stdlib<def_stmt>test_stdlib_full_and_partial <block_start>code=stdlib.get_full_std_lib()<assert_stmt>isinstance(code str)<assert_stmt>'var %shasattr ='%stdlib.FUNCTION_PREFIX<in>code<assert_stmt>'var %slist ='%stdlib.FUNCTION_PREFIX<in>code<assert_stmt>code.count('var')<g>10<line_sep>code=stdlib.get_partial_std_lib(['hasattr'] [] [])<assert_stmt>isinstance(code str)<assert_stmt>'var %shasattr ='%stdlib.FUNCTION_PREFIX<in>code<assert_stmt>'var %slist ='%stdlib.FUNCTION_PREFIX<not><in>code<assert_stmt>code.count('var')<eq>1<assert_stmt>'_hasattr = function'<in>py2js('hasattr(x, "foo")')<assert_stmt>'_hasattr = function'<not><in>py2js('hasattr(x, "foo")' inline_stdlib=<false>)<block_end><def_stmt>test_stdlib_has_all_list_methods <block_start>method_names=[m<for>m dir(list)<if><not>m.startswith('_')]<for_stmt>method_name method_names<block_start><assert_stmt>method_name<in>stdlib.METHODS<block_end><block_end><def_stmt>test_stdlib_has_all_dict_methods <block_start>method_names=[m<for>m dir(dict)<if><not>m.startswith('_')]<if_stmt>sys.version_info[0]<eq>2<block_start>ignore='fromkeys has_key viewitems viewkeys viewvalues iteritems iterkeys itervalues'<block_end><else_stmt><block_start>ignore='fromkeys'<block_end><for_stmt>name ignore.split(' ')<block_start>method_names.remove(name)<block_end><for_stmt>method_name method_names<block_start><assert_stmt>method_name<in>stdlib.METHODS<block_end><block_end><def_stmt>test_stdlib_has_all_str_methods <block_start>method_names=[m<for>m dir(str)<if><not>m.startswith('_')]<if_stmt>sys.version_info[0]<eq>2<block_start>ignore='encode decode'<block_end><else_stmt><block_start>ignore='encode format_map isprintable maketrans isascii removeprefix removesuffix'<block_end><for_stmt>name ignore.split(' ')<block_start><if_stmt>name<in>method_names<block_start>method_names.remove(name)<block_end><block_end><for_stmt>method_name method_names<block_start><assert_stmt>method_name<in>stdlib.METHODS<block_end><block_end>run_tests_if_main()<line_sep>
|
<import_stmt>os<import_stmt>sys<import_stmt>ptvsd.ipcjson<as>_ipc<class_stmt>SocketIpcChannel(_ipc.SocketIO _ipc.IpcChannel)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super(SocketIpcChannel self).__init__(*args **kwargs)<block_end><def_stmt>on_testRequest self request args<block_start>self.send_response(request success=<true> message='' requestText=args['dataText'] responseText='test response text')<block_end><def_stmt>on_disconnect self request args<block_start>self.send_response(request)<line_sep>self.__exit=<true><block_end><block_end><def_stmt>main <block_start><import_from_stmt>optparse OptionParser<line_sep>parser=OptionParser()<line_sep>parser.add_option('-r' '--result-port' type='int')<line_sep>(opts _)=parser.parse_args()<line_sep>channel=SocketIpcChannel(port=opts.result_port)<line_sep>channel.process_messages()<line_sep>channel.close()<line_sep>sys.exit(0)<block_end><if_stmt>__name__<eq>'__main__'<block_start>main()<block_end>
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<line_sep>process=cms.Process("AlCaHBHEMuon")<line_sep>process.load('Configuration.StandardSequences.Services_cff')<line_sep>process.load('FWCore.MessageService.MessageLogger_cfi')<line_sep>process.load("Configuration.StandardSequences.GeometryRecoDB_cff")<line_sep>process.load("Configuration.StandardSequences.MagneticField_cff")<line_sep>process.load('Configuration.StandardSequences.FrontierConditions_GlobalTag_cff')<import_from_stmt>Configuration.AlCa.autoCond autoCond<line_sep>process.GlobalTag.globaltag=autoCond['run2_data']<line_sep>process.load("Calibration.HcalAlCaRecoProducers.alcahbhemuon_cfi")<line_sep>process.maxEvents=cms.untracked.PSet(input=cms.untracked.int32(-1))<line_sep>process.source=cms.Source("PoolSource" # replace 'myfile.root' with the source file you want to use
fileNames=cms.untracked.vstring('file:/afs/cern.ch/user/a/amkalsi/public/RecoFileForAlcaProducer.root'# 'root://xrootd.unl.edu//store/mc/Phys14DR/DYToMuMu_M-50_Tune4C_13TeV-pythia8/GEN-SIM-RECO/PU20bx25_tsg_castor_PHYS14_25_V1-v1/10000/184C1AC9-A775-E411-9196-002590200824.root'
))<line_sep>process.load("Calibration.HcalAlCaRecoProducers.ALCARECOHcalHBHEMuon_Output_cff")<line_sep>process.muonOutput=cms.OutputModule("PoolOutputModule" outputCommands=process.OutALCARECOHcalHBHEMuon.outputCommands fileName=cms.untracked.string('PoolOutput.root') )<line_sep>process.p=cms.Path(process.HBHEMuonProd)<line_sep>process.e=cms.EndPath(process.muonOutput)<line_sep>
|
<import_stmt>datetime<import_stmt>logging<import_from_stmt>dbnd._core.utils.project.project_fs abs_join relative_path<import_from_stmt>dbnd_test_scenarios.utils.data_chaos_monkey.client_scoring_chaos is_chaos_column_10 <import_from_stmt>targets target<line_sep>logger=logging.getLogger(__name__)<line_sep>_PLUGIN_ROOT=relative_path(__file__ ".." "..")<line_sep>_PLUGIN_SRC_ROOT=relative_path(__file__)<def_stmt>scenario_root_path *path<block_start><return>abs_join(_PLUGIN_ROOT *path)<block_end><def_stmt>scenario_src_path *path<block_start><return>abs_join(_PLUGIN_SRC_ROOT *path)<block_end><def_stmt>test_scenario_path *path<block_start><return>scenario_root_path("scenarios" *path)<block_end><def_stmt>test_scenario_target *path<block_start><return>target(test_scenario_path(*path))<block_end><def_stmt>scenario_data_path *path<block_start><return>scenario_root_path("data" *path)<block_end><def_stmt>scenario_data_target *path<block_start><return>target(scenario_data_path(*path))<block_end><def_stmt>scenario_pyspark_path *path<block_start><return>scenario_src_path("spark" "pyspark_scripts" *path)<block_end><class_stmt>_Scenarios(object)<block_start><pass><block_end><class_stmt>_ScenariosClientScoringData(object)<block_start>p_g_ingest_data=scenario_data_target("client_scoring/p_g_ready_for_ingest.csv")<line_sep>p_g_ingest_data__no_col_10=scenario_data_target("client_scoring/p_g_ready_for_ingest__no_col_10.csv")<line_sep>p_g_train_data=scenario_data_target("client_scoring/p_g_ready_for_train.csv")<line_sep>p_a_master_data_bad=scenario_data_target("client_scoring/p_a_master_data_bad.csv")<line_sep>partners=["autolab" "picsdata" "myp"]<line_sep>partners_big=["autobig" "picsbig"]<def_stmt>get_ingest_data self partner target_date_str<block_start>target_date=datetime.datetime.strptime(target_date_str "%Y-%m-%d").date()<if_stmt>is_chaos_column_10(partner target_date)<block_start><return>self.p_g_ingest_data__no_col_10<block_end><return>self.p_g_ingest_data<block_end><block_end>scenarios=_Scenarios()<line_sep>client_scoring_data=_ScenariosClientScoringData()<line_sep>
|
# -*- coding:utf-8 -*-
<import_from_stmt>models.slimmable.us_resnet us_resnet18 us_resnet50<import_from_stmt>models.slimmable.us_mobilenet us_mobilenet_v2<line_sep>
|
# -*- coding: utf-8 -*-
<import_from_stmt>datetime date<import_from_stmt>datetime datetime<import_stmt>six<import_from_stmt>mock patch<import_from_stmt>bravado_core.formatter SwaggerFormat<import_from_stmt>bravado_core.formatter to_python<import_from_stmt>bravado_core.spec Spec<if_stmt><not>six.PY2<block_start>long=int<block_end><def_stmt>test_none minimal_swagger_spec<block_start>string_spec={'type':'string' 'format':'date'}<assert_stmt>to_python(minimal_swagger_spec string_spec <none>)<is><none><block_end><def_stmt>test_no_format_returns_value minimal_swagger_spec<block_start>string_spec={'type':'string'}<assert_stmt>'boo'<eq>to_python(minimal_swagger_spec string_spec 'boo')<block_end><def_stmt>test_date minimal_swagger_spec<block_start>string_spec={'type':'string' 'format':'date'}<assert_stmt>date(2015 4 1)<eq>to_python(minimal_swagger_spec string_spec '2015-04-01' )<block_end><def_stmt>test_datetime minimal_swagger_spec<block_start>string_spec={'type':'string' 'format':'date-time'}<line_sep>result=to_python(minimal_swagger_spec string_spec '2015-03-22T13:19:54' )<assert_stmt>datetime(2015 3 22 13 19 54)<eq>result<block_end>@patch('bravado_core.spec.warnings.warn')<def_stmt>test_no_registered_format_returns_value_as_is_and_issues_warning mock_warn minimal_swagger_spec<block_start>string_spec={'type':'string' 'format':'bar'}<assert_stmt>'baz'<eq>to_python(minimal_swagger_spec string_spec 'baz')<assert_stmt>mock_warn.call_count<eq>1<block_end><def_stmt>test_int64_long minimal_swagger_spec<block_start>integer_spec={'type':'integer' 'format':'int64'}<line_sep>result=to_python(minimal_swagger_spec integer_spec long(999))<assert_stmt>long(999)<eq>result<block_end><def_stmt>test_int64_int minimal_swagger_spec<block_start>integer_spec={'type':'integer' 'format':'int64'}<line_sep>result=to_python(minimal_swagger_spec integer_spec 999)<assert_stmt>long(999)<eq>result<assert_stmt>isinstance(result long)<block_end><def_stmt>test_int32_long minimal_swagger_spec<block_start>integer_spec={'type':'integer' 'format':'int32'}<line_sep>result=to_python(minimal_swagger_spec integer_spec long(999))<assert_stmt>999<eq>result<assert_stmt>isinstance(result int)<block_end><def_stmt>test_int32_int minimal_swagger_spec<block_start>integer_spec={'type':'integer' 'format':'int32'}<line_sep>result=to_python(minimal_swagger_spec integer_spec 999)<assert_stmt>999<eq>result<assert_stmt>isinstance(result int)<block_end><def_stmt>test_float minimal_swagger_spec<block_start>float_spec={'type':'number' 'format':'float'}<line_sep>result=to_python(minimal_swagger_spec float_spec float(3.14))<assert_stmt>3.14<eq>result<assert_stmt>isinstance(result float)<block_end><def_stmt>test_double minimal_swagger_spec<block_start>double_spec={'type':'number' 'format':'double'}<line_sep>result=to_python(minimal_swagger_spec double_spec float(3.14))<assert_stmt>3.14<eq>result<assert_stmt>isinstance(result float)<block_end><def_stmt>test_byte minimal_swagger_spec<block_start>byte_spec={'type':'string' 'format':'byte'}<line_sep>result=to_python(minimal_swagger_spec byte_spec 'x')<assert_stmt>'x'<eq>result<assert_stmt>isinstance(result str)<block_end><def_stmt>test_byte_base64 minimal_swagger_dict<block_start>swagger_spec=Spec.from_dict(minimal_swagger_dict config={'use_base64_for_byte_format':<true>} )<line_sep>schema={'type':'string' 'format':'byte'}<line_sep>result=to_python(swagger_spec schema 'YWJj/w==')<assert_stmt>b'abc\xff'<eq>result<assert_stmt>isinstance(result bytes)<block_end><def_stmt>test_ref 
minimal_swagger_dict<block_start>minimal_swagger_dict['definitions']['Int32']={'type':'integer' 'format':'int32' }<line_sep>int_ref_spec={'$ref':'#/definitions/Int32'}<line_sep>swagger_spec=Spec.from_dict(minimal_swagger_dict)<line_sep>result=to_python(swagger_spec int_ref_spec 999)<assert_stmt>999<eq>result<assert_stmt>isinstance(result int)<block_end><def_stmt>test_override minimal_swagger_dict<block_start><class_stmt>Byte(object)<block_start><def_stmt>__init__ self x<block_start>self.x=x<block_end><def_stmt>__str__ self<block_start><return>str(self.x)<block_end><def_stmt>__repr__ self<block_start><return>'%s(%r)'%(self.__class__ self.x)<block_end><block_end>byteformat=SwaggerFormat(format='byte' to_wire=<lambda>x:str(x) to_python=<lambda>x:Byte(x) validate=<lambda>x:isinstance(x str) description=<none> )<line_sep>number_spec={'type':'string' 'format':'byte'}<line_sep>swagger_spec=Spec.from_dict(minimal_swagger_dict config={'formats':[byteformat]})<line_sep>result=to_python(swagger_spec number_spec '8bits')<assert_stmt>'8bits'<eq>str(result)<assert_stmt>repr(Byte('8bits'))<eq>repr(result)<assert_stmt>type(result)<is>Byte<block_end>
|
<import_stmt>numpy<as>np<import_from_stmt>sklearn datasets<import_from_stmt>sklearn.decomposition PCA<line_sep>np.random.seed(0)<line_sep># import some data to play with
iris_X,iris_y=datasets.load_iris(return_X_y=<true>)<line_sep>indices=np.random.permutation(len(iris_X))<line_sep>iris_X_train=iris_X[indices[:-10]]<line_sep>iris_y_train=iris_y[indices[:-10]]<line_sep># Create and fit a nearest-neighbor classifier
<import_from_stmt>sklearn.neighbors KNeighborsClassifier<line_sep>knn=KNeighborsClassifier()<line_sep>knn.fit(iris_X_train iris_y_train)<import_from_stmt>sklearn.externals joblib<line_sep>joblib.dump(knn "knn.pkl")<line_sep>
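# A minimal sketch: reload the persisted classifier and score it on the ten
# held-out samples implied by the split above (names mirror the snippet; on
# recent scikit-learn versions `import joblib` replaces sklearn.externals).
knn_loaded = joblib.load("knn.pkl")
iris_X_test = iris_X[indices[-10:]]
iris_y_test = iris_y[indices[-10:]]
print(knn_loaded.score(iris_X_test, iris_y_test))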
|
#
# Copyright SAS Institute
#
# Licensed under the Apache License, Version 2.0 (the License);
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
<import_from_stmt>pygments.lexer RegexLexer<import_stmt>pygments.token<import_from_stmt>pygments.style Style<class_stmt>SASLogStyle(Style)<block_start>default_style=""<line_sep>styles={pygments.token.Comment:'#0000FF' pygments.token.Keyword:'bold #ff0000' pygments.token.Name:'#008000' pygments.token.String:'#111'}<block_end><class_stmt>SASLogLexer(RegexLexer)<block_start>__all__=['SASLogLexer']<line_sep>name='Lexer to Color SAS Logs equivalent to DMS'<line_sep>tokens={'root':[(r'^\d+.*((\n|\t|\n\t)[ ]([^WEN].*)(.*))*' pygments.token.String) (r'^NOTE.*((\n|\t|\n\t)[ ]([^WEN].*)(.*))*' pygments.token.Comment.Multiline 'note') (r'^ERROR.*((\n|\t|\n\t)[ ]([^WEN].*)(.*))*' pygments.token.Keyword.Multiline 'error') (r'^WARNING.*((\n|\t|\n\t)[ ]([^WEN].*)(.*))*' pygments.token.Name.Multiline 'warning') (r'\s' pygments.token.Text)] 'error':[(r'^\s+.*$' pygments.token.Keyword.Multiline) (r'^\S+.*$' pygments.token.Keyword.Multiline '#pop')] 'note':[(r'^\s+.*$' pygments.token.Comment.Multiline) (r'^\S+.*$' pygments.token.Comment.Multiline '#pop')] 'warning':[(r'^\s+.*$' pygments.token.Name.Multiline) (r'^\S+.*$' pygments.token.Name.Multiline '#pop')]}<block_end>
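# A minimal sketch, assuming pygments is installed: render a small SAS log
# fragment to HTML with the lexer and style defined above (the log text is
# an arbitrary example).
if __name__ == "__main__":
    from pygments import highlight
    from pygments.formatters import HtmlFormatter

    sample_log = (
        "1    proc print data=work.demo;\n"
        "NOTE: There were 5 observations read.\n"
        "ERROR: File WORK.MISSING.DATA does not exist."
    )
    html = highlight(sample_log, SASLogLexer(), HtmlFormatter(style=SASLogStyle, noclasses=True))
    print(html)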
|
#! /usr/bin/env python3
<import_stmt>os<import_from_stmt>setuptools setup find_packages<import_stmt>subprocess<import_stmt>shutil<class_stmt>InvalidSetupError(Exception)<block_start><pass><block_end><def_stmt>create_mo_files <block_start>"""Converts .po templates to readable .mo files using msgfmt."""<line_sep># Avoid running this code on Read the Docs, since gettext is not installed
# there
<if_stmt>os.environ.get("READTHEDOCS")<eq>"True"<block_start><return>[]<block_end><if_stmt>shutil.which("msgfmt")<is><none># If gettext isn't installed, skip this
<block_start><raise>InvalidSetupError("gettext not installed but is required.")<block_end>localedir='src/weresync/resources/locale'<line_sep>po_dirs=[]<line_sep>langs=next(os.walk(localedir))[1]<line_sep>po_dirs=[localedir+'/'+l+'/LC_MESSAGES/'<for>l langs]<for_stmt>d po_dirs<block_start>po_files=[f<for>f next(os.walk(d))[2]<if>os.path.splitext(f)[1]<eq>'.po']<for_stmt>po_file po_files<block_start>filename,extension=os.path.splitext(po_file)<line_sep>mo_file=filename+'.mo'<line_sep>msgfmt_cmd='msgfmt {} -o {}'.format(d+po_file d+mo_file)<line_sep>subprocess.call(msgfmt_cmd shell=<true>)<block_end><block_end><return>["locale/"+l+"/LC_MESSAGES/*.mo"<for>l langs]<block_end><def_stmt>read fname<block_start><with_stmt>open(os.path.join(os.path.dirname(__file__) fname))<as>file<block_start><return>file.read()<block_end><block_end>target_icon_loc="share/icons/hicolor/scalable/apps"<if_stmt>os.getuid()<eq>0# Install is running as root
<block_start>target_icon_loc="/usr/"+target_icon_loc<block_end><if_stmt>__name__<eq>"__main__"<block_start>setup(name="WereSync" version="1.1.5" package_dir={"":"src"} packages=find_packages("src") install_requires=["parse==1.6.6" "yapsy==1.11.223" "pydbus==0.6.0"] entry_points={'console_scripts':["weresync = weresync.interface.cli:main" "weresync-daemon = weresync.daemon.daemon:run"] 'gui_scripts':["weresync-gui = weresync.interface.gui:start_gui"]} package_data={"weresync.resources":["*.svg" "*.png" "weresync*.*"]+create_mo_files()} data_files=[(target_icon_loc ["src/weresync/resources/weresync.svg"])] # Metadata
author="<NAME>" author_email="<EMAIL>" description="Incrementally clones Linux drives" long_description=read("README.rst") license="Apache 2.0" keywords="clone, linux, backup, smaller drive" url="https://github.com/DonyorM/weresync" )<block_end>
|
<import_stmt>os<import_stmt>pdb<import_stmt>os.path<as>osp<import_stmt>sys<line_sep>sys.path[0]=os.getcwd()<import_stmt>cv2<import_stmt>copy<import_stmt>json<import_stmt>yaml<import_stmt>logging<import_stmt>argparse<import_from_stmt>tqdm tqdm<import_from_stmt>itertools groupby<import_stmt>pycocotools.mask<as>mask_utils<import_stmt>numpy<as>np<import_stmt>torch<import_from_stmt>torchvision.transforms transforms<as>T<import_from_stmt>utils.log logger<import_from_stmt>utils.meter Timer<import_from_stmt>utils.mask pts2array<import_stmt>data.video<as>videodataset<import_from_stmt>utils visualize<as>vis<import_from_stmt>utils.io mkdir_if_missing<import_from_stmt>core.association matching<import_from_stmt>tracker.mot.pose PoseAssociationTracker<def_stmt>identical a b<block_start><if_stmt>len(a)<eq>len(b)<block_start>arra=pts2array(a)<line_sep>arrb=pts2array(b)<if_stmt>np.abs(arra-arrb).sum()<l>1e-2<block_start><return><true><block_end><block_end><return><false><block_end><def_stmt>fuse_result res jpath<block_start><with_stmt>open(jpath 'r')<as>f<block_start>obsj=json.load(f)<block_end>obsj_fused=copy.deepcopy(obsj)<for_stmt>t,inpj enumerate(obsj['annolist'])<block_start>skltns,ids=res[t][2] res[t][3]<line_sep>nobj_ori=len(obsj['annolist'][t]['annorect'])<for_stmt>i range(nobj_ori)<block_start>obsj_fused['annolist'][t]['annorect'][i]['track_id']=[1000]<for_stmt>j,skltn enumerate(skltns)<block_start>match=identical(obsj['annolist'][t]['annorect'][i]['annopoints'][0]['point'] skltn)<if_stmt>match<block_start>obsj_fused['annolist'][t]['annorect'][i]['track_id']=[ids[j] ]<block_end><block_end><block_end><block_end><return>obsj_fused<block_end><def_stmt>eval_seq opt dataloader save_dir=<none><block_start><if_stmt>save_dir<block_start>mkdir_if_missing(save_dir)<block_end>tracker=PoseAssociationTracker(opt)<line_sep>timer=Timer()<line_sep>results=[]<for_stmt>frame_id,(img obs img0 _) enumerate(dataloader)# run tracking
<block_start>timer.tic()<line_sep>online_targets=tracker.update(img img0 obs)<line_sep>online_tlwhs=[]<line_sep>online_ids=[]<line_sep>online_poses=[]<for_stmt>t online_targets<block_start>tlwh=t.tlwh<line_sep>tid=t.track_id<line_sep>online_tlwhs.append(tlwh)<line_sep>online_ids.append(tid)<line_sep>online_poses.append(t.pose)<block_end>timer.toc()<line_sep># save results
results.append((frame_id+1 online_tlwhs online_poses online_ids))<if_stmt>save_dir<is><not><none><block_start>online_im=vis.plot_tracking(img0 online_tlwhs online_ids frame_id=frame_id fps=1./timer.average_time)<block_end><if_stmt>save_dir<is><not><none><block_start>cv2.imwrite(os.path.join(save_dir '{:05d}.jpg'.format(frame_id)) online_im)<block_end><block_end><return>results timer.average_time timer.calls<block_end><def_stmt>main opt<block_start>logger.setLevel(logging.INFO)<line_sep>result_root=opt.out_root<line_sep>result_json_root=osp.join(result_root 'json')<line_sep>mkdir_if_missing(result_json_root)<line_sep>transforms=T.Compose([T.ToTensor() T.Normalize(opt.im_mean opt.im_std)])<line_sep>obs_root=osp.join(opt.data_root 'obs' opt.split opt.obid)<line_sep>obs_jpaths=[osp.join(obs_root o)<for>o os.listdir(obs_root)]<line_sep>obs_jpaths=sorted([o<for>o obs_jpaths<if>o.endswith('.json')])<line_sep># run tracking
accs=[]<line_sep>timer_avgs,timer_calls=[] []<for_stmt>i,obs_jpath enumerate(obs_jpaths)<block_start>seqname=obs_jpath.split('/')[-1].split('.')[0]<line_sep>output_dir=osp.join(result_root 'frame' seqname)<line_sep>dataloader=videodataset.LoadImagesAndPoseObs(obs_jpath opt)<line_sep>seq_res,ta,tc=eval_seq(opt dataloader save_dir=output_dir)<line_sep>seq_json=fuse_result(seq_res obs_jpath)<with_stmt>open(osp.join(result_json_root "{}.json".format(seqname)) 'w')<as>f<block_start>json.dump(seq_json f)<block_end>timer_avgs.append(ta)<line_sep>timer_calls.append(tc)<line_sep># eval
logger.info('Evaluate seq: {}'.format(seqname))<if_stmt>opt.save_videos<block_start>output_video_path=osp.join(output_dir '{}.mp4'.format(seqname))<line_sep>cmd_str='ffmpeg -f image2 -i {}/%05d.jpg -c:v copy {}'.format(output_dir output_video_path)<line_sep>os.system(cmd_str)<block_end><block_end>timer_avgs=np.asarray(timer_avgs)<line_sep>timer_calls=np.asarray(timer_calls)<line_sep>all_time=np.dot(timer_avgs timer_calls)<line_sep>avg_time=all_time/np.sum(timer_calls)<line_sep>logger.info('Time elapsed: {:.2f} seconds, FPS: {:.2f}'.format(all_time 1.0/avg_time))<line_sep>cmd_str=('python ./eval/poseval/evaluate.py --groundTruth={}/posetrack_data/annotations/{} '<concat>'--predictions={}/ --evalPoseTracking'.format(opt.data_root opt.split result_json_root))<line_sep>os.system(cmd_str)<block_end><if_stmt>__name__<eq>'__main__'<block_start>parser=argparse.ArgumentParser()<line_sep>parser.add_argument('--config' default='' required=<true> type=str)<line_sep>opt=parser.parse_args()<with_stmt>open(opt.config)<as>f<block_start>common_args=yaml.load(f)<block_end><for_stmt>k,v common_args['common'].items()<block_start>setattr(opt k v)<block_end><for_stmt>k,v common_args['posetrack'].items()<block_start>setattr(opt k v)<block_end>opt.out_root=osp.join('results/pose' opt.exp_name)<line_sep>opt.out_file=osp.join('results/pose' opt.exp_name+'.json')<line_sep>print(opt end='\n\n')<line_sep>main(opt)<block_end>
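The __main__ block above flattens the 'common' and 'posetrack' sections of a YAML config onto the argparse namespace. The sketch below mirrors that flattening on a plain dict; the section names come from the code, while the individual keys and values are assumptions based on the attributes the script reads (exp_name, im_mean, im_std, data_root, split, obid, save_videos).

from types import SimpleNamespace

example_config = {
    "common": {
        "exp_name": "demo_run",
        "im_mean": [0.485, 0.456, 0.406],
        "im_std": [0.229, 0.224, 0.225],
    },
    "posetrack": {
        "data_root": "/path/to/posetrack",
        "split": "val",
        "obid": "det_v1",
        "save_videos": False,
    },
}

opt_demo = SimpleNamespace(config="demo.yaml")
for section in ("common", "posetrack"):
    for k, v in example_config[section].items():
        setattr(opt_demo, k, v)  # same flattening as in the __main__ block above

print(opt_demo.data_root, opt_demo.split, opt_demo.obid)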
|
# -*- coding: utf-8 -*-
<import_stmt>logging<if_stmt>__name__<eq>'__main__'<block_start>logging.basicConfig()<block_end>_log=logging.getLogger(__name__)<import_stmt>unittest<import_stmt>pyxb.binding.datatypes<as>xsd<class_stmt>Test_anyURI(unittest.TestCase)<block_start><def_stmt>testRange self<block_start>self.fail("Datatype anyURI test not implemented")<block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
|
<import_stmt>networkx<as>nx<import_stmt>csv<import_from_stmt>scipy sparse<as>sp<import_from_stmt>scipy.sparse csgraph<import_stmt>scipy.sparse.linalg<as>splinalg<import_stmt>numpy<as>np<import_stmt>pandas<as>pd<import_stmt>warnings<import_stmt>collections<as>cole<import_from_stmt>.cpp *<import_stmt>random<import_stmt>gzip<import_stmt>bz2<import_stmt>lzma<import_stmt>multiprocessing<as>mp<import_stmt>matplotlib.pyplot<as>plt<import_from_stmt>mpl_toolkits.mplot3d Axes3D<import_from_stmt>matplotlib.collections LineCollection<import_from_stmt>mpl_toolkits.mplot3d.art3d Line3DCollection<import_from_stmt>matplotlib.colors to_rgb to_rgba<import_from_stmt>matplotlib.colors LinearSegmentedColormap<import_from_stmt>matplotlib.colors Normalize<import_from_stmt>matplotlib.cm ScalarMappable<import_from_stmt>collections defaultdict<import_from_stmt>.GraphDrawing GraphDrawing<def_stmt>_load_from_shared sabuf dtype shape<block_start><return>np.frombuffer(sabuf dtype=dtype).reshape(shape)<block_end>""" Create shared memory that can be passed to a child process,
wrapped in a numpy array."""<def_stmt>_copy_to_shared a# determine the numpy type of a.
<block_start><import_stmt>ctypes# needed for mp.RawArray below; ctypes is not imported at module level<line_sep>dtype=a.dtype<line_sep>shape=a.shape<line_sep>sabuf=mp.RawArray(ctypes.c_uint8 a.nbytes)<line_sep>sa=_load_from_shared(sabuf dtype shape)<line_sep>np.copyto(sa a)# make a copy
<return>sa (sabuf dtype shape)<block_end><class_stmt>GraphLocal<block_start>"""
This class implements graph loading from an edgelist, gml or graphml and provides methods that operate on the graph.
Attributes
----------
adjacency_matrix : scipy csr matrix
ai : numpy vector
CSC format index pointer array, its data type is determined by "itype" during initialization
aj : numpy vector
CSC format index array, its data type is determined by "vtype" during initialization
_num_vertices : int
Number of vertices
_num_edges : int
Number of edges
_weighted : boolean
Declares if it is a weighted graph or not
d : float64 numpy vector
Degrees vector
dn : float64 numpy vector
Component-wise reciprocal of degrees vector
d_sqrt : float64 numpy vector
Component-wise square root of degrees vector
dn_sqrt : float64 numpy vector
Component-wise reciprocal of square root of degrees vector
vol_G : float64
Volume of the graph (sum of all degrees)
components : list of sets
Each set contains the indices of a connected component of the graph
number_of_components : int
Number of connected components of the graph
bicomponents : list of sets
Each set contains the indices of a biconnected component of the graph
number_of_bicomponents : int
Number of biconnected components of the graph
core_numbers : dictionary
Core number for each vertex
Methods
-------
read_graph(filename, file_type='edgelist', separator='\t')
Reads the graph from a file
compute_statistics()
Computes statistics for the graph
connected_components()
Computes the connected components of the graph
is_disconnected()
Checks if graph is connected
biconnected_components():
Computes the biconnected components of the graph
core_number()
Returns the core number for each vertex
neighbors(vertex)
Returns a list with the neighbors of the given vertex
list_to_gl(source,target)
Create a GraphLocal object from edge list
"""<def_stmt>__init__ self filename=<none> file_type='edgelist' separator='\t' remove_whitespace=<false> header=<false> headerrow=<none> vtype=np.uint32 itype=np.uint32<block_start>"""
Initializes the graph from a gml or a edgelist file and initializes the attributes of the class.
Parameters
----------
See read_graph for a description of the parameters.
"""<if_stmt>filename<ne><none><block_start>self.read_graph(filename file_type=file_type separator=separator remove_whitespace=remove_whitespace header=header headerrow=headerrow vtype=vtype itype=itype)<block_end><block_end><def_stmt>__eq__ self other<block_start><if_stmt><not>isinstance(other GraphLocal)<block_start><return>NotImplemented<block_end><return>np.array_equal(self.ai other.ai)<and>np.array_equal(self.aj other.aj)<and>np.array_equal(self.adjacency_matrix.data other.adjacency_matrix.data)<block_end><def_stmt>read_graph self filename file_type='edgelist' separator='\t' remove_whitespace=<false> header=<false> headerrow=<none> vtype=np.uint32 itype=np.uint32<block_start>"""
Reads the graph from an edgelist, gml or graphml file and initializes the class attribute adjacency_matrix.
Parameters
----------
filename : string
Name of the file, for example 'JohnsHopkins.edgelist', 'JohnsHopkins.gml', 'JohnsHopkins.graphml'.
file_type : string
Type of file. Currently only 'edgelist', 'gml' and 'graphml' are supported.
Default = 'edgelist'
separator : string
used if file_type = 'edgelist'
Default = '\t'
remove_whitespace : bool
set it to be True when there is more than one kinds of separators in the file
Default = False
header : bool
This lets the first line of the file contain a set of header
information that should be ignored
Default = False
headerrow : int
Which row to use for column names. This argument takes precedence over
header=True (which is equivalent to headerrow = 0)
Default = None
vtype
numpy integer type of CSC format index array
Default = np.uint32
itype
numpy integer type of CSC format index pointer array
Default = np.uint32
"""<if_stmt>file_type<eq>'edgelist'#dtype = {0:'int32', 1:'int32', 2:'float64'}
<block_start><if_stmt>header<and>headerrow<is><none><block_start>headerrow=0<block_end><if_stmt>remove_whitespace<block_start>df=pd.read_csv(filename header=headerrow delim_whitespace=remove_whitespace)<block_end><else_stmt><block_start>df=pd.read_csv(filename sep=separator header=headerrow delim_whitespace=remove_whitespace)<block_end>cols=[0 1 2]<if_stmt>header<ne><none><block_start>cols=list(df.columns)<block_end>source=df[cols[0]].values<line_sep>target=df[cols[1]].values<if_stmt>df.shape[1]<eq>2<block_start>weights=np.ones(source.shape[0])<block_end><elif_stmt>df.shape[1]<eq>3<block_start>weights=df[cols[2]].values<block_end><else_stmt><block_start><raise>Exception('GraphLocal.read_graph: df.shape[1] not in (2, 3)')<block_end>self._num_vertices=max(source.max()+1 target.max()+1)<line_sep>#self.adjacency_matrix = source, target, weights
self.adjacency_matrix=sp.csr_matrix((weights.astype(np.float64) (source target)) shape=(self._num_vertices self._num_vertices))<block_end><elif_stmt>file_type<eq>'gml'<block_start>warnings.warn("Loading a gml is not efficient, we suggest using an edgelist format for this API.")<line_sep>G=nx.read_gml(filename).to_undirected()<line_sep>self.adjacency_matrix=nx.adjacency_matrix(G).astype(np.float64)<line_sep>self._num_vertices=nx.number_of_nodes(G)<block_end><elif_stmt>file_type<eq>'graphml'<block_start>warnings.warn("Loading a graphml is not efficient, we suggest using an edgelist format for this API.")<line_sep>G=nx.read_graphml(filename).to_undirected()<line_sep>self.adjacency_matrix=nx.adjacency_matrix(G).astype(np.float64)<line_sep>self._num_vertices=nx.number_of_nodes(G)<block_end><else_stmt><block_start>print('This file type is not supported')<line_sep><return><block_end>self._weighted=<false><for_stmt>i self.adjacency_matrix.data<block_start><if_stmt>i<ne>1<block_start>self._weighted=<true><line_sep><break><block_end><block_end>is_symmetric=(self.adjacency_matrix<ne>self.adjacency_matrix.T).sum()<eq>0<if_stmt><not>is_symmetric# Symmetrize matrix, choosing larger weight
<block_start>sel=self.adjacency_matrix.T<g>self.adjacency_matrix<line_sep>self.adjacency_matrix=self.adjacency_matrix-self.adjacency_matrix.multiply(sel)+self.adjacency_matrix.T.multiply(sel)<assert_stmt>(self.adjacency_matrix<ne>self.adjacency_matrix.T).sum()<eq>0<block_end>self._num_edges=self.adjacency_matrix.nnz<line_sep>self.compute_statistics()<line_sep>self.ai=itype(self.adjacency_matrix.indptr)<line_sep>self.aj=vtype(self.adjacency_matrix.indices)<block_end>@classmethod<def_stmt>from_networkx cls G<block_start>"""
Create a GraphLocal object from a networkx graph.
Parameters
----------
G
The networkx graph.
"""<if_stmt>G.is_directed()<eq><true><block_start><raise>Exception("from_networkx requires an undirected graph, use G.to_undirected()")<block_end>rval=cls()<line_sep>rval.adjacency_matrix=nx.adjacency_matrix(G).astype(np.float64)<line_sep>rval._num_vertices=nx.number_of_nodes(G)<line_sep># TODO, use this in the read_graph
rval._weighted=<false><for_stmt>i rval.adjacency_matrix.data<block_start><if_stmt>i<ne>1<block_start>rval._weighted=<true><line_sep><break><block_end><block_end># automatically determine sizes
<if_stmt>G.number_of_nodes()<l>4294967295<block_start>vtype=np.uint32<block_end><else_stmt><block_start>vtype=np.int64<block_end><if_stmt>2<times>G.number_of_edges()<l>4294967295<block_start>itype=np.uint32<block_end><else_stmt><block_start>itype=np.int64<block_end>rval._num_edges=rval.adjacency_matrix.nnz<line_sep>rval.compute_statistics()<line_sep>rval.ai=itype(rval.adjacency_matrix.indptr)<line_sep>rval.aj=vtype(rval.adjacency_matrix.indices)<line_sep><return>rval<block_end>@classmethod<def_stmt>from_sparse_adjacency cls A<block_start>"""
Create a GraphLocal object from a sparse adjacency matrix.
Parameters
----------
A
Adjacency matrix.
"""<line_sep>self=cls()<line_sep>self.adjacency_matrix=A.copy()<line_sep>self._num_vertices=A.shape[0]<line_sep>self._num_edges=A.nnz<line_sep># TODO, use this in the read_graph
self._weighted=<false><for_stmt>i self.adjacency_matrix.data<block_start><if_stmt>i<ne>1<block_start>self._weighted=<true><line_sep><break><block_end><block_end># automatically determine sizes
<if_stmt>self._num_vertices<l>4294967295<block_start>vtype=np.uint32<block_end><else_stmt><block_start>vtype=np.int64<block_end><if_stmt>2<times>self._num_edges<l>4294967295<block_start>itype=np.uint32<block_end><else_stmt><block_start>itype=np.int64<block_end>self.compute_statistics()<line_sep>self.ai=itype(self.adjacency_matrix.indptr)<line_sep>self.aj=vtype(self.adjacency_matrix.indices)<line_sep><return>self<block_end><def_stmt>renew_data self A<block_start>"""
Update data because the adjacency matrix changed
Parameters
----------
A
Adjacency matrix.
"""<line_sep>self._num_edges=A.nnz<line_sep># TODO, use this in the read_graph
self._weighted=<false><for_stmt>i self.adjacency_matrix.data<block_start><if_stmt>i<ne>1<block_start>self._weighted=<true><line_sep><break><block_end><block_end># automatically determine sizes
<if_stmt>self._num_vertices<l>4294967295<block_start>vtype=np.uint32<block_end><else_stmt><block_start>vtype=np.int64<block_end><if_stmt>2<times>self._num_edges<l>4294967295<block_start>itype=np.uint32<block_end><else_stmt><block_start>itype=np.int64<block_end>self.compute_statistics()<line_sep>self.ai=itype(self.adjacency_matrix.indptr)<line_sep>self.aj=vtype(self.adjacency_matrix.indices)<block_end><def_stmt>list_to_gl self source target weights vtype=np.uint32 itype=np.uint32<block_start>"""
Create a GraphLocal object from edge list.
Parameters
----------
source
A numpy array of sources for the edges
target
A numpy array of targets for the edges
weights
A numpy array of weights for the edges
vtype
numpy integer type of CSC format index array
Default = np.uint32
itype
numpy integer type of CSC format index pointer array
Default = np.uint32
"""<line_sep># TODO, fix this up to avoid duplicating code with read...
source=np.array(source dtype=vtype)<line_sep>target=np.array(target dtype=vtype)<line_sep>weights=np.array(weights dtype=np.double)<line_sep>self._num_edges=len(source)<line_sep>self._num_vertices=max(source.max()+1 target.max()+1)<line_sep>self.adjacency_matrix=sp.csr_matrix((weights.astype(np.float64) (source target)) shape=(self._num_vertices self._num_vertices))<line_sep>self._weighted=<false><for_stmt>i self.adjacency_matrix.data<block_start><if_stmt>i<ne>1<block_start>self._weighted=<true><line_sep><break><block_end><block_end>is_symmetric=(self.adjacency_matrix<ne>self.adjacency_matrix.T).sum()<eq>0<if_stmt><not>is_symmetric# Symmetrize matrix, choosing larger weight
<block_start>sel=self.adjacency_matrix.T<g>self.adjacency_matrix<line_sep>self.adjacency_matrix=self.adjacency_matrix-self.adjacency_matrix.multiply(sel)+self.adjacency_matrix.T.multiply(sel)<assert_stmt>(self.adjacency_matrix<ne>self.adjacency_matrix.T).sum()<eq>0<block_end>self._num_edges=self.adjacency_matrix.nnz<line_sep>self.compute_statistics()<line_sep>self.ai=itype(self.adjacency_matrix.indptr)<line_sep>self.aj=vtype(self.adjacency_matrix.indices)<block_end><def_stmt>discard_weights self<block_start>""" Discard any weights that were loaded from the data file.
This sets all the weights associated with each edge to 1.0,
which is our "no weight" case."""<line_sep>self.adjacency_matrix.data.fill(1.0)<line_sep>self._weighted=<false><line_sep>self.compute_statistics()<block_end><def_stmt>compute_statistics self<block_start>"""
Computes statistics for the graph. It updates the class attributes.
The user needs to load the graph first, by calling the read_graph
method of this class, before calling this method.
"""<line_sep>self.d=np.ravel(self.adjacency_matrix.sum(axis=1))<line_sep>self.dn=np.zeros(self._num_vertices)<line_sep>self.dn[self.d<ne>0]=1.0/self.d[self.d<ne>0]<line_sep>self.d_sqrt=np.sqrt(self.d)<line_sep>self.dn_sqrt=np.sqrt(self.dn)<line_sep>self.vol_G=np.sum(self.d)<block_end><def_stmt>to_shared self<block_start>""" Re-create the graph data with multiprocessing compatible
shared-memory arrays that can be passed to child-processes.
This returns a dictionary that allows the graph to be
re-created in a child-process from that variable and
the method "from_shared"
At this moment, this doesn't send any data from components,
core_numbers, or biconnected_components
"""<line_sep>sgraphvars={}<line_sep>self.ai,sgraphvars["ai"]=_copy_to_shared(self.ai)<line_sep>self.aj,sgraphvars["aj"]=_copy_to_shared(self.aj)<line_sep>self.d,sgraphvars["d"]=_copy_to_shared(self.d)<line_sep>self.dn,sgraphvars["dn"]=_copy_to_shared(self.dn)<line_sep>self.d_sqrt,sgraphvars["d_sqrt"]=_copy_to_shared(self.d_sqrt)<line_sep>self.dn_sqrt,sgraphvars["dn_sqrt"]=_copy_to_shared(self.dn_sqrt)<line_sep>self.adjacency_matrix.data,sgraphvars["a"]=_copy_to_shared(self.adjacency_matrix.data)<line_sep># this will rebuild without copying
# so that copies should all be accessing exactly the same
# arrays for caching
self.adjacency_matrix=sp.csr_matrix((self.adjacency_matrix.data self.aj self.ai) shape=(self._num_vertices self._num_vertices))<line_sep># scalars
sgraphvars["n"]=self._num_vertices<line_sep>sgraphvars["m"]=self._num_edges<line_sep>sgraphvars["vol"]=self.vol_G<line_sep>sgraphvars["weighted"]=self._weighted<line_sep><return>sgraphvars<block_end>@classmethod<def_stmt>from_shared cls sgraphvars<block_start>""" Return a graph object from the output of "to_shared". """<line_sep>g=cls()<line_sep>g._num_vertices=sgraphvars["n"]<line_sep>g._num_edges=sgraphvars["m"]<line_sep>g._weighted=sgraphvars["weighted"]<line_sep>g.vol_G=sgraphvars["vol"]<line_sep>g.ai=_load_from_shared(*sgraphvars["ai"])<line_sep>g.aj=_load_from_shared(*sgraphvars["aj"])<line_sep>g.adjacency_matrix=sp.csr_matrix((_load_from_shared(*sgraphvars["a"]) g.aj g.ai) shape=(g._num_vertices g._num_vertices))<line_sep>g.d=_load_from_shared(*sgraphvars["d"])<line_sep>g.dn=_load_from_shared(*sgraphvars["dn"])<line_sep>g.d_sqrt=_load_from_shared(*sgraphvars["d_sqrt"])<line_sep>g.dn_sqrt=_load_from_shared(*sgraphvars["dn_sqrt"])<line_sep><return>g<block_end><def_stmt>connected_components self<block_start>"""
Computes the connected components of the graph. It stores the results in class attributes components
and number_of_components. The user needs to load the graph first, by
calling the read_graph function of this class, before calling this function.
"""<line_sep>output=csgraph.connected_components(self.adjacency_matrix directed=<false>)<line_sep>self.components=output[1]<line_sep>self.number_of_components=output[0]<line_sep>print('There are ' self.number_of_components ' connected components in the graph')<block_end><def_stmt>is_disconnected self<block_start>"""
Checks whether the graph is disconnected. It prints the result and
returns True if the graph is disconnected, or False otherwise. The
connected-component information is also stored on the graph object
(components and number_of_components). The user needs to load the graph
first, by calling the read_graph function of this class, before calling
this function.
Returns
-------
True
If the graph is disconnected
False
If the graph is connected
"""<if_stmt>self.d<eq>[]<block_start>print('The graph has to be read first.')<line_sep><return><block_end>self.connected_components()<if_stmt>self.number_of_components<g>1<block_start>print('The graph is a disconnected graph.')<line_sep><return><true><block_end><else_stmt><block_start>print('The graph is not a disconnected graph.')<line_sep><return><false><block_end><block_end><def_stmt>biconnected_components self<block_start>"""
Computes the biconnected components of the graph. It stores the results in class attributes bicomponents
and number_of_bicomponents. The user needs to load the graph first, by calling the read_graph
function of this class, before calling this function. This function calls NetworkX.
"""<line_sep>warnings.warn("Warning, biconnected_components is not efficiently implemented.")<line_sep>g_nx=nx.from_scipy_sparse_matrix(self.adjacency_matrix)<line_sep>self.bicomponents=list(nx.biconnected_components(g_nx))<line_sep>self.number_of_bicomponents=len(self.bicomponents)<block_end><def_stmt>core_number self<block_start>"""
Returns the core number for each vertex. A k-core is a maximal
subgraph that contains nodes of degree k or more. The core number of a node
is the largest value k of a k-core containing that node. The user needs to
load the graph first, by calling the read_graph function of this class,
before calling this function. The output can be accessed from the graph object
that calls this function; the results are stored in the class attribute core_numbers.
"""<line_sep>warnings.warn("Warning, core_number is not efficiently implemented.")<line_sep>g_nx=nx.from_scipy_sparse_matrix(self.adjacency_matrix)<line_sep>self.core_numbers=nx.core_number(g_nx)<block_end><def_stmt>neighbors self vertex<block_start>"""
Returns a list with the neighbors of the given vertex.
"""<line_sep># this will be faster since we store the arrays ourselves.
<return>self.aj[self.ai[vertex]:self.ai[vertex+1]].tolist()<line_sep>#return self.adjacency_matrix[:,vertex].nonzero()[0].tolist()
<block_end><def_stmt>compute_conductance self R cpp=<true><block_start>"""
Return conductance of a set of vertices.
"""<line_sep>records=self.set_scores(R cpp=cpp)<line_sep><return>records["cond"]<block_end><def_stmt>set_scores self R cpp=<true><block_start>"""
Return various metrics of a set of vertices.
"""<line_sep>voltrue,cut=0 0<if_stmt>cpp<block_start>voltrue,cut=set_scores_cpp(self._num_vertices self.ai self.aj self.adjacency_matrix.data self.d R self._weighted)<block_end><else_stmt><block_start>voltrue=sum(self.d[R])<line_sep>v_ones_R=np.zeros(self._num_vertices)<line_sep>v_ones_R[R]=1<line_sep>cut=voltrue-np.dot(v_ones_R self.adjacency_matrix.dot(v_ones_R.T))<block_end>voleff=min(voltrue self.vol_G-voltrue)<line_sep>sizetrue=len(R)<line_sep>sizeeff=sizetrue<if_stmt>voleff<l>voltrue<block_start>sizeeff=self._num_vertices-sizetrue<block_end># remove the stuff we don't want returned...
<del_stmt>R<del_stmt>self<if_stmt><not>cpp<block_start><del_stmt>v_ones_R<block_end><del_stmt>cpp<line_sep>edgestrue=voltrue-cut<line_sep>edgeseff=voleff-cut<line_sep>cond=cut/voleff<if>voleff<ne>0<else>1<line_sep>isop=cut/sizeeff<if>sizeeff<ne>0<else>1<line_sep># make a dictionary out of local variables
<return>locals()<block_end><def_stmt>largest_component self<block_start>self.connected_components()<if_stmt>self.number_of_components<eq>1#self.compute_statistics()
<block_start><return>self<block_end><else_stmt># find nodes of largest component
<block_start>counter=cole.Counter(self.components)<line_sep>maxccnodes=[]<line_sep>what_key=counter.most_common(1)[0][0]<for_stmt>i range(self._num_vertices)<block_start><if_stmt>what_key<eq>self.components[i]<block_start>maxccnodes.append(i)<block_end><block_end># biggest component by len of it's list of nodes
#maxccnodes = max(self.components, key=len)
#maxccnodes = list(maxccnodes)
warnings.warn("The graph has multiple (%i) components, using the largest with %i / %i nodes"%(self.number_of_components len(maxccnodes) self._num_vertices))<line_sep>g_copy=GraphLocal()<line_sep>g_copy.adjacency_matrix=self.adjacency_matrix[maxccnodes :].tocsc()[: maxccnodes].tocsr()<line_sep>g_copy._num_vertices=len(maxccnodes)# AHH!
g_copy.compute_statistics()<line_sep>g_copy._weighted=self._weighted<line_sep>dt=np.dtype(self.ai[0])<line_sep>itype=np.int64<if>dt.name<eq>'int64'<else>np.uint32<line_sep>dt=np.dtype(self.aj[0])<line_sep>vtype=np.int64<if>dt.name<eq>'int64'<else>np.uint32<line_sep>g_copy.ai=itype(g_copy.adjacency_matrix.indptr)<line_sep>g_copy.aj=vtype(g_copy.adjacency_matrix.indices)<line_sep>g_copy._num_edges=g_copy.adjacency_matrix.nnz<line_sep><return>g_copy<block_end><block_end><def_stmt>local_extrema self vals strict=<false> reverse=<false><block_start>"""
Find extrema in a graph based on a set of values.
Parameters
----------
vals: Sequence[float]
a feature value per node, compared against neighboring values to find the extrema, e.g. conductance
strict: bool
If True, find a set of vertices where vals(i) < vals(j) for all neighbors N(j)
i.e. strict local minima in the space of the graph
If False, find a set of vertices where vals(i) <= vals(j) for all neighbors N(j)
i.e. non-strict local minima (ties allowed) in the space of the graph
reverse: bool
if True, then find local maxima, if False then find local minima
(by default, this is false, so we find local minima)
Returns
-------
minverts: Sequence[int]
the set of vertices
minvals: Sequence[float]
the set of min values
"""<line_sep>n=self.adjacency_matrix.shape[0]<line_sep>minverts=[]<line_sep>ai=self.ai<line_sep>aj=self.aj<line_sep>factor=1.0<if_stmt>reverse<block_start>factor=-1.0<block_end><for_stmt>i range(n)<block_start>vali=factor<times>vals[i]<line_sep>lmin=<true><for_stmt>nzi range(ai[i] ai[i+1])<block_start>v=aj[nzi]<if_stmt>v<eq>i<block_start><continue># skip self-loops
<block_end><if_stmt>strict<block_start><if_stmt>vali<l>factor<times>vals[v]<block_start><continue><block_end><else_stmt><block_start>lmin=<false><block_end><block_end><else_stmt><block_start><if_stmt>vali<le>factor<times>vals[v]<block_start><continue><block_end><else_stmt><block_start>lmin=<false><block_end><block_end><if_stmt>lmin<eq><false><block_start><break><block_end><block_end># break out of the loop
<if_stmt>lmin<block_start>minverts.append(i)<block_end><block_end>minvals=vals[minverts]<line_sep><return>minverts minvals<block_end>@staticmethod<def_stmt>_plotting drawing edgecolor edgealpha linewidth is_3d **kwargs<block_start>"""
private function to do the plotting
"**kwargs" represents all possible optional parameters of "scatter" function
in matplotlib.pyplot
"""<line_sep>drawing.scatter(**kwargs)<line_sep>drawing.plot(color=edgecolor alpha=edgealpha linewidths=linewidth)<line_sep>axs=drawing.ax<line_sep>axs.autoscale()<if_stmt>is_3d<eq>3# Set the initial view
<block_start>axs.view_init(30 angle)<block_end><block_end><def_stmt>draw self coords alpha=1.0 nodesize=5 linewidth=1 nodealpha=1.0 edgealpha=1.0 edgecolor='k' nodemarker='o' axs=<none> fig=<none> values=<none> cm=<none> valuecenter=<none> angle=30 figsize=<none> nodecolor='r'<block_start>"""
Standard drawing function when having single cluster
Parameters
----------
coords: a n-by-2 or n-by-3 array with coordinates for each node of the graph.
Optional parameters
------------------
alpha: float (1.0 by default)
the overall alpha scaling of the plot, [0,1]
nodealpha: float (1.0 by default)
the overall node alpha scaling of the plot, [0, 1]
edgealpha: float (1.0 by default)
the overall edge alpha scaling of the plot, [0, 1]
nodecolor: string or RGB ('r' by default)
edgecolor: string or RGB ('k' by default)
nodemarker: string ('o' by default)
nodesize: float (5.0 by default)
linewidth: float (1.0 by default)
axs,fig: None,None (default)
by default it will create a new figure, or this will plot in axs if not None.
values: Sequence[float] (None by default)
used to determine node colors in a colormap, should have the same length as coords
valuecenter: often used with values together to determine vmin and vmax of colormap
offset = max(abs(values-valuecenter))
vmax = valuecenter + offset
vmin = valuecenter - offset
cm: string or colormap object (None by default)
figsize: tuple (None by default)
angle: float (30 by default)
set initial view angle when drawing 3d
Returns
-------
A GraphDrawing object
"""<line_sep>drawing=GraphDrawing(self coords ax=axs figsize=figsize)<if_stmt>values<is><not><none><block_start>values=np.asarray(values)<if_stmt>values.ndim<eq>2<block_start>node_color_list=np.reshape(values len(coords))<block_end><else_stmt><block_start>node_color_list=values<block_end>vmin=min(node_color_list)<line_sep>vmax=max(node_color_list)<if_stmt>cm<is><not><none><block_start>cm=plt.get_cmap(cm)<block_end><else_stmt><block_start><if_stmt>valuecenter<is><not><none>#when both values and valuecenter are provided, use PuOr colormap to determine colors
<block_start>cm=plt.get_cmap("PuOr")<line_sep>offset=max(abs(node_color_list-valuecenter))<line_sep>vmax=valuecenter+offset<line_sep>vmin=valuecenter-offset<block_end><else_stmt><block_start>cm=plt.get_cmap("magma")<block_end><block_end>self._plotting(drawing edgecolor edgealpha linewidth len(coords[0])<eq>3 c=node_color_list alpha=alpha<times>nodealpha edgecolors='none' s=nodesize marker=nodemarker zorder=2 cmap=cm vmin=vmin vmax=vmax)<block_end><else_stmt><block_start>self._plotting(drawing edgecolor edgealpha linewidth len(coords[0])<eq>3 c=nodecolor alpha=alpha<times>nodealpha edgecolors='none' s=nodesize marker=nodemarker zorder=2)<block_end><return>drawing<block_end><def_stmt>draw_groups self coords groups alpha=1.0 nodesize_list=[] linewidth=1 nodealpha=1.0 edgealpha=0.01 edgecolor='k' nodemarker_list=[] node_color_list=[] nodeorder_list=[] axs=<none> fig=<none> cm=<none> angle=30 figsize=<none><block_start>"""
Standard drawing function when having multiple clusters
Parameters
----------
coords: a n-by-2 or n-by-3 array with coordinates for each node of the graph.
groups: list[list] or list, for the first case, each sublist represents a cluster
for the second case, list must have the same length as the number of nodes and
nodes with the same number are in the same cluster
Optional parameters
------------------
alpha: float (1.0 by default)
the overall alpha scaling of the plot, [0,1]
nodealpha: float (1.0 by default)
the overall node alpha scaling of the plot, [0, 1]
edgealpha: float (1.0 by default)
the overall edge alpha scaling of the plot, [0, 1]
node_color_list: list of string or RGB ('r' by default)
edgecolor: string or RGB ('k' by default)
nodemarker_list: list of strings ('o' by default)
nodesize_list: list of floats (5.0 by default)
linewidth: float (1.0 by default)
axs,fig: None,None (default)
by default it will create a new figure, or this will plot in axs if not None.
cm: string or colormap object (None by default)
figsize: tuple (None by default)
angle: float (30 by default)
set initial view angle when drawing 3d
Returns
-------
A GraphDrawing object
"""<line_sep>#when values are not provided, use tab20 or gist_ncar colormap to determine colors
number_of_colors=1<line_sep>l_initial_node_color_list=len(node_color_list)<line_sep>l_initial_nodesize_list=len(nodesize_list)<line_sep>l_initial_nodemarker_list=len(nodemarker_list)<line_sep>l_initial_nodeorder_list=len(nodeorder_list)<if_stmt>l_initial_node_color_list<eq>0<block_start>node_color_list=np.zeros(self._num_vertices)<block_end><if_stmt>l_initial_nodesize_list<eq>0<block_start>nodesize_list=25<times>np.ones(self._num_vertices)<block_end><if_stmt>l_initial_nodemarker_list<eq>0<block_start>nodemarker_list='o'<block_end><if_stmt>l_initial_nodeorder_list<eq>0<block_start>nodeorder_list=2<block_end>groups=np.asarray(groups)<if_stmt>groups.ndim<eq>1#convert 1-d group to a 2-d representation
<block_start>grp_dict=defaultdict(list)<for_stmt>idx,key enumerate(groups)<block_start>grp_dict[key].append(idx)<block_end>groups=np.asarray(list(grp_dict.values()))<block_end>number_of_colors<augadd>len(groups)<line_sep>#separate the color for different groups as far as we can
<if_stmt>l_initial_node_color_list<eq>0<block_start><for_stmt>i,g enumerate(groups)<block_start>node_color_list[g]=(1+i)<times>1.0/(number_of_colors-1)<block_end><block_end><if_stmt>number_of_colors<le>20<block_start>cm=plt.get_cmap("tab20b")<block_end><else_stmt><block_start>cm=plt.get_cmap("gist_ncar")<block_end>vmin=0.0<line_sep>vmax=1.0<line_sep>drawing=GraphDrawing(self coords ax=axs figsize=figsize)<line_sep>#m = ScalarMappable(norm=Normalize(vmin=vmin,vmax=vmax), cmap=cm)
#rgba_list = m.to_rgba(node_color_list,alpha=alpha*nodealpha)
self._plotting(drawing edgecolor edgealpha linewidth len(coords[0])<eq>3 s=nodesize_list marker=nodemarker_list zorder=nodeorder_list cmap=cm vmin=vmin vmax=vmax alpha=alpha<times>nodealpha edgecolors='none' c=node_color_list)<line_sep><return>drawing<block_end>"""
def draw_2d(self,pos,axs,cm,nodemarker='o',nodesize=5,edgealpha=0.01,linewidth=1,
node_color_list=None,edgecolor='k',nodecolor='r',node_list=None,nodelist_in=None,
nodelist_out=None,setalpha=1.0,nodealpha=1.0,use_values=False,vmin=0.0,vmax=1.0):
if use_values:
axs.scatter([p[0] for p in pos[nodelist_in]],[p[1] for p in pos[nodelist_in]],c=node_color_list[nodelist_in],
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),alpha=setalpha,zorder=2)
axs.scatter([p[0] for p in pos[nodelist_out]],[p[1] for p in pos[nodelist_out]],c=node_color_list[nodelist_out],
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),alpha=nodealpha,zorder=2)
else:
if node_color_list is not None:
axs.scatter([p[0] for p in pos],[p[1] for p in pos],c=node_color_list,s=nodesize,marker=nodemarker,
cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),zorder=2)
else:
axs.scatter([p[0] for p in pos],[p[1] for p in pos],c=nodecolor,s=nodesize,marker=nodemarker,zorder=2)
node_list = range(self._num_vertices) if node_list is None else node_list
edge_pos = []
for i in node_list:
self._push_edges_for_node(i,self.aj[self.ai[i]:self.ai[i+1]],pos,edge_pos)
edge_pos = np.asarray(edge_pos)
edge_collection = LineCollection(edge_pos,colors=to_rgba(edgecolor,edgealpha),linewidths=linewidth)
#make sure edges are at the bottom
edge_collection.set_zorder(1)
axs.add_collection(edge_collection)
axs.autoscale()
def draw_3d(self,pos,axs,cm,nodemarker='o',nodesize=5,edgealpha=0.01,linewidth=1,
node_color_list=None,angle=30,edgecolor='k',nodecolor='r',node_list=None,
nodelist_in=None,nodelist_out=None,setalpha=1.0,nodealpha=1.0,use_values=False,vmin=0.0,vmax=1.0):
if use_values:
axs.scatter([p[0] for p in pos[nodelist_in]],[p[1] for p in pos[nodelist_in]],[p[2] for p in pos[nodelist_in]],c=node_color_list[nodelist_in],
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),zorder=2,alpha=setalpha)
axs.scatter([p[0] for p in pos[nodelist_out]],[p[1] for p in pos[nodelist_out]],[p[2] for p in pos[nodelist_out]],c=node_color_list[nodelist_out],
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),zorder=2,alpha=nodealpha)
else:
if node_color_list is not None:
axs.scatter([p[0] for p in pos],[p[1] for p in pos],[p[2] for p in pos],c=node_color_list,
s=nodesize,marker=nodemarker,cmap=cm,norm=Normalize(vmin=vmin,vmax=vmax),zorder=2)
else:
axs.scatter([p[0] for p in pos],[p[1] for p in pos],[p[2] for p in pos],c=nodecolor,
s=nodesize,marker=nodemarker,zorder=2)
node_list = range(self._num_vertices) if node_list is None else node_list
edge_pos = []
for i in node_list:
self._push_edges_for_node(i,self.aj[self.ai[i]:self.ai[i+1]],pos,edge_pos)
edge_pos = np.asarray(edge_pos)
edge_collection = Line3DCollection(edge_pos,colors=to_rgba(edgecolor,edgealpha),linewidths=linewidth)
#make sure edges are at the bottom
edge_collection.set_zorder(1)
axs.add_collection(edge_collection)
axs.autoscale()
# Set the initial view
axs.view_init(30, angle)
"""<block_end>
|
# -*- coding: utf-8 -*-
# The MIT License (MIT) - Copyright (c) 2016-2021 <NAME>.
"""
Command-line program to convert a netlist into an equivalent SKiDL program.
"""<import_from_future_stmt> # isort:skip
absolute_import division print_function unicode_literals <import_stmt>argparse<import_stmt>logging<import_stmt>os<import_stmt>shutil<import_stmt>sys<import_from_stmt>builtins open<import_from_stmt>future standard_library<import_from_stmt>.netlist_to_skidl netlist_to_skidl<import_from_stmt>.pckg_info __version__<line_sep>standard_library.install_aliases()<line_sep>###############################################################################
# Command-line interface.
###############################################################################
<def_stmt>main <block_start>parser=argparse.ArgumentParser(description="A Python package for textually describing circuit schematics.")<line_sep>parser.add_argument("--version" "-v" action="version" version="skidl "+__version__)<line_sep>parser.add_argument("--input" "-i" nargs=1 type=str metavar="file.net" help="Netlist input file." )<line_sep>parser.add_argument("--output" "-o" nargs=1 type=str metavar="file.py" help="Output file for SKiDL code." )<line_sep>parser.add_argument("--overwrite" "-w" action="store_true" help="Overwrite an existing file.")<line_sep>parser.add_argument("--nobackup" "-nb" action="store_true" help="Do *not* create backups before modifying files. "+"(Default is to make backup files.)" )<line_sep>parser.add_argument("--debug" "-d" nargs="?" type=int default=0 metavar="LEVEL" help="Print debugging info. (Larger LEVEL means more info.)" )<line_sep>args=parser.parse_args()<line_sep>logger=logging.getLogger("netlist_to_skidl")<if_stmt>args.debug<is><not><none><block_start>log_level=logging.DEBUG+1-args.debug<line_sep>handler=logging.StreamHandler(sys.stdout)<line_sep>handler.setLevel(log_level)<line_sep>logger.addHandler(handler)<line_sep>logger.setLevel(log_level)<block_end><if_stmt>args.input<is><none><block_start>logger.critical("Hey! Give me some netlist files!")<line_sep>sys.exit(2)<block_end><if_stmt>args.output<is><none><block_start>print("Hey! I need some place where I can store the SKiDL code!")<line_sep>sys.exit(1)<block_end><for_stmt>file args.output<block_start><if_stmt>os.path.isfile(file)<block_start><if_stmt><not>args.overwrite<and>args.nobackup<block_start>logger.critical("File {} already exists! Use the --overwrite option to "+"allow modifications to it or allow backups.".format(file))<line_sep>sys.exit(1)<block_end><if_stmt><not>args.nobackup# Create a backup file.
<block_start>index=1# Start with this backup file suffix.
<while_stmt><true><block_start>backup_file=file+".{}.bak".format(index file)<if_stmt><not>os.path.isfile(backup_file)# Found an unused backup file name, so make backup.
<block_start>shutil.copy(file backup_file)<line_sep><break># Backup done, so break out of loop.
<block_end>index<augadd>1<block_end><block_end><block_end><block_end># Else keep looking for an unused backup file name.
skidl_code=netlist_to_skidl(args.input[0])<line_sep>open(args.output[0] "w").write(skidl_code)<block_end>###############################################################################
# Main entrypoint.
###############################################################################
<if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
|
# Copyright 2017 Natural Language Processing Group, Nanjing University, <EMAIL>.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Implement transformer decoder as described in https://arxiv.org/abs/1706.03762. """<import_from_future_stmt> absolute_import<import_from_future_stmt> division<import_from_future_stmt> print_function<import_stmt>tensorflow<as>tf<import_from_stmt>tensorflow.python.util nest<import_from_stmt>collections namedtuple<import_from_stmt>njunmt.utils.constants ModeKeys<import_from_stmt>njunmt.decoders.decoder dynamic_decode<import_from_stmt>njunmt.decoders.decoder initialize_cache<import_from_stmt>njunmt.decoders.decoder Decoder<import_from_stmt>njunmt.layers.common_layers dropout_wrapper<import_from_stmt>njunmt.layers.common_layers layer_preprocess<import_from_stmt>njunmt.layers.common_layers layer_postprocessing<import_from_stmt>njunmt.layers.common_layers transformer_ffn_layer<import_from_stmt>njunmt.layers.common_attention MultiHeadAttention<import_from_stmt>njunmt.layers.common_attention attention_bias_lower_triangle<class_stmt>TransformerDecoder(Decoder)<block_start>""" Implement transformer decoder as described
in https://arxiv.org/abs/1706.03762. """<def_stmt>__init__ self params mode name=<none> verbose=<true><block_start>""" Initializes decoder parameters.
Args:
params: A dictionary of parameters to construct the
decoder architecture.
mode: A mode.
name: The name of this decoder.
verbose: Print decoder parameters if set True.
"""<line_sep>super(TransformerDecoder self).__init__(params mode name verbose)<line_sep>self._self_attention_layers=[]<line_sep>self._encdec_attention_layers=[]<for_stmt>layer range(self.params["num_layers"])<block_start>self._self_attention_layers.append(MultiHeadAttention(self.params["selfattention.params"] self.mode))<line_sep>self._encdec_attention_layers.append(MultiHeadAttention(self.params["attention.params"] self.mode))<block_end><if_stmt>self.mode<eq>ModeKeys.TRAIN<block_start>self._DecoderOutputSpec=namedtuple("TransformerOutput" "decoder_hidden")<block_end><elif_stmt>self.mode<eq>ModeKeys.EVAL<block_start>self._DecoderOutputSpec=namedtuple("TransformerOutput" "decoder_hidden decoder_self_attention encoder_decoder_attention")<block_end><else_stmt><block_start>self._DecoderOutputSpec=namedtuple("TransformerOutput" "decoder_hidden encoder_decoder_attention")<block_end><block_end>@staticmethod<def_stmt>default_params <block_start>""" Returns a dictionary of default parameters of TransformerDecoder. """<line_sep><return>{"num_layers":6 "attention.params":{} # Arbitrary parameters for the enc-dec attention layer
"selfattention.params":{} # Arbitrary parameters for the self-attention layer
"num_filter_units":2048 "num_hidden_units":512 "dropout_relu_keep_prob":0.9 "layer_preprocess_sequence":"n" "layer_postprocess_sequence":"da" "layer_prepostprocess_dropout_keep_prob":0.9}<block_end>@property<def_stmt>output_dtype self<block_start>""" Returns a `collections.namedtuple`,
the definition of decoder output types. """<if_stmt>self.mode<eq>ModeKeys.TRAIN<block_start><return>self._DecoderOutputSpec(decoder_hidden=tf.float32)<block_end><elif_stmt>self.mode<eq>ModeKeys.EVAL<block_start><return>self._DecoderOutputSpec(decoder_hidden=tf.float32 decoder_self_attention=[tf.float32]<times>self.params["num_layers"] encoder_decoder_attention=[tf.float32]<times>self.params["num_layers"])<block_end><else_stmt><block_start><return>self._DecoderOutputSpec(decoder_hidden=tf.float32 encoder_decoder_attention=[tf.float32]<times>self.params["num_layers"])<block_end><block_end><def_stmt>merge_top_features self decoder_output<block_start>""" Merges features of decoder top layers, as the input
of the softmax layer.
Here it is simply the hidden state of the last layer
of the transformer decoder.
Args:
decoder_output: An instance of `collections.namedtuple`
whose element types are defined by `output_dtype`
property.
Returns: An instance of `tf.Tensor`, used as the input of
the softmax layer.
"""<line_sep><return>decoder_output.decoder_hidden<block_end><def_stmt>decode self encoder_output bridge helper target_to_embedding_fn outputs_to_logits_fn **kwargs<block_start>""" Decodes one sample.
Args:
encoder_output: An instance of `collections.namedtuple`
from `Encoder.encode()`.
bridge: None.
helper: An instance of `Feedback` that samples next
symbols from logits.
target_to_embedding_fn: A callable, converts target ids to
embeddings.
outputs_to_logits_fn: A callable, converts decoder outputs
to logits.
Returns: A tuple `(decoder_output, decoder_status)`. The
`decoder_output` is an instance of `collections.namedtuple`
whose element types are defined by `output_dtype` property.
For mode=INFER, the `decoder_status` is a dict containing
hypothesis, log probabilities, beam ids and decoding length.
For mode=TRAIN/EVAL, the `decoder_status` is a `tf.Tensor`
indicating logits (computed by `target_modality`), of shape
[timesteps, batch_size, vocab_size].
"""<if_stmt>bridge<is><not><none><and>self.verbose<block_start>tf.logging.info("TransformerDecoder ignores bridge: {}".format(bridge.name))<block_end><if_stmt>self.mode<eq>ModeKeys.TRAIN<or>self.mode<eq>ModeKeys.EVAL<block_start><assert_stmt>hasattr(helper "label_ids") ("helper ({}) for TransformerDecoder when mode=TRAIN/EVAL "<concat>"should provide attr \"label_ids\"".format(type(helper)))<line_sep># prepare decoder input
label_ids=getattr(helper "label_ids")# [batch_size, max_len_trg]
batch_size=tf.shape(label_ids)[0]<line_sep># shift
target_sos_ids=tf.tile([helper.vocab.sos_id] [batch_size])<line_sep>target_sos_ids=tf.reshape(target_sos_ids [batch_size 1])<line_sep>label_ids=tf.concat([target_sos_ids label_ids] axis=1)[: :-1]<line_sep>decoder_inputs=target_to_embedding_fn(label_ids)<with_stmt>tf.variable_scope(self.name)<block_start>cache=self.prepare(encoder_output <none> helper)<line_sep>outputs,decoder_self_attention,encdec_attention=self._transform(decoder_inputs cache)<line_sep># [batch_size, time, dim]
<if_stmt>self.mode<eq>ModeKeys.TRAIN<block_start>final_outputs=self._DecoderOutputSpec(decoder_hidden=outputs)<block_end><else_stmt><block_start>final_outputs=self._DecoderOutputSpec(decoder_hidden=outputs # transpose to [length_q, batch_size, num_heads length_k]
decoder_self_attention=nest.map_structure(<lambda>x:tf.transpose(x [2 0 1 3]) decoder_self_attention) encoder_decoder_attention=nest.map_structure(<lambda>x:tf.transpose(x [2 0 1 3]) encdec_attention))<block_end>decoder_top_features=self.merge_top_features(final_outputs)<block_end># do transpose to fit loss function, [time, batch_size, dim]
decoder_top_features=tf.transpose(decoder_top_features [1 0 2])<line_sep>logits=outputs_to_logits_fn(decoder_top_features)# [time, batch_size, vocab_size]
<return>final_outputs logits<block_end>outputs,infer_status=dynamic_decode(decoder=self encoder_output=encoder_output bridge=<none> helper=helper target_to_embedding_fn=target_to_embedding_fn outputs_to_logits_fn=outputs_to_logits_fn **kwargs)<line_sep><return>outputs infer_status<block_end><def_stmt>prepare self encoder_output bridge helper<block_start>""" Prepares for `step()` function.
Do
1. acquire attention information from `encoder_output`;
Args:
encoder_output: An instance of `collections.namedtuple`
from `Encoder.encode()`.
bridge: None.
helper: An instance of `Feedback` that samples next
symbols from logits.
Returns: A dict containing decoder self-attention caches, pre-projected attention
keys, attention values and attention length; it is passed
to the `step()` function.
"""<line_sep>_=bridge<line_sep>attention_values=encoder_output.attention_values<line_sep>attention_length=encoder_output.attention_length<if_stmt>hasattr(encoder_output "attention_bias")<block_start>attention_bias=encoder_output.attention_bias<block_end><else_stmt><block_start>attention_bias=MultiHeadAttention.attention_length_to_bias(tf.shape(attention_values)[1] attention_length)<block_end># initialize cache
<if_stmt>self.mode<eq>ModeKeys.INFER<block_start>decoding_states={}<line_sep>batch_size=tf.shape(attention_values)[0]<line_sep>depth=self._self_attention_layers[0].attention_value_depth<if_stmt>depth<l>0# TODO please check when code goes into this condition
<block_start>depth=tf.shape(attention_values)[2]<block_end># initialize decoder self attention keys/values
<for_stmt>l range(self.params["num_layers"])<block_start>keys=tf.zeros([batch_size 0 depth])<line_sep>values=tf.zeros([batch_size 0 depth])<line_sep># Ensure shape invariance for tf.while_loop.
keys.set_shape([<none> <none> depth])<line_sep>values.set_shape([<none> <none> depth])<with_stmt>tf.variable_scope("layer_%d"%l)<block_start><with_stmt>tf.variable_scope("encdec_attention")<block_start><with_stmt>tf.variable_scope(self._encdec_attention_layers[l].name)<block_start>preproj_keys,preproj_values=self._encdec_attention_layers[l].compute_kv(attention_values)<block_end><block_end><block_end>decoding_states["layer_{}".format(l)]={"self_attention":{"keys":keys "values":values} "encdec_attention":{"attention_keys":preproj_keys "attention_values":preproj_values}}<block_end><block_end><else_stmt><block_start>decoding_states=<none><block_end>init_cache=initialize_cache(decoding_states=decoding_states memory=attention_values memory_bias=attention_bias)<line_sep><return>init_cache<block_end><def_stmt>step self decoder_input cache<block_start>""" Decodes one step.
Args:
decoder_input: The decoder input for this timestep.
A Tensor, with shape [batch_size, dmodel].
cache: A dict containing decoding states at previous
timestep, attention values and attention length.
Returns: A tuple `(cur_decoder_outputs, cur_cache)` at this timestep.
The `cur_decoder_outputs` must be an instance of `collections.namedtuple`
whose element types are defined by `output_dtype` property. The
`cur_cache` must have the same structure with `cache`.
"""<line_sep># decoder self attention: [batch_size, num_heads, length_q, length_k]
outputs,decoder_self_attention,encdec_attention=self._transform(tf.expand_dims(decoder_input axis=1) cache)<line_sep>final_outputs=self._DecoderOutputSpec(decoder_hidden=outputs[: -1 :] # decoder_self_attention=[tf.squeeze(att, axis=2) for att in decoder_self_attention],
encoder_decoder_attention=[tf.squeeze(att axis=2)<for>att encdec_attention])<line_sep># loop on decoder_state, actually it is not used
<return>final_outputs cache<block_end><def_stmt>_transform self decoder_inputs cache pad_remover=<none><block_start>""" Decodes one step
Args:
decoder_inputs: The decoder input for this timestep,
A Tensor, with shape [batch_size, timesteps, dmodel].
Note that when mode==INFER, timesteps=1.
cache: A dict containing decoding states at previous
timestep, attention values and attention length.
pad_remover: An expert_utils.PadRemover object tracking the padding
positions. If provided, the padding is removed before applying
the convolution, and restored afterward. This can give a significant
speedup (says Google's tensor2tensor code).
Returns: A transformed Tensor.
"""<line_sep># [batch_size, max_len_src, dim]
encdec_attention_values=cache["memory"]<line_sep># [batch_size, 1, 1, max_len_src]
encdec_attention_bias=cache["memory_bias"]<line_sep>decoder_self_attention_scores=[]<line_sep>encdec_attention_scores=[]<line_sep># decoder_self_attention_bias: [1, 1, max_len_trg, max_len_trg]
decoder_self_attention_bias=attention_bias_lower_triangle(tf.shape(decoder_inputs)[1])<line_sep>x=dropout_wrapper(decoder_inputs self.params["layer_prepostprocess_dropout_keep_prob"])<for_stmt>layer range(self.params["num_layers"])<block_start>layer_name="layer_{}".format(layer)<line_sep>layer_cache=<none><if>cache["decoding_states"]<is><none><else>cache["decoding_states"][layer_name]<line_sep>selfatt_cache=<none><if>layer_cache<is><none><else>layer_cache["self_attention"]<line_sep>encdecatt_cache=<none><if>layer_cache<is><none><else>layer_cache["encdec_attention"]<with_stmt>tf.variable_scope("layer_%d"%layer)<block_start><with_stmt>tf.variable_scope("self_attention")# self attention layer
<block_start>w_y,y=self._self_attention_layers[layer].build(query=<none> memory=layer_preprocess(x=x process_sequence=self.params["layer_preprocess_sequence"] dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"]) memory_bias=decoder_self_attention_bias cache=selfatt_cache)<line_sep># [batch_size, num_heads, length_q, length_k]
decoder_self_attention_scores.append(w_y)<line_sep># apply dropout, layer norm, residual
x=layer_postprocessing(x=y previous_x=x process_sequence=self.params["layer_postprocess_sequence"] dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"])<block_end><with_stmt>tf.variable_scope("encdec_attention")# encoder-decoder attention
<block_start>w_y,y=self._encdec_attention_layers[layer].build(query=layer_preprocess(x=x process_sequence=self.params["layer_preprocess_sequence"] dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"]) memory=encdec_attention_values memory_bias=encdec_attention_bias cache=encdecatt_cache)<line_sep># [batch_size, num_heads, length_q, length_k]
encdec_attention_scores.append(w_y)<line_sep># apply dropout, layer norm, residual
x=layer_postprocessing(x=y previous_x=x process_sequence=self.params["layer_postprocess_sequence"] dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"])<block_end><with_stmt>tf.variable_scope("ffn")<block_start>y=transformer_ffn_layer(x=layer_preprocess(x=x process_sequence=self.params["layer_preprocess_sequence"] dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"]) filter_size=self.params["num_filter_units"] output_size=self.params["num_hidden_units"] pad_remover=pad_remover dropout_relu_keep_prob=self.params["dropout_relu_keep_prob"])<line_sep># apply dropout, layer norm, residual
x=layer_postprocessing(x=y previous_x=x process_sequence=self.params["layer_postprocess_sequence"] dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"])<block_end><block_end><block_end>x=layer_preprocess(x=x process_sequence=self.params["layer_preprocess_sequence"] dropout_keep_prob=self.params["layer_prepostprocess_dropout_keep_prob"])<line_sep><return>x decoder_self_attention_scores encdec_attention_scores<block_end><block_end>
|
"""add default notebook to users
Revision ID: <KEY>
Revises: <KEY>
Create Date: 2016-12-17 21:24:03.788486
"""<line_sep># revision identifiers, used by Alembic.
revision='<KEY>'<line_sep>down_revision='<KEY>'<import_from_stmt>alembic op<import_stmt>sqlalchemy<as>sa<def_stmt>upgrade # ### commands auto generated by Alembic - please adjust! ###
<block_start>op.add_column('users' sa.Column('default_notebook' sa.Integer() nullable=<true>))<line_sep># ### end Alembic commands ###
<block_end><def_stmt>downgrade # ### commands auto generated by Alembic - please adjust! ###
<block_start>op.drop_column('users' 'default_notebook')<line_sep># ### end Alembic commands ###
<block_end>
|
expected_output={"GigabitEthernet0/1/1":{"service_policy":{"output":{"policy_name":{"shape-out":{"class_map":{"class-default":{"bytes":0 "bytes_output":0 "match":["any"] "match_evaluation":"match-any" "no_buffer_drops":0 "packets":0 "pkts_output":0 "queue_depth":0 "queue_limit_packets":"64" "queueing":<true> "rate":{"drop_rate_bps":0 "interval":300 "offered_rate_bps":0 } "shape_bc_bps":2000 "shape_be_bps":2000 "shape_cir_bps":500000 "shape_type":"average" "target_shape_rate":500000 "total_drops":0 }}}}}}}}<line_sep>
|
<import_from_stmt>abc abstractmethod<import_from_stmt>typing Callable Sequence Tuple<import_stmt>numpy<as>np<import_from_stmt>starfish.core.imagestack.imagestack ImageStack<import_from_stmt>starfish.core.intensity_table.decoded_intensity_table DecodedIntensityTable<import_from_stmt>starfish.core.pipeline.algorithmbase AlgorithmBase<import_from_stmt>starfish.core.types Number<import_from_stmt>.combine_adjacent_features ConnectedComponentDecodingResult<class_stmt>DetectPixelsAlgorithm(metaclass=AlgorithmBase)<block_start>@abstractmethod<def_stmt>run self primary_image:ImageStack *args <arrow>Tuple[DecodedIntensityTable ConnectedComponentDecodingResult]<block_start>"""Finds spots in an ImageStack"""<line_sep><raise>NotImplementedError()<block_end>@staticmethod<def_stmt>_get_measurement_function measurement_type:str<arrow>Callable[[Sequence] Number]<block_start><try_stmt><block_start>measurement_function=getattr(np measurement_type)<block_end><except_stmt>AttributeError<block_start><raise>ValueError(f'measurement_type must be a numpy reduce function such as "max" or "mean". '<concat>f'{measurement_type} not found.')<block_end><return>measurement_function<block_end><block_end>
|
# -*- coding: utf-8 -*-
"""This module contains exceptions
"""<class_stmt>PublishError(RuntimeError)<block_start>"""Raised when the published version is not matching the quality
"""<line_sep><pass><block_end>
|
<import_stmt>os<line_sep># MXNET_CPU_WORKER_NTHREADS must be greater than 1 for custom op to work on CPU
#os.environ['MXNET_CPU_WORKER_NTHREADS'] = '2'
<import_stmt>mxnet<as>mx<line_sep># define metric of accuracy
<class_stmt>Accuracy(mx.metric.EvalMetric)<block_start><def_stmt>__init__ self num=<none><block_start>super(Accuracy self).__init__('accuracy' num)<block_end><def_stmt>update self labels preds<block_start>mx.metric.check_label_shapes(labels preds)<if_stmt>self.num<is><not><none><block_start><assert_stmt>len(labels)<eq>self.num<block_end>pred_label=mx.nd.argmax_channel(preds[0]).asnumpy().astype('int32')<line_sep>label=labels[0].asnumpy().astype('int32')<line_sep>mx.metric.check_label_shapes(label pred_label)<line_sep>self.sum_metric<augadd>(pred_label.flat<eq>label.flat).sum()<line_sep>self.num_inst<augadd>len(pred_label.flat)<block_end><block_end># define some metric of center_loss
<class_stmt>CenterLossMetric(mx.metric.EvalMetric)<block_start><def_stmt>__init__ self<block_start>super(CenterLossMetric self).__init__('center_loss')<block_end><def_stmt>update self labels preds<block_start>self.sum_metric<augadd>preds[1].asnumpy()[0]<line_sep>self.num_inst<augadd>1<block_end><block_end># see details:
# <A Discriminative Feature Learning Approach for Deep Face Recognition>
<class_stmt>CenterLoss(mx.operator.CustomOp)<block_start><def_stmt>__init__ self ctx shapes dtypes num_class alpha scale=1.0<block_start><if_stmt><not>len(shapes[0])<eq>2<block_start><raise>ValueError('dim for input_data should be 2 for CenterLoss')<block_end>self.alpha=alpha<line_sep>self.batch_size=shapes[0][0]<line_sep>self.num_class=num_class<line_sep>self.scale=scale<block_end><def_stmt>forward self is_train req in_data out_data aux<block_start>labels=in_data[1].asnumpy()<line_sep>diff=aux[0]<line_sep>center=aux[1]<line_sep># store x_i - c_yi
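# forward computes the center loss L = sum_i ||x_i - c_{y_i}||^2 / (2 * batch_size); diff is kept as an auxiliary buffer for backward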
<for_stmt>i range(self.batch_size)<block_start>diff[i]=in_data[0][i]-center[int(labels[i])]<block_end>loss=mx.nd.sum(mx.nd.square(diff))/self.batch_size/2<line_sep>self.assign(out_data[0] req[0] loss)<block_end><def_stmt>backward self req out_grad in_data out_data in_grad aux<block_start>diff=aux[0]<line_sep>center=aux[1]<line_sep>sum_=aux[2]<line_sep># back grad is just scale * ( x_i - c_yi)
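# i.e. dL/dx_i = scale * (x_i - c_{y_i}) / batch_size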
grad_scale=float(self.scale/self.batch_size)<line_sep>self.assign(in_grad[0] req[0] diff<times>grad_scale)<line_sep># update the center
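# per-class rule: delta_c = sum_{i: y_i == c} (x_i - c) / (1 + n_c), then c += alpha * delta_c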
labels=in_data[1].asnumpy()<line_sep>label_occur=dict()<for_stmt>i,label enumerate(labels)<block_start>label_occur.setdefault(int(label) []).append(i)<block_end><for_stmt>label,sample_index label_occur.items()<block_start>sum_[:]=0<for_stmt>i sample_index<block_start>sum_=sum_+diff[i]<block_end>delta_c=sum_/(1+len(sample_index))<line_sep>center[label]<augadd>self.alpha<times>delta_c<block_end><block_end><block_end>@mx.operator.register("centerloss")<class_stmt>CenterLossProp(mx.operator.CustomOpProp)<block_start><def_stmt>__init__ self num_class alpha scale=1.0 batchsize=64<block_start>super(CenterLossProp self).__init__(need_top_grad=<false>)<line_sep># convert it to numbers
self.num_class=int(num_class)<line_sep>self.alpha=float(alpha)<line_sep>self.scale=float(scale)<line_sep>self.batchsize=int(batchsize)<block_end><def_stmt>list_arguments self<block_start><return>['data' 'label']<block_end><def_stmt>list_outputs self<block_start><return>['output']<block_end><def_stmt>list_auxiliary_states self# call them 'bias' for zero initialization
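# MXNet's default initializer zero-fills states whose names end in 'bias', so these auxiliary buffers start at zero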
<block_start><return>['diff_bias' 'center_bias' 'sum_bias']<block_end><def_stmt>infer_shape self in_shape<block_start>data_shape=in_shape[0]<line_sep>label_shape=(in_shape[0][0] )<line_sep># store diff , same shape as input batch
diff_shape=[self.batchsize data_shape[1]]<line_sep># store the center of each class , should be ( num_class, d )
center_shape=[self.num_class diff_shape[1]]<line_sep># computation buf
sum_shape=[diff_shape[1] ]<line_sep>output_shape=[1 ]<line_sep><return>[data_shape label_shape] [output_shape] [diff_shape center_shape sum_shape]<block_end><def_stmt>create_operator self ctx shapes dtypes<block_start><return>CenterLoss(ctx shapes dtypes self.num_class self.alpha self.scale)<block_end><block_end>
|
<import_from_stmt>ethereum.tools tester<import_from_stmt>ethereum.utils sha3 normalize_address<line_sep>c=tester.Chain()<line_sep>x=c.contract(open('rando.v.py').read() language='vyper')<for_stmt>i range(10)<block_start>x.deposit(sender=tester.keys[i] value=(i+1)<times>10<power>15)<line_sep>c.mine(1)<block_end>o=[0]<times>10<for_stmt>i range(550)<block_start>addr=normalize_address(x.random_select(sha3(str(i))))<line_sep>o[tester.accounts.index(addr)]<augadd>1<block_end><for_stmt>i,v enumerate(o)<block_start>ev=10<times>(i+1)<if_stmt><not>ev-4<times>ev<power>0.5<l>v<l>ev+4<times>ev<power>0.5<block_start><raise>Exception("More than four standard deviations away; something is wrong: %.2f %d %.2f"%(ev-4<times>ev<power>0.5 v ev+4<times>ev<power>0.5))<block_end><block_end>print(o)<line_sep>
|
<import_stmt>unittest<import_from_stmt>demo.demo5 app db Author Book# db, Author and Book are assumed to be defined in demo.demo5 alongside app
<class_stmt>DatabaseTestCase(unittest.TestCase)<block_start>"""
Test adding and deleting database records; a dedicated test database must be configured instead of the real one
"""<def_stmt>setUp self<block_start>"""单元测试开始之前执行的操作"""<line_sep>app.config['TESTING']=<true><line_sep>app.config['SQLALCHEMY_DATABASE_URI']='mysql://root:mysql@localhost/test0'<line_sep>self.app=app<line_sep>db.create_all()<block_end><def_stmt>tearDown self<block_start>"""单元测试结束后执行的操作"""<line_sep># db.session类似于数据库的连接
db.session.remove()<line_sep>db.drop_all()<block_end><def_stmt>test_append_data self<block_start>"""Add records and verify they can be queried back"""<line_sep>au=Author(name='itcast')<line_sep>bk=Book(info='python')<line_sep>db.session.add_all([au bk])<line_sep>db.session.commit()<line_sep>author=Author.query.filter_by(name='itcast').first()<line_sep>book=Book.query.filter_by(info='python').first()<line_sep># assert that the data exists
self.assertIsNotNone(author)<line_sep>self.assertIsNotNone(book)<block_end><block_end>
|
# -*- coding: future_fstrings -*-
<import_stmt>requests<import_stmt>json<import_stmt>urllib<import_stmt>re<import_from_stmt>requests.exceptions HTTPError<import_from_stmt>.import_class Import<class_stmt>Imports# url snippets
<block_start>groups_snippet='groups'<line_sep>imports_snippet='imports'<line_sep>dataset_displayname_snippet='datasetDisplayName'<line_sep>nameconflict_snippet='nameConflict'<def_stmt>__init__ self client<block_start>self.client=client<line_sep>self.base_url=f'{self.client.api_url}/{self.client.api_version_snippet}/{self.client.api_myorg_snippet}'<line_sep>self.upload_file_replace_regex=re.compile('(?![A-z]|[0-9]).')<block_end>@classmethod<def_stmt>import_from_response cls response<block_start>response_dict=json.loads(response.text)<line_sep><return>Import.from_dict(response_dict)<block_end>@classmethod<def_stmt>imports_from_response cls response<block_start>response_list=json.loads(response.text).get(Import.value_key)<line_sep><return>[Import.from_dict(x)<for>x response_list]<block_end><def_stmt>upload_file self filename dataset_displayname nameconflict=<none> group_id=<none><block_start><if_stmt>group_id<is><none><block_start>groups_part='/'<block_end><else_stmt><block_start>groups_part=f'/{self.groups_snippet}/{group_id}/'<block_end># substitute using the regex pattern
prepared_displayname=re.sub(self.upload_file_replace_regex '-' dataset_displayname)<line_sep># append the pbix extension (strange yes, but names correctly in powerbi service if so)
prepared_displayname=f'{prepared_displayname}.pbix'<line_sep>url=f'{self.base_url}{groups_part}{self.imports_snippet}'<concat>f'?{urllib.parse.urlencode({self.dataset_displayname_snippet:prepared_displayname})}'<if_stmt>nameconflict<is><not><none><block_start>url=url+f'&{self.nameconflict_snippet}={nameconflict}'<block_end>headers=self.client.auth_header<try_stmt><block_start><with_stmt>open(filename 'rb')<as>file_obj<block_start>response=requests.post(url headers=headers files={'file':file_obj })<block_end><block_end><except_stmt>TypeError# assume filename is a file-like object already
<block_start>response=requests.post(url headers=headers files={'file':filename })<block_end># 200 OK
<if_stmt>response.status_code<eq>200<block_start>import_object=self.import_from_response(response)<block_end># 202 Accepted
<elif_stmt>response.status_code<eq>202<block_start>import_object=self.import_from_response(response)<block_end># 409 Conflict (due to name)
<elif_stmt>response.status_code<eq>409<block_start><raise>NotImplementedError("Name conflict resolution not implemented yet")<block_end><else_stmt><block_start><raise>HTTPError(response f"Upload file failed with status code: {response.json()}")<block_end><return>import_object<block_end><def_stmt>get_import self import_id group_id=<none><block_start><if_stmt>group_id<is><none><block_start>groups_part='/'<block_end><else_stmt><block_start>groups_part=f'/{self.groups_snippet}/{group_id}/'<block_end>url=f'{self.base_url}{groups_part}{self.imports_snippet}/{import_id}'<line_sep>headers=self.client.auth_header<line_sep>response=requests.get(url headers=headers)<line_sep># 200 OK
<if_stmt>response.status_code<eq>200<block_start>import_object=self.import_from_response(response)<block_end><else_stmt><block_start><raise>HTTPError(response f"Get import failed with status code: {response.json()}")<block_end><return>import_object<block_end><def_stmt>get_imports self group_id=<none><block_start><if_stmt>group_id<is><none><block_start>groups_part='/'<block_end><else_stmt><block_start>groups_part=f'/{self.groups_snippet}/{group_id}/'<block_end>url=f'{self.base_url}{groups_part}{self.imports_snippet}'<line_sep>headers=self.client.auth_header<line_sep>response=requests.get(url headers=headers)<line_sep># 200 OK
<if_stmt>response.status_code<eq>200<block_start>import_object=self.imports_from_response(response)<block_end><else_stmt><block_start><raise>HTTPError(response f"Get imports failed with status code: {response.json()}")<block_end><return>import_object<block_end><block_end>
|
<import_from_stmt>contextlib contextmanager<import_stmt>os<import_from_stmt>pathlib Path<import_stmt>subprocess<import_from_stmt>jinja2 Environment FileSystemLoader StrictUndefined<import_from_stmt>ruamel.yaml YAML<import_from_stmt>sayn.database.creator create<as>create_db<line_sep>@contextmanager<def_stmt>inside_dir dirpath fs=dict()<block_start>"""
Execute code from inside the given directory
:param dirpath: String, path of the directory the command is being run.
"""<line_sep>old_path=os.getcwd()<try_stmt><block_start>os.chdir(dirpath)<for_stmt>filepath,content fs.items()<block_start>fpath=Path(filepath)<line_sep>fpath.parent.mkdir(parents=<true> exist_ok=<true>)<line_sep>fpath.write_text(content)<block_end><yield><block_end><finally_stmt><block_start>os.chdir(old_path)<block_end><block_end>@contextmanager<def_stmt>create_project dirpath settings=<none> project=<none> groups=dict() env=dict()<block_start>"""
Execute code from inside the given directory, creating the sayn project files
:param settings: String, yaml for a settings.yaml file
:param project: String, yaml for a project.yaml file
:param groups: Dict, dict of yaml for the contents of the tasks folder
"""<line_sep>old_path=os.getcwd()<try_stmt><block_start>os.chdir(dirpath)<if_stmt>settings<is><not><none><block_start>Path(dirpath "settings.yaml").write_text(settings)<block_end><if_stmt>project<is><not><none><block_start>Path(dirpath "project.yaml").write_text(project)<block_end><if_stmt>len(groups)<g>0<block_start><for_stmt>name,group groups.items()<block_start>Path(dirpath f"{name}.yaml").write_text(group)<block_end><block_end><if_stmt>len(env)<g>0<block_start>os.environ.update(env)<block_end><yield><block_end><finally_stmt><block_start>os.chdir(old_path)<for_stmt>k env.keys()<block_start><del_stmt>os.environ[k]<block_end><block_end><block_end><def_stmt>run_sayn *args<block_start><return>subprocess.check_output(f"sayn {' '.join(args)}" shell=<true> stderr=subprocess.STDOUT)<block_end># Task Simulators
# create empty tracker class to enable the run to go through
<class_stmt>VoidTracker<block_start><def_stmt>set_run_steps self steps<block_start><pass><block_end><def_stmt>start_step self step<block_start><pass><block_end><def_stmt>finish_current_step self<block_start><pass><block_end><block_end>vd=VoidTracker()<def_stmt>simulate_task task source_db=<none> target_db=<none> run_arguments=dict() task_params=dict()<block_start>task.name="test_task"# set for compilation output during run
task.group="test_group"# set for compilation output during run
task.run_arguments={"folders":{"sql":"sql" "compile":"compile"} "command":"run" "debug":<false> "full_load":<false> **run_arguments }<if_stmt>target_db<is><not><none><block_start>task.connections={"target_db":create_db("target_db" "target_db" target_db.copy())}<block_end><if_stmt>source_db<is><not><none><block_start>task.connections.update({"source_db":create_db("source_db" "source_db" source_db.copy())})<block_end>task._default_db="target_db"<line_sep>task.tracker=vd<line_sep>task.jinja_env=Environment(loader=FileSystemLoader(os.getcwd()) undefined=StrictUndefined keep_trailing_newline=<true> )<line_sep>task.jinja_env.globals.update(**task_params)<block_end><def_stmt>validate_table db table_name expected_data<block_start>result=db.read_data(f"select * from {table_name}")<if_stmt>len(result)<ne>len(expected_data)<block_start><return><false><block_end>result=sorted(result key=<lambda>x:list(x.values()))<line_sep>expected_data=sorted(expected_data key=<lambda>x:list(x.values()))<for_stmt>i range(len(result))<block_start><if_stmt>result[i]<ne>expected_data[i]<block_start><return><false><block_end><block_end><return><true><block_end>@contextmanager<def_stmt>tables_with_data db tables extra_tables=list()<block_start>tables_to_delete=extra_tables.copy()<for_stmt>table,data tables.items()<block_start><if_stmt>isinstance(table tuple)<block_start>schema=table[0]<line_sep>table=table[1]<line_sep>tables_to_delete.append(f"{schema}.{table}")<block_end><else_stmt><block_start>schema=<none><line_sep>tables_to_delete.append(table)<block_end>db.load_data(table data schema=schema replace=<true>)<block_end><try_stmt><block_start><yield><block_end><finally_stmt><block_start>clear_tables(db tables_to_delete)<block_end><block_end><def_stmt>clear_tables db tables<block_start><for_stmt>table tables<block_start><try_stmt><block_start>db.execute(f"DROP TABLE IF EXISTS {table}")<block_end><except_stmt><block_start><pass><block_end><try_stmt><block_start>db.execute(f"DROP VIEW IF EXISTS {table}")<block_end><except_stmt><block_start><pass><block_end><block_end><block_end>
|
# stdlib
<import_from_stmt>typing Any<import_from_stmt>typing List<line_sep># relative
<import_from_stmt>..manager TensorChainManager<import_from_stmt>.mpc_tensor MPCTensor<import_from_stmt>.utils ispointer<class_stmt>MPCTensorAncestor(TensorChainManager)<block_start><def_stmt>share self *parties:List[Any]<arrow>MPCTensor# relative
<block_start><import_from_stmt>.mpc_tensor MPCTensor<if_stmt>ispointer(self.child)<block_start><raise>ValueError("Cannot call share on a remote tensor. Use MPCTensor(remote_secret)")<block_end><return>MPCTensor(secret=self.child parties=list(parties))<block_end><block_end>
|
<import_from_stmt>glad.lang.common.loader BaseLoader<import_from_stmt>glad.lang.volt.loader LOAD_OPENGL_DLL<import_from_stmt>glad.lang.d.loader.gl _OPENGL_HAS_EXT<as>_D_OPENGL_HAS_EXT<line_sep>_OPENGL_LOADER=LOAD_OPENGL_DLL%{'pre':'private' 'init':'open_gl' 'proc':'get_proc' 'terminate':'close_gl'}+'''
bool gladLoadGL() {
StructToDg structToDg;
structToDg.func = cast(void*)get_proc;
auto dg = *cast(Loader*)&structToDg;
bool status = false;
if(open_gl()) {
status = gladLoadGL(dg);
close_gl();
}
return status;
}
'''<line_sep>_OPENGL_HAS_EXT=('global int GL_MAJOR = 0;\nglobal int GL_MINOR = 0;'+'\n'.join(l<for>l _D_OPENGL_HAS_EXT.replace('@nogc' '').splitlines()<if>'struct'<not><in>l).replace('GLVersion.major' 'GL_MAJOR')+'\n\n')<class_stmt>OpenGLVoltLoader(BaseLoader)<block_start><def_stmt>write_header_end self fobj<block_start><pass><block_end><def_stmt>write_header self fobj<block_start><pass><block_end><def_stmt>write self fobj<block_start>fobj.write('import watt.library;\n')<if_stmt><not>self.disabled<and>'gl'<in>self.apis<block_start>fobj.write(_OPENGL_LOADER)<block_end><block_end><def_stmt>write_begin_load self fobj<block_start>fobj.write('\tglGetString = cast(typeof(glGetString))load("glGetString");\n')<line_sep>fobj.write('\tif(glGetString is null) { return false; }\n')<line_sep>fobj.write('\tif(glGetString(GL_VERSION) is null) { return false; }\n\n')<block_end><def_stmt>write_end_load self fobj<block_start>fobj.write('\treturn GL_MAJOR != 0 || GL_MINOR != 0;\n')<block_end><def_stmt>write_find_core self fobj<block_start>fobj.write('\tconst(char)* v = cast(const(char)*)glGetString(GL_VERSION);\n')<line_sep>fobj.write('\tint major = v[0] - \'0\';\n')<line_sep>fobj.write('\tint minor = v[2] - \'0\';\n')<line_sep>fobj.write('\tGL_MAJOR = major; GL_MINOR = minor;\n')<block_end><def_stmt>write_has_ext self fobj<block_start>fobj.write(_OPENGL_HAS_EXT)<block_end><block_end>
|
# Copyright 2014-present PlatformIO <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>SCons.Script AlwaysBuild Import<line_sep>Import("env")<line_sep># Added in PIO Core 4.4.0
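# older cores lack env.AddPlatformTarget, so register an equivalent helper built on Alias/AlwaysBuild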
<if_stmt><not>hasattr(env "AddPlatformTarget")<block_start><def_stmt>AddPlatformTarget env name dependencies actions title=<none> description=<none> always_build=<true> <block_start>target=env.Alias(name dependencies actions)<if_stmt>always_build<block_start>AlwaysBuild(target)<block_end><return>target<block_end>env.AddMethod(AddPlatformTarget)<block_end>
|
# encoding: utf-8
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_stmt>view_tests_base<class_stmt>MetaStaticPagesViewTests(view_tests_base.ViewTestsBase)<block_start><def_stmt>setUp self<block_start>super(MetaStaticPagesViewTests self).setUp()<line_sep>self.data_generator.repo()<block_end><def_stmt>test_get_homepage self<block_start>resp=self.client.get('/' secure=<true>)<line_sep>self.assertTrue('You are now running Person Finder.'<in>resp.content)<line_sep># Legacy path for the homepage.
resp=self.client.get('/global/home.html' secure=<true>)<line_sep>self.assertTrue('You are now running Person Finder.'<in>resp.content)<block_end><def_stmt>test_get_responders_page self<block_start>resp=self.client.get('/global/responders.html' secure=<true>)<line_sep>self.assertTrue('Information for responders'<in>resp.content)<line_sep>resp=self.client.get('/global/responders.html?lang=ja' secure=<true>)<line_sep>self.assertTrue('災害対応者向け情報'<in>resp.content)<block_end><def_stmt>test_get_howto_page self<block_start>resp=self.client.get('/global/howto.html' secure=<true>)<line_sep>self.assertTrue('from a PC or mobile phone.'<in>resp.content)<line_sep>resp=self.client.get('/global/howto.html?lang=ja' secure=<true>)<line_sep>self.assertTrue('自分の安否を伝える'<in>resp.content)<block_end><block_end>
|
"""
Copyright (c) Facebook, Inc. and its affiliates.
"""<import_stmt>colorsys<import_stmt>random<import_stmt>numpy<as>np<import_stmt>base64<import_stmt>io<import_stmt>torch<import_stmt>torchvision<import_stmt>webcolors<import_from_stmt>PIL Image ImageDraw<import_from_stmt>torchvision transforms<import_from_stmt>torchvision.models.detection.faster_rcnn FastRCNNPredictor<import_from_stmt>torchvision.models.detection.mask_rcnn MaskRCNNPredictor<import_from_stmt>collections defaultdict<def_stmt>random_colors N bright=<true><block_start>"""Generate random colors.
To get visually distinct colors, generate them in HSV space then
convert to RGB.
"""<line_sep>brightness=1.0<if>bright<else>0.7<line_sep>hsv=[(i/N 1 brightness)<for>i range(N)]<line_sep>colors=list(map(<lambda>c:colorsys.hsv_to_rgb(*c) hsv))<line_sep>random.shuffle(colors)<line_sep><return>colors<block_end>colors=random_colors(20)<def_stmt>get_random_color <block_start><return>colors[0]<block_end><def_stmt>get_encoded_image file<block_start><with_stmt>open(file "rb")<as>image<block_start>f=base64.b64encode(image.read())<block_end><return>f<block_end><def_stmt>get_decoded_image fstr<block_start>image=base64.b64decode(fstr)<line_sep><return>Image.open(io.BytesIO(image)).convert("RGB")<block_end><def_stmt>get_np_decoded_image enc<block_start>a=base64.decodebytes(enc)<line_sep>b=np.frombuffer(a dtype=np.uint8)<line_sep>print("decoded pros {}, {}".format(type(b) b.shape))<line_sep><return>Image.fromarray(b "RGB")<block_end><def_stmt>draw_xyz img xyz cent<block_start>d=ImageDraw.Draw(img)<line_sep>d.text(cent str(xyz[0])+",\n"+str(xyz[1])+",\n"+str(xyz[2]) fill=(255 255 255))<line_sep>d.point(cent fill=(255 0 0))<line_sep><return>img<block_end><def_stmt>get_closest_color_name requested_colour<block_start>min_colours={}<for_stmt>key,name webcolors.CSS3_HEX_TO_NAMES.items()<block_start>r_c,g_c,b_c=webcolors.hex_to_rgb(key)<line_sep>rd=(r_c-requested_colour[0])<power>2<line_sep>gd=(g_c-requested_colour[1])<power>2<line_sep>bd=(b_c-requested_colour[2])<power>2<line_sep>min_colours[(rd+gd+bd)]=name<block_end><return>min_colours[min(min_colours.keys())]<block_end><def_stmt>get_color_tag img cent<block_start>x=int(cent[0])<line_sep>y=int(cent[1])<line_sep>color=get_closest_color_name(img.getpixel((x y)))<line_sep><return>color<block_end><def_stmt>get_coords masked img xyz centers# decode
# xyz is in row-major, centers corresponds to the xy for the image (which is in column major)
# xyz = base64.decodebytes(enc_xyz)
# coords = np.frombuffer(xyz, dtype=np.float64).reshape((4,-1))
<block_start>coords=np.around(xyz decimals=2)<line_sep># print("Decode success ? {} \n{}".format(coords.shape, coords[:, :5]))
# print("Image size {}".format(img.size))
# map each x,y to a an index into coords
# img = draw_xyz(img, [0,0,0], (0,0))
marker_dict=defaultdict(int)<line_sep>id=4<for_stmt>cent centers<block_start>xyz=coords[:3 cent[1]<times>img.size[0]+cent[0]]# what should this index be
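# coords has shape [4, H*W] in row-major order and img.size is (width, height), so pixel (x, y) maps to flat index y * width + x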
# xyz = coords[:3, cent[0]*img.size[1] + cent[1]]
# what should this index be
# xyz = [xyz[1], xyz[0], xyz[2]]
marker_dict[id]={"position":[xyz[0] xyz[1] xyz[2]] "color":get_closest_color_name(img.getpixel((int(cent[0]) int(cent[1])))) }<line_sep>masked=draw_xyz(masked xyz cent)<line_sep>id<augadd>1<block_end># draw the coords on the img
<return>masked marker_dict<block_end>
|
<import_from_stmt>file_io.import_utils import_detections import_segmentations<import_from_stmt>file_io.export_utils export_tracking_result_in_mots_format<import_from_stmt>eval.mots_eval.eval run_mots_eval<import_from_stmt>eval.mots_eval.mots_common.io load_seqmap<import_from_stmt>config Config<import_from_stmt>tracker.tracked_sequence TrackedSequence<import_stmt>pycocotools.mask<as>cocomask<import_from_stmt>eval.mots_eval.mots_common.io load_txt<def_stmt>import_gt_file gt_path<block_start>objects_per_frame=load_txt(gt_path)<line_sep>print(sequence)<for_stmt>frame objects_per_frame.keys()<block_start><for_stmt>object objects_per_frame.get(frame)<block_start><if_stmt><not>object.track_id<eq>10000<block_start>track_id=(object.track_id%1000)<line_sep>det={'class':object.class_id}<while_stmt>tracks_gt.get_num_ids()<le>track_id<block_start>tracks_gt.add_empty_track()<block_end>tracks_gt.add_to_track(frame track_id det object.mask)<block_end><block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>config=Config('./configs/config_default')<line_sep>list_sequences,max_frames=load_seqmap(config.str('mots_seqmap_file'))<for_stmt>sequence list_sequences<block_start>tracks_gt=TrackedSequence(max_frames[sequence]+1)<line_sep>import_gt_file('./data/mots_gt/'+sequence+'.txt')<line_sep>raw_detections=import_detections(config sequence)<line_sep>segmentations=import_segmentations(config sequence)<line_sep>tracks_gt_seg=TrackedSequence(max_frames[sequence]+1)<while_stmt>max_frames[sequence]+1<g>len(raw_detections)<block_start>raw_detections.append([])<block_end><while_stmt>max_frames[sequence]+1<g>len(segmentations)<block_start>segmentations.append([])<block_end><for_stmt>step range(tracks_gt.timesteps)<block_start>combined_mask_per_frame={}<for_stmt>gt_id tracks_gt.get_active_tracks(step)<block_start>ref_mask=tracks_gt.get_mask(step gt_id decode=<false>)<line_sep>ref_det=tracks_gt.get_detection(step gt_id)<line_sep>ref_class=ref_det['class']<for_stmt>mask,det zip(segmentations[step] raw_detections[step])# mask based (MOTS)
<block_start>mask_iou=cocomask.area(cocomask.merge([mask ref_mask] intersect=<true>))/cocomask.area(cocomask.merge([mask ref_mask]))<if_stmt>mask_iou<g>0.5<block_start><while_stmt>tracks_gt_seg.get_num_ids()<le>gt_id<block_start>tracks_gt_seg.add_empty_track()<block_end>tracks_gt_seg.add_to_track(step gt_id det mask)<if_stmt>step<not><in>combined_mask_per_frame<block_start>combined_mask_per_frame[step]=mask<block_end><else_stmt><block_start>combined_mask_per_frame[step]=cocomask.merge([combined_mask_per_frame[step] mask] intersect=<false>)<block_end><block_end><block_end><block_end><block_end>tracks_gt_seg.fix_mask_overlap()<line_sep>export_tracking_result_in_mots_format(tracks_gt_seg './scripts/gt_mots_eval/'+sequence+'/')<block_end>run_mots_eval('./scripts/gt_mots_eval/' list_sequences config.dir('mots_gt_folder') config.str('mots_seqmap_file'))<block_end>
|
<import_stmt>collections<import_stmt>warnings<import_from_stmt>functools partial<import_stmt>torch<import_from_stmt>torch.testing._internal.common_cuda TEST_CUDA <import_from_stmt>torch.testing._internal.common_dtype all_types_and_complex_and all_types_and_complex all_types_and_half all_types complex_types floating_and_complex_types floating_types_and_half floating_types integral_types floating_types_and floating_and_complex_types_and integral_types_and all_types_and _dispatch_dtypes <line_sep>COMPLETE_DTYPES_DISPATCH=(all_types all_types_and_complex all_types_and_half floating_types floating_and_complex_types floating_types_and_half integral_types complex_types )<line_sep>EXTENSIBLE_DTYPE_DISPATCH=(all_types_and_complex_and floating_types_and floating_and_complex_types_and integral_types_and all_types_and )<line_sep># Better way to acquire devices?
DEVICES=['cpu']+(['cuda']<if>TEST_CUDA<else>[])<class_stmt>_dynamic_dispatch_dtypes(_dispatch_dtypes)# Class to tag the dynamically generated types.
<block_start><pass><block_end><def_stmt>get_supported_dtypes op sample_inputs_fn device_type# Returns the supported dtypes for the given operator and device_type pair.
<block_start><assert_stmt>device_type<in>['cpu' 'cuda']<if_stmt><not>TEST_CUDA<and>device_type<eq>'cuda'<block_start>warnings.warn("WARNING: CUDA is not available, empty_dtypes dispatch will be returned!")<line_sep><return>_dynamic_dispatch_dtypes(())<block_end>supported_dtypes=set()<for_stmt>dtype all_types_and_complex_and(torch.bool torch.bfloat16 torch.half)<block_start><try_stmt><block_start>samples=sample_inputs_fn(op device_type dtype <false>)<block_end><except_stmt>RuntimeError# If `sample_inputs_fn` doesn't support sampling for a given
# `dtype`, we assume that the `dtype` is not supported.
# We raise a warning, so that user knows that this was the case
# and can investigate if there was an issue with the `sample_inputs_fn`.
<block_start>warnings.warn(f"WARNING: Unable to generate sample for device:{device_type} and dtype:{dtype}")<line_sep><continue><block_end># We assume the dtype is supported
# only if all samples pass for the given dtype.
supported=<true><for_stmt>sample samples<block_start><try_stmt><block_start>op(sample.input *sample.args **sample.kwargs)<block_end><except_stmt>RuntimeError<as>re# dtype is not supported
<block_start>supported=<false><line_sep><break><block_end><block_end><if_stmt>supported<block_start>supported_dtypes.add(dtype)<block_end><block_end><return>_dynamic_dispatch_dtypes(supported_dtypes)<block_end><def_stmt>dtypes_dispatch_hint dtypes# Function returns the appropriate dispatch function (from COMPLETE_DTYPES_DISPATCH and EXTENSIBLE_DTYPE_DISPATCH)
# and its string representation for the passed `dtypes`.
<block_start>return_type=collections.namedtuple('return_type' 'dispatch_fn dispatch_fn_str')<line_sep># CUDA is not available, dtypes will be empty.
<if_stmt>len(dtypes)<eq>0<block_start><return>return_type(() str(tuple()))<block_end>set_dtypes=set(dtypes)<for_stmt>dispatch COMPLETE_DTYPES_DISPATCH# Short circuit if we get an exact match.
<block_start><if_stmt>set(dispatch())<eq>set_dtypes<block_start><return>return_type(dispatch dispatch.__name__+"()")<block_end><block_end>chosen_dispatch=<none><line_sep>chosen_dispatch_score=0.<for_stmt>dispatch EXTENSIBLE_DTYPE_DISPATCH<block_start>dispatch_dtypes=set(dispatch())<if_stmt><not>dispatch_dtypes.issubset(set_dtypes)<block_start><continue><block_end>score=len(dispatch_dtypes)<if_stmt>score<g>chosen_dispatch_score<block_start>chosen_dispatch_score=score<line_sep>chosen_dispatch=dispatch<block_end><block_end># If user passed dtypes which are lower than the lowest
# dispatch type available (not likely but possible in code path).
<if_stmt>chosen_dispatch<is><none><block_start><return>return_type(() str(dtypes))<block_end><return>return_type(partial(dispatch *tuple(set(dtypes)-set(dispatch()))) dispatch.__name__+str(tuple(set(dtypes)-set(dispatch()))))<block_end><def_stmt>is_dynamic_dtype_set op# Detect if the OpInfo entry acquired dtypes dynamically
# using `get_supported_dtypes`.
<block_start><return>op.dynamic_dtypes<block_end><def_stmt>str_format_dynamic_dtype op<block_start>fmt_str="""
OpInfo({name},
dtypes={dtypes},
dtypesIfCUDA={dtypesIfCUDA},
)
""".format(name=op.name dtypes=dtypes_dispatch_hint(op.dtypes).dispatch_fn_str dtypesIfCUDA=dtypes_dispatch_hint(op.dtypesIfCUDA).dispatch_fn_str)<line_sep><return>fmt_str<block_end>
|
<import_from_future_stmt> unicode_literals<line_sep># This file is automatically generated via sphinx-me
<import_from_stmt>sphinx_me setup_conf<line_sep>setup_conf(globals())<line_sep>
|
<import_from_stmt>collections OrderedDict<import_from_stmt>django.db models<import_from_stmt>django.db.models Sum<import_from_stmt>django.db.models.signals post_save<import_from_stmt>django.utils timezone<import_from_stmt>django.dispatch receiver<import_from_stmt>django.contrib.auth.models AbstractUser<import_from_stmt>django.utils.functional cached_property<import_from_stmt>.castable CastableModel<import_from_stmt>.fields PriceField AmountField PercentField<line_sep>###############################################################################
# User models
###############################################################################
<class_stmt>User(AbstractUser)<block_start>"""Cointrol user"""<class_stmt>Meta<block_start>db_table='user'<block_end>@property<def_stmt>account self# TODO: support multiple Bitstamp accounts per user
<block_start><return>self.accounts.get()<block_end><block_end><class_stmt>Account(models.Model)<block_start>"""Bitstamp account"""<line_sep>user=models.ForeignKey(User related_name='accounts')<line_sep>username=models.CharField(max_length=255 blank=<true> help_text='Bitstamp login number')<line_sep>api_key=models.CharField(max_length=255 blank=<true>)<line_sep>api_secret=models.CharField(max_length=255 blank=<true>)<line_sep>created=models.DateTimeField(auto_now_add=<true>)<line_sep>updated=models.DateTimeField(auto_now=<true>)<class_stmt>Meta<block_start>db_table='account'<block_end><def_stmt>__str__ self<block_start><return>'account for {}'.format(self.user)<block_end><def_stmt>get_active_trading_session self<block_start>"""
Return the current `ACTIVE` and unfinished `TradingSession`, or `None`.
This is the exclusive method to get the session. It has
side-effects in the form of changing status from
`ACTIVE` to `FINISHED`, and from `QUEUED` to `ACTIVE` before
returning a session.
"""<line_sep>ACTIVE,QUEUED,FINISHED=(TradingSession.ACTIVE TradingSession.QUEUED TradingSession.FINISHED)<try_stmt><block_start>session=self.trading_sessions.get(status=ACTIVE)<block_end><except_stmt>TradingSession.DoesNotExist<block_start><try_stmt><block_start>session=self.trading_sessions.filter(status=QUEUED).earliest()<block_end><except_stmt>TradingSession.DoesNotExist<block_start>session=<none><block_end><block_end><while_stmt>session<block_start><if_stmt>session.status<eq>FINISHED<block_start>session=<none><block_end><elif_stmt>session.status<eq>QUEUED<block_start>session.set_status(ACTIVE)<block_end><elif_stmt>session.status<eq>ACTIVE<block_start><if_stmt><not>session.is_finished()<block_start><return>session<block_end><else_stmt><block_start>session.set_status(FINISHED)<try_stmt><block_start>session=session.get_previous_by_created(account=self)<block_end><except_stmt>TradingSession.DoesNotExist<block_start>session=<none><block_end><block_end><block_end><else_stmt><block_start><raise>TypeError('invalid session status: pk={}, {!r}'.format(session.pk session.status))<block_end><block_end><block_end><block_end>###############################################################################
# Trading
###############################################################################
<class_stmt>TradingStrategyProfile(CastableModel)<block_start>"""Base trading strategy configuration models class."""<line_sep>note=models.CharField(max_length=255 blank=<true>)<line_sep>account=models.ForeignKey(Account)<line_sep>created=models.DateTimeField(auto_now_add=<true>)<line_sep>updated=models.DateTimeField(auto_now=<true>)<line_sep>@property<def_stmt>type_name self<block_start><if_stmt>type(self)<eq>TradingStrategyProfile<block_start><return>self.cast().type_name<block_end><return>type(self).__name__<block_end><def_stmt>__str__ self<block_start><return>str(self.cast())<block_end><block_end><class_stmt>RelativeStrategyProfile(TradingStrategyProfile)<block_start>"""Configuration for relative trading strategy."""<line_sep>buy=PercentField()<line_sep>sell=PercentField()<class_stmt>Meta<block_start>db_table='strategy_profile_relative'<block_end><def_stmt>__str__ self<block_start><return>'relative buy at {buy}%, sell at ${sell}%'.format(buy=self.buy sell=self.sell)<block_end><def_stmt>save self *args **kwargs<block_start>min_fee=.2<assert_stmt>self.buy<l>100-min_fee<assert_stmt>self.sell<g>100+min_fee<line_sep><return>super().save(*args **kwargs)<block_end><block_end><class_stmt>FixedStrategyProfile(TradingStrategyProfile)<block_start>"""Configuration for fixed trading strategy."""<line_sep>buy=PriceField()<line_sep>sell=PriceField()<class_stmt>Meta<block_start>db_table='strategy_profile_fixed'<block_end><def_stmt>__str__ self<block_start><return>'fixed buy at ${buy}, sell at ${sell}'.format(buy=self.buy sell=self.sell)<block_end><block_end><class_stmt>TradingSession(models.Model)<block_start>QUEUED,ACTIVE,FINISHED='queued' 'active' 'finished'<line_sep>STATUSES=[QUEUED ACTIVE FINISHED]<line_sep>created=models.DateTimeField(auto_now_add=<true>)<line_sep>updated=models.DateTimeField(auto_now=<true>)<line_sep>account=models.ForeignKey(Account related_name='trading_sessions')<line_sep>status=models.CharField(choices=zip(STATUSES STATUSES) max_length=255 db_index=<true>)<line_sep>became_active=models.DateTimeField(null=<true> blank=<true>)<line_sep>became_finished=models.DateTimeField(null=<true> blank=<true>)<line_sep>note=models.CharField(max_length=255 blank=<true>)<line_sep>strategy_profile=models.ForeignKey(TradingStrategyProfile)<line_sep># None - no limit; 1 - one repeat left; 0 - done
repeat_times=models.PositiveSmallIntegerField(default=<none> null=<true> blank=<true>)<line_sep># None - no limit
repeat_until=models.DateTimeField(null=<true> blank=<true>)<class_stmt>Meta<block_start>db_table='trading_session'<line_sep>ordering=['-created']<line_sep>get_latest_by='created'<block_end><def_stmt>__str__ self<block_start><return>'{status} session with {strategy}'.format(status=self.status strategy=self.strategy_profile )<block_end><def_stmt>set_status self status<block_start><if_stmt>status<eq>self.ACTIVE<block_start><assert_stmt>self.status<eq>self.QUEUED<assert_stmt>self.became_active<is><none><assert_stmt>self.became_finished<is><none><line_sep>self.became_active=timezone.now()<block_end><elif_stmt>status<eq>self.FINISHED<block_start><assert_stmt>self.status<eq>self.ACTIVE<assert_stmt>self.became_active<is><not><none><assert_stmt>self.became_finished<is><none><line_sep>self.became_finished=timezone.now()<block_end>self.status=status<line_sep>self.save()<block_end>@cached_property<def_stmt>profile self<block_start>"""Accessor for casted strategy profile."""<line_sep><return>self.strategy_profile.cast()<block_end><def_stmt>is_expired self<block_start><return>(self.repeat_until<is><not><none><and>self.repeat_until<g>timezone.now())<block_end><def_stmt>is_done self<block_start><return>(self.repeat_times<is><not><none><and>self.repeat_times<ge>self.orders.count())<block_end><def_stmt>is_finished self<block_start><return>self.is_expired()<or>self.is_done()<block_end><block_end>###############################################################################
# Bitstamp API-based models
# https://www.bitstamp.net/api/
###############################################################################
<class_stmt>Ticker(models.Model)<block_start>"""
{
high: "704.00",
last: "678.57",
timestamp: "1393958158",
bid: "678.49",
vwap: "677.88",
volume: "39060.90623024",
low: "633.64",
ask: "678.57"
}
"""<line_sep>timestamp=models.DateTimeField()<line_sep>volume=AmountField()<line_sep>vwap=PriceField()<line_sep>last=PriceField()<line_sep>high=PriceField()<line_sep>low=PriceField()<line_sep>bid=PriceField()<line_sep>ask=PriceField()<line_sep>open=PriceField()<class_stmt>Meta<block_start>ordering=['-timestamp']<line_sep>get_latest_by='timestamp'<line_sep>db_table='bitstamp_ticker'<block_end><def_stmt>__str__ self<block_start><return>'last={last}, timestamp={timestamp}'.format(**self.__dict__)<block_end><block_end><class_stmt>Balance(models.Model)<block_start>"""
usd_balance - USD balance
btc_balance - BTC balance
usd_reserved - USD reserved in open orders
btc_reserved - BTC reserved in open orders
usd_available- USD available for trading
btc_available - BTC available for trading
fee - customer trading fee
"""<line_sep>created=models.DateTimeField(auto_now_add=<true>)<line_sep>account=models.ForeignKey(Account related_name='balances')<line_sep>inferred=models.BooleanField(default=<false>)<line_sep>timestamp=models.DateTimeField()<line_sep># API fields
fee=PercentField()<line_sep>usd_balance=AmountField()<line_sep>btc_balance=AmountField()<line_sep>usd_reserved=AmountField()<line_sep>btc_reserved=AmountField()<line_sep>btc_available=AmountField()<line_sep>usd_available=AmountField()<line_sep>eur_balance=AmountField()<line_sep>xrp_balance=AmountField()<line_sep>eur_reserved=AmountField()<line_sep>xrp_reserved=AmountField()<line_sep>eur_available=AmountField()<line_sep>xrp_available=AmountField()<class_stmt>Meta<block_start>get_latest_by='timestamp'<line_sep>ordering=['-timestamp']<line_sep>db_table='bitstamp_balance'<block_end><def_stmt>__str__ self<block_start><return>'{usd:0>6} US$ | {btc:0>10} BTC'.format(usd=self.usd_balance btc=self.btc_balance)<block_end><block_end><class_stmt>Order(models.Model)<block_start>OPEN,CANCELLED,PROCESSED='open' 'cancelled' 'processed'<line_sep>STATUSES=[OPEN CANCELLED PROCESSED]<line_sep>BUY,SELL=0 1<line_sep>TYPES=OrderedDict([(BUY 'buy') (SELL 'sell')])<line_sep>updated=models.DateTimeField(auto_now=<true>)<line_sep>account=models.ForeignKey(Account related_name='orders')<line_sep>balance=models.ForeignKey(Balance null=<true> on_delete=models.PROTECT)<line_sep>total=AmountField()<line_sep>status=models.CharField(default=<none> choices=zip(STATUSES STATUSES) max_length=255 db_index=<true>)<line_sep>status_changed=models.DateTimeField(null=<true> blank=<true>)<line_sep>trading_session=models.ForeignKey(TradingSession null=<true> on_delete=models.SET_NULL related_name='orders')<line_sep># API fields.
price=PriceField()<line_sep>amount=AmountField()<line_sep>type=models.IntegerField(choices=[(BUY 'buy') (SELL 'sell')] db_index=<true>)<line_sep>datetime=models.DateTimeField()<def_stmt>__str__ self<block_start><return>'{type} {amount} BTC at {price} US$'.format(type=self.get_type_display() amount=self.amount price=self.price)<block_end><class_stmt>Meta<block_start>ordering=['-datetime']<line_sep>get_latest_by='datetime'<line_sep>db_table='bitstamp_order'<block_end><block_end><class_stmt>Transaction(models.Model)<block_start>DEPOSIT,WITHDRAWAL,MARKET_TRADE=0 1 2<line_sep>TYPES=[DEPOSIT WITHDRAWAL MARKET_TRADE]<line_sep># MARKET_TRADE subtypes
SELL,BUY='sell' 'buy'<line_sep>balance=models.ForeignKey(Balance on_delete=models.PROTECT)<line_sep>account=models.ForeignKey(Account related_name='transactions')<line_sep>updated=models.DateTimeField(auto_now=<true>)<line_sep># API fields.
datetime=models.DateTimeField()<line_sep>btc=AmountField()<line_sep>usd=AmountField()<line_sep>fee=AmountField()<line_sep>btc_usd=PriceField()<line_sep>order=models.ForeignKey(Order related_name='transactions' null=<true>)<line_sep>type=models.PositiveSmallIntegerField(db_index=<true> choices=[(DEPOSIT 'deposit') (WITHDRAWAL 'withdrawal') (MARKET_TRADE 'trade') ])<class_stmt>Meta<block_start>ordering=['-datetime']<line_sep>get_latest_by='datetime'<line_sep>db_table='bitstamp_transaction'<block_end><def_stmt>__str__ self<block_start><return>'${usd} | {btc} BTC'.format(usd=self.usd btc=self.btc)<block_end>@property<def_stmt>trade_type self<block_start><if_stmt>self.type<eq>Transaction.MARKET_TRADE<block_start><return>Transaction.SELL<if>self.usd<g>0<else>Transaction.BUY<block_end><block_end><def_stmt>save self *args **kwargs<block_start><if_stmt><not>self.balance_id<block_start>self._create_balance()<block_end><return>super().save(*args **kwargs)<block_end><def_stmt>_create_balance self<block_start><assert_stmt><not>self.balance_id<line_sep>older=self.account.transactions.filter(datetime__lte=self.datetime)<line_sep>aggregate=({'usd':0 'btc':0 'fee':0}<if><not>older.exists()<else>older.aggregate(usd=Sum('usd') btc=Sum('btc') fee=Sum('fee')))<line_sep># Reflect current transaction as well.
aggregate['usd']<augadd>self.usd<line_sep>aggregate['fee']<augadd>self.fee<line_sep>aggregate['btc']<augadd>self.btc<line_sep>self.balance=self.account.balances.create(inferred=<true> timestamp=self.datetime usd_balance=aggregate['usd']-aggregate['fee'] btc_balance=aggregate['btc'] fee=0 )<block_end><block_end>###############################################################################
# Signal listeners
###############################################################################
# noinspection PyUnusedLocal
@receiver(post_save sender=User)<def_stmt>create_default_account instance created **kwargs<block_start><if_stmt>created<block_start>instance.accounts.create()<block_end><block_end>
|
<import_stmt>matplotlib.pyplot<as>plt<line_sep># STEP 2
LEGEND=('ProductA' 'ProductB' 'ProductC')<line_sep>DATA=(('Q1 2017' 100 30 3) ('Q2 2017' 105 32 15) ('Q3 2017' 125 29 40) ('Q4 2017' 115 31 80) )<line_sep># STEP 3
POS=list(range(len(DATA)))<line_sep>VALUESA=[valueA<for>label,valueA,valueB,valueC DATA]<line_sep>VALUESB=[valueB<for>label,valueA,valueB,valueC DATA]<line_sep>VALUESC=[valueC<for>label,valueA,valueB,valueC DATA]<line_sep>LABELS=[label<for>label,valueA,valueB,valueC DATA]<line_sep># STEP 4
WIDTH=0.2<line_sep>valueA=plt.bar([p-WIDTH<for>p POS] VALUESA width=WIDTH)<line_sep>valueB=plt.bar([p<for>p POS] VALUESB width=WIDTH)<line_sep>valueC=plt.bar([p+WIDTH<for>p POS] VALUESC width=WIDTH)<line_sep>plt.ylabel('Sales')<line_sep>plt.xticks(POS LABELS)<line_sep># STEP 5
plt.annotate('400% growth' xy=(1.2 18) xytext=(1.3 40) horizontalalignment='center' fontsize=9 arrowprops={'facecolor':'black' 'arrowstyle':"fancy" 'connectionstyle':"angle3" })<line_sep># STEP 6
# Draw the legend outside the plot
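# bbox_to_anchor=(1, 0.8) anchors the legend at the right edge of the axes; subplots_adjust(right=0.80) shrinks the plot area so the legend stays visible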
plt.legend(LEGEND title='Products' bbox_to_anchor=(1 0.8))<line_sep>plt.subplots_adjust(right=0.80)<line_sep># STEP 6
plt.show()<line_sep>
|
<import_stmt>FWCore.ParameterSet.Config<as>cms<import_from_stmt>PhysicsTools.NanoAOD.common_cff *<import_from_stmt>PhysicsTools.NanoAOD.nano_eras_cff *<line_sep>btagSFdir="PhysicsTools/NanoAOD/data/btagSF/"<line_sep>btagWeightTable=cms.EDProducer("BTagSFProducer" src=cms.InputTag("linkedObjects" "jets") cut=cms.string("pt > 25. && abs(eta) < 2.5") discNames=cms.vstring("pfCombinedInclusiveSecondaryVertexV2BJetTags" "pfDeepCSVJetTags:probb+pfDeepCSVJetTags:probbb" #if multiple MiniAOD branches need to be summed up (e.g., DeepCSV b+bb), separate them using '+' delimiter
"pfCombinedMVAV2BJetTags") discShortNames=cms.vstring("CSVV2" "DeepCSVB" "CMVA") weightFiles=cms.vstring(#default settings are for 2017 94X. toModify function is called later for other eras.
btagSFdir+"CSVv2_94XSF_V2_B_F.csv" btagSFdir+"DeepCSV_94XSF_V2_B_F.csv" "unavailable"#if SFs for an algorithm in an era is unavailable, the corresponding branch will not be stored
) operatingPoints=cms.vstring("3" "3" "3") #loose = 0, medium = 1, tight = 2, reshaping = 3
measurementTypesB=cms.vstring("iterativefit" "iterativefit" "iterativefit") #e.g. "comb", "incl", "ttbar", "iterativefit"
measurementTypesC=cms.vstring("iterativefit" "iterativefit" "iterativefit") measurementTypesUDSG=cms.vstring("iterativefit" "iterativefit" "iterativefit") sysTypes=cms.vstring("central" "central" "central"))<for_stmt>modifier run2_miniAOD_80XLegacy run2_nanoAOD_94X2016# to be updated when SF for Summer16MiniAODv3 MC will be available
<block_start>modifier.toModify(btagWeightTable cut=cms.string("pt > 25. && abs(eta) < 2.4") #80X corresponds to 2016, |eta| < 2.4
weightFiles=cms.vstring(#80X corresponds to 2016 SFs
btagSFdir+"CSVv2_Moriond17_B_H.csv" "unavailable" btagSFdir+"cMVAv2_Moriond17_B_H.csv"))<block_end>
|
# @Time : 2021/4/19
# @Author : <NAME>
# @Email : <EMAIL>
"""
textbox.evaluator.meteor_evaluator
#######################################
"""<import_stmt>numpy<as>np<import_from_stmt>nltk.translate.meteor_score meteor_score<import_from_stmt>textbox.evaluator.abstract_evaluator AbstractEvaluator<class_stmt>MeteorEvaluator(AbstractEvaluator)<block_start><def_stmt>_preprocess self input_sentence<block_start><return>" ".join(input_sentence)<block_end><def_stmt>_calc_metrics_info self generate_corpus reference_corpus<block_start>generate_corpus=[self._preprocess(generate_sentence)<for>generate_sentence generate_corpus]<line_sep>reference_corpus=[self._preprocess(reference_sentence)<for>reference_sentence reference_corpus]<line_sep>reference_corpus=[[reference_sentence]<for>reference_sentence reference_corpus]<line_sep>result={}<line_sep>scores=[]<for_stmt>gen,refs zip(generate_corpus reference_corpus)<block_start>score=meteor_score(refs gen)<line_sep>scores.append(score)<block_end>result['meteor']=scores<line_sep><return>result<block_end><block_end>
|
<import_from_stmt>urllib.parse urlparse<import_stmt>requests<import_from_stmt>django.contrib admin<import_from_stmt>django.contrib.sites.shortcuts get_current_site<import_from_stmt>django.forms Media widgets<import_from_stmt>django.db.models Q<import_from_stmt>django.http JsonResponse HttpResponseForbidden HttpResponseNotFound<import_from_stmt>django.urls re_path reverse<import_from_stmt>django.utils.translation get_language_from_request<import_from_stmt>cms.models.pagemodel Page<import_from_stmt>cms.extensions PageExtensionAdmin<import_from_stmt>cms.utils.page get_page_from_path<import_from_stmt>cmsplugin_cascade.models CascadePage IconFont<import_from_stmt>cmsplugin_cascade.link.forms format_page_link<line_sep>@admin.register(CascadePage)<class_stmt>CascadePageAdmin(PageExtensionAdmin)<block_start>add_form_template=change_form_template='cascade/admin/change_form.html'<line_sep>fields=['icon_font' 'menu_symbol']<line_sep>@property<def_stmt>media self<block_start>media=super().media<line_sep>media<augadd>Media(css={'all':['cascade/css/admin/cascadepage.css']} js=['admin/js/jquery.init.js' 'cascade/js/admin/cascadepage.js'])<line_sep><return>media<block_end><def_stmt>get_form self request obj=<none> **kwargs<block_start>options=dict(kwargs widgets={'menu_symbol':widgets.HiddenInput})<line_sep>ModelForm=super().get_form(request obj **options)<line_sep><return>ModelForm<block_end><def_stmt>get_urls self<block_start>urls=[re_path(r'^get_page_sections/$' <lambda>_:JsonResponse({'element_ids':[]}) name='get_page_sections') # just to reverse
re_path(r'^get_page_sections/(?P<page_pk>\d+)$' self.admin_site.admin_view(self.get_page_sections)) re_path(r'^published_pages/$' self.get_published_pagelist name='get_published_pagelist') re_path(r'^fetch_fonticons/(?P<iconfont_id>[0-9]+)$' self.fetch_fonticons) re_path(r'^fetch_fonticons/$' self.fetch_fonticons name='fetch_fonticons') re_path(r'^validate_exturl/$' self.validate_exturl name='validate_exturl') ]<line_sep>urls.extend(super().get_urls())<line_sep><return>urls<block_end><def_stmt>get_page_sections self request page_pk=<none><block_start>choices=[]<try_stmt><block_start>extended_glossary=self.model.objects.get(extended_object_id=page_pk).glossary<for_stmt>key,val extended_glossary['element_ids'].items()<block_start>choices.append((key val))<block_end><block_end><except_stmt>(self.model.DoesNotExist KeyError)<block_start><pass><block_end><return>JsonResponse({'element_ids':choices})<block_end><def_stmt>get_published_pagelist self request *args **kwargs<block_start>"""
This view is used by the SearchLinkField as the user types to feed the autocomplete drop-down.
"""<if_stmt><not>request.is_ajax()<block_start><return>HttpResponseForbidden()<block_end>data={'results':[]}<line_sep>language=get_language_from_request(request)<line_sep>query_term=request.GET.get('term')<if_stmt><not>query_term<block_start><return>JsonResponse(data)<block_end># first, try to resolve by URL if it points to a local resource
parse_result=urlparse(query_term)<if_stmt>parse_result.netloc.split(':')[0]<eq>request.META['HTTP_HOST'].split(':')[0]<block_start>site=get_current_site(request)<line_sep>path=parse_result.path.lstrip(reverse('pages-root')).rstrip('/')<line_sep>page=get_page_from_path(site path)<if_stmt>page<block_start>data['results'].append(self.get_result_set(language page))<line_sep><return>JsonResponse(data)<block_end><block_end># otherwise resolve by search term
matching_published_pages=Page.objects.published().public().filter(Q(title_set__title__icontains=query_term title_set__language=language)|Q(title_set__path__icontains=query_term title_set__language=language)|Q(title_set__menu_title__icontains=query_term title_set__language=language)|Q(title_set__page_title__icontains=query_term title_set__language=language)).distinct().order_by('title_set__title').iterator()<for_stmt>page matching_published_pages<block_start>data['results'].append(self.get_result_set(language page))<if_stmt>len(data['results'])<g>15<block_start><break><block_end><block_end><return>JsonResponse(data)<block_end><def_stmt>get_result_set self language page<block_start>title=page.get_title(language=language)<line_sep>path=page.get_absolute_url(language=language)<line_sep><return>{'id':page.pk 'text':format_page_link(title path) }<block_end><def_stmt>fetch_fonticons self request iconfont_id=<none><block_start><try_stmt><block_start>icon_font=IconFont.objects.get(id=iconfont_id)<block_end><except_stmt>IconFont.DoesNotExist<block_start><return>HttpResponseNotFound("IconFont with id={} does not exist".format(iconfont_id))<block_end><else_stmt><block_start>data=dict(icon_font.config_data)<line_sep>data.pop('glyphs' <none>)<line_sep>data['families']=icon_font.get_icon_families()<line_sep><return>JsonResponse(data)<block_end><block_end><def_stmt>validate_exturl self request<block_start>"""
Perform a GET request onto the given external URL and return its status.
"""<line_sep>exturl=request.GET.get('exturl')<line_sep>request_headers={'User-Agent':'Django-CMS-Cascade'}<try_stmt><block_start>response=requests.get(exturl allow_redirects=<true> headers=request_headers)<block_end><except_stmt>Exception<block_start><return>JsonResponse({'status_code':500})<block_end><else_stmt><block_start><return>JsonResponse({'status_code':response.status_code})<block_end><block_end><def_stmt>changeform_view self request object_id=<none> form_url='' extra_context=<none><block_start>extra_context=dict(extra_context<or>{} icon_fonts=IconFont.objects.all())<line_sep><return>super().changeform_view(request object_id=object_id form_url=form_url extra_context=extra_context)<block_end><block_end>
|
<import_from_stmt>.generate ResourseGenerator RESOURCES_PATH<line_sep>
|
<import_from_stmt>flashtext KeywordProcessor<import_stmt>logging<import_stmt>unittest<import_stmt>json<line_sep>logger=logging.getLogger(__name__)<class_stmt>TestKeywordExtractor(unittest.TestCase)<block_start><def_stmt>setUp self<block_start>logger.info("Starting...")<with_stmt>open('test/keyword_extractor_test_cases.json')<as>f<block_start>self.test_cases=json.load(f)<block_end><block_end><def_stmt>tearDown self<block_start>logger.info("Ending.")<block_end><def_stmt>test_extract_keywords self<block_start>"""For each of the test case initialize a new KeywordProcessor.
Add the keywords from the test case to the KeywordProcessor.
Extract keywords and check if they match the expected result for the test case.
"""<for_stmt>test_id,test_case enumerate(self.test_cases)<block_start>keyword_processor=KeywordProcessor()<line_sep>keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])<line_sep>keywords_extracted=keyword_processor.extract_keywords(test_case['sentence'])<line_sep>self.assertEqual(keywords_extracted test_case['keywords'] "keywords_extracted don't match the expected results for test case: {}".format(test_id))<block_end><block_end><def_stmt>test_extract_keywords_case_sensitive self<block_start>"""For each of the test case initialize a new KeywordProcessor.
Add the keywords from the test case to the KeywordProcessor.
Extract keywords and check if they match the expected result for the test case.
"""<for_stmt>test_id,test_case enumerate(self.test_cases)<block_start>keyword_processor=KeywordProcessor(case_sensitive=<true>)<line_sep>keyword_processor.add_keywords_from_dict(test_case['keyword_dict'])<line_sep>keywords_extracted=keyword_processor.extract_keywords(test_case['sentence'])<line_sep>self.assertEqual(keywords_extracted test_case['keywords_case_sensitive'] "keywords_extracted don't match the expected results for test case: {}".format(test_id))<block_end><block_end><block_end><if_stmt>__name__<eq>'__main__'<block_start>unittest.main()<block_end>
|
<import_from_stmt>datetime datetime<import_from_stmt>typing Any Dict List Optional Type Union<import_from_stmt>.cpp _CPP _make_cpp_trade<import_from_stmt>.order Order<import_from_stmt>..instrument Instrument<import_from_stmt>..exchange ExchangeType<import_from_stmt>...config DataType Side<class_stmt>Trade(object)<block_start>__slots__=["__id" "__type" "__price" "__volume" "__maker_orders" "__taker_order" # FIXME hide
"__my_order" "__slippage" "__transaction_cost" ]<line_sep># for convenience
Types=DataType<def_stmt>__new__ cls *args **kwargs# type: ignore
<block_start><if_stmt>_CPP<block_start><return>_make_cpp_trade(*args **kwargs)<block_end><return>super(Trade cls).__new__(cls)<block_end><def_stmt>__init__ self volume:float price:float taker_order:Order maker_orders:Optional[List[Order]]=<none> **kwargs:Any <arrow><none><block_start>self.__id=kwargs.get("id" "0")<line_sep># on construction, provide no ID until exchange assigns one
self.__type=DataType.TRADE<assert_stmt>isinstance(price (float int))<assert_stmt>isinstance(volume (float int))<assert_stmt>isinstance(taker_order Order)<line_sep># assert(len(maker_orders) > 0) # not necessarily
<assert_stmt>volume<eq>taker_order.filled<line_sep>self.__price=price<line_sep>self.__volume=volume<line_sep>self.__maker_orders=maker_orders<or>[]<line_sep>self.__taker_order=taker_order<line_sep>self.__my_order=kwargs.get("my_order" <none>)<line_sep>self.__slippage=0.0<line_sep>self.__transaction_cost=0.0<block_end># ******** #
# Readonly #
# ******** #
@property<def_stmt>timestamp self<arrow>datetime<block_start><return>self.taker_order.timestamp<block_end>@property<def_stmt>type self<arrow>DataType<block_start><return>self.__type<block_end>@property<def_stmt>volume self<arrow>float<block_start><return>self.__volume<block_end>@property<def_stmt>price self<arrow>float<block_start><return>self.__price<block_end>@property<def_stmt>instrument self<arrow>Instrument<block_start><return>self.taker_order.instrument<block_end>@property<def_stmt>exchange self<arrow>ExchangeType<block_start><return>self.taker_order.exchange<block_end>@property<def_stmt>side self<arrow>Side<block_start><return>self.taker_order.side<block_end>@property<def_stmt>notional self<arrow>float<block_start><return>self.price<times>self.volume<block_end><def_stmt>finished self<arrow>bool<block_start><return>self.taker_order.finished()<block_end># ***********#
# Read/write #
# ***********#
@property<def_stmt>id self<arrow>str<block_start><return>self.__id<block_end>@id.setter<def_stmt>id self id:str<arrow><none><block_start><assert_stmt>isinstance(id (str int))<line_sep>self.__id=str(id)<block_end>@property<def_stmt>maker_orders self<arrow>List[Order]# no setter
<block_start><return>self.__maker_orders<block_end>@property<def_stmt>taker_order self<arrow>Order<block_start><return>self.__taker_order<block_end>@property<def_stmt>my_order self<arrow>Order<block_start><return>self.__my_order<block_end>@my_order.setter<def_stmt>my_order self order:Order<arrow><none><block_start><assert_stmt>isinstance(order Order)<line_sep>self.__my_order=order<block_end><def_stmt>__repr__ self<arrow>str<block_start><return>f"Trade( id={self.id}, timestamp={self.timestamp}, {self.volume}@{self.price}, \n\ttaker_order={self.taker_order},\n\tmaker_orders={self.maker_orders}, )"<block_end><def_stmt>__eq__ self other:object<arrow>bool<block_start><assert_stmt>isinstance(other Trade)<line_sep><return>self.id<eq>other.id<and>self.timestamp<eq>other.timestamp<block_end><def_stmt>json self flat:bool=<false><arrow>Dict[str Union[str int float dict]]<block_start>"""convert trade to flat json"""<line_sep>ret:Dict[str Union[str int float dict]]={"id":self.id "timestamp":self.timestamp.timestamp() "price":self.price "volume":self.volume }<if_stmt>flat# Typings here to enforce flatness of json
<block_start>taker_order:Dict[str Union[str int float dict]]={"taker_order."+k:v<for>k,v self.taker_order.json(flat=flat).items()}<line_sep>maker_orders:List[Dict[str Union[str int float dict]]]=[{"maker_order{}.".format(i)+k:v<for>k,v order.json(flat=flat).items()}<for>i,order enumerate(self.maker_orders)]<line_sep># update with taker order dict
ret.update(taker_order)<line_sep># update with maker order dicts
<for_stmt>maker_order maker_orders<block_start>ret.update(maker_order)<block_end><block_end><else_stmt><block_start>ret["taker_order"]=self.taker_order.json()# type: ignore
ret["maker_orders"]=[m.json()<for>m self.maker_orders]<block_end># type: ignore
<return>ret<block_end>@staticmethod<def_stmt>fromJson jsn:dict<arrow>"Trade"<block_start>ret=Trade(jsn["volume"] jsn["price"] Order.fromJson(jsn["taker_order"]) [Order.fromJson(x)<for>x jsn["maker_orders"]] )<if_stmt>"id"<in>jsn<block_start>ret.id=str(jsn.get("id"))<block_end><return>ret<block_end>@staticmethod<def_stmt>schema <arrow>Dict[str Type]# FIXME
# this varies from the json schema
<block_start><return>{"id":int "timestamp":int "volume":float "price":float}<block_end><block_end>
|
#
# This file has been modified in 2019 by MongoDB Inc.
#
# OCSPBuilder is derived from https://github.com/wbond/ocspbuilder
# OCSPResponder is derived from https://github.com/threema-ch/ocspresponder
# Copyright (c) 2015-2018 <NAME> <<EMAIL>>
# Permission is hereby granted, free of charge, to any person obtaining a copy of
# this software and associated documentation files (the "Software"), to deal in
# the Software without restriction, including without limitation the rights to
# use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
# of the Software, and to permit persons to whom the Software is furnished to do
# so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Copyright 2016 Threema GmbH
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_future_stmt> unicode_literals division absolute_import print_function<import_stmt>logging<import_stmt>base64<import_stmt>inspect<import_stmt>re<import_stmt>enum<import_stmt>sys<import_stmt>textwrap<import_from_stmt>datetime datetime timezone timedelta<import_from_stmt>typing Callable Tuple Optional<import_from_stmt>asn1crypto x509 keys core ocsp<import_from_stmt>asn1crypto.ocsp OCSPRequest OCSPResponse<import_from_stmt>oscrypto asymmetric<import_from_stmt>flask Flask request Response<line_sep>__version__='0.10.2'<line_sep>__version_info__=(0 10 2)<line_sep>logger=logging.getLogger(__name__)<if_stmt>sys.version_info<l>(3 )<block_start>byte_cls=str<block_end><else_stmt><block_start>byte_cls=bytes<block_end><def_stmt>_pretty_message string *params<block_start>"""
Takes a multi-line string and does the following:
- dedents
- converts newlines with text before and after into a single line
- strips leading and trailing whitespace
:param string:
The string to format
:param *params:
Params to interpolate into the string
:return:
The formatted string
"""<line_sep>output=textwrap.dedent(string)<line_sep># Unwrap lines, taking into account bulleted lists, ordered lists and
# underlines consisting of = signs
<if_stmt>output.find('\n')<ne>-1<block_start>output=re.sub('(?<=\\S)\n(?=[^ \n\t\\d\\*\\-=])' ' ' output)<block_end><if_stmt>params<block_start>output=output%params<block_end>output=output.strip()<line_sep><return>output<block_end><def_stmt>_type_name value<block_start>"""
:param value:
A value to get the object name of
:return:
A unicode string of the object name
"""<if_stmt>inspect.isclass(value)<block_start>cls=value<block_end><else_stmt><block_start>cls=value.__class__<block_end><if_stmt>cls.__module__<in>set(['builtins' '__builtin__'])<block_start><return>cls.__name__<block_end><return>'%s.%s'%(cls.__module__ cls.__name__)<block_end><def_stmt>_writer func<block_start>"""
Decorator for a custom writer, but a default reader
"""<line_sep>name=func.__name__<line_sep><return>property(fget=<lambda>self:getattr(self '_%s'%name) fset=func)<block_end><class_stmt>OCSPResponseBuilder(object)<block_start>_response_status=<none><line_sep>_certificate=<none><line_sep>_certificate_status=<none><line_sep>_revocation_date=<none><line_sep>_certificate_issuer=<none><line_sep>_hash_algo=<none><line_sep>_key_hash_algo=<none><line_sep>_nonce=<none><line_sep>_this_update=<none><line_sep>_next_update=<none><line_sep>_response_data_extensions=<none><line_sep>_single_response_extensions=<none><def_stmt>__init__ self response_status certificate_status_list=[] revocation_date=<none><block_start>"""
Unless changed, responses will use SHA-256 for the signature,
and will be valid for one week from the moment they are created.
:param response_status:
A unicode string of OCSP response type:
- "successful" - when the response includes information about the certificate
- "malformed_request" - when the request could not be understood
- "internal_error" - when an internal error occured with the OCSP responder
- "try_later" - when the OCSP responder is temporarily unavailable
- "sign_required" - when the OCSP request must be signed
- "unauthorized" - when the responder is not the correct responder for the certificate
:param certificate_status_list:
A list of tuples with certificate serial number and certificate status objects.
certificate_status:
A unicode string of the status of the certificate. Only required if
the response_status is "successful".
- "good" - when the certificate is in good standing
- "revoked" - when the certificate is revoked without a reason code
- "key_compromise" - when a private key is compromised
- "ca_compromise" - when the CA issuing the certificate is compromised
- "affiliation_changed" - when the certificate subject name changed
- "superseded" - when the certificate was replaced with a new one
- "cessation_of_operation" - when the certificate is no longer needed
- "certificate_hold" - when the certificate is temporarily invalid
- "remove_from_crl" - only delta CRLs - when temporary hold is removed
- "privilege_withdrawn" - one of the usages for a certificate was removed
- "unknown" - the responder doesn't know about the certificate being requested
:param revocation_date:
A datetime.datetime object of when the certificate was revoked, if
the response_status is "successful" and the certificate status is
not "good" or "unknown".
"""<line_sep>self._response_status=response_status<line_sep>self._certificate_status_list=certificate_status_list<line_sep>self._revocation_date=revocation_date<line_sep>self._key_hash_algo='sha1'<line_sep>self._hash_algo='sha256'<line_sep>self._response_data_extensions={}<line_sep>self._single_response_extensions={}<block_end>@_writer<def_stmt>nonce self value<block_start>"""
The nonce that was provided during the request.
"""<if_stmt><not>isinstance(value byte_cls)<block_start><raise>TypeError(_pretty_message('''
nonce must be a byte string, not %s
''' _type_name(value)))<block_end>self._nonce=value<block_end>@_writer<def_stmt>certificate_issuer self value<block_start>"""
An asn1crypto.x509.Certificate object of the issuer of the certificate.
This should only be set if the OCSP responder is not the issuer of
the certificate, but instead a special certificate only for OCSP
responses.
"""<if_stmt>value<is><not><none><block_start>is_oscrypto=isinstance(value asymmetric.Certificate)<if_stmt><not>is_oscrypto<and><not>isinstance(value x509.Certificate)<block_start><raise>TypeError(_pretty_message('''
certificate_issuer must be an instance of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''' _type_name(value)))<block_end><if_stmt>is_oscrypto<block_start>value=value.asn1<block_end><block_end>self._certificate_issuer=value<block_end>@_writer<def_stmt>next_update self value<block_start>"""
A datetime.datetime object of when the response may next change. This
should only be set if responses are cached. If responses are generated
fresh on every request, this should not be set.
"""<if_stmt><not>isinstance(value datetime)<block_start><raise>TypeError(_pretty_message('''
next_update must be an instance of datetime.datetime, not %s
''' _type_name(value)))<block_end>self._next_update=value<block_end><def_stmt>build self responder_private_key=<none> responder_certificate=<none><block_start>"""
Validates the request information, constructs the ASN.1 structure and
signs it.
The responder_private_key and responder_certificate parameters are only
required if the response_status is "successful".
:param responder_private_key:
An asn1crypto.keys.PrivateKeyInfo or oscrypto.asymmetric.PrivateKey
object for the private key to sign the response with
:param responder_certificate:
An asn1crypto.x509.Certificate or oscrypto.asymmetric.Certificate
object of the certificate associated with the private key
:return:
An asn1crypto.ocsp.OCSPResponse object of the response
"""<if_stmt>self._response_status<ne>'successful'<block_start><return>ocsp.OCSPResponse({'response_status':self._response_status})<block_end>is_oscrypto=isinstance(responder_private_key asymmetric.PrivateKey)<if_stmt><not>isinstance(responder_private_key keys.PrivateKeyInfo)<and><not>is_oscrypto<block_start><raise>TypeError(_pretty_message('''
responder_private_key must be an instance of
asn1crypto.keys.PrivateKeyInfo or
oscrypto.asymmetric.PrivateKey, not %s
''' _type_name(responder_private_key)))<block_end>cert_is_oscrypto=isinstance(responder_certificate asymmetric.Certificate)<if_stmt><not>isinstance(responder_certificate x509.Certificate)<and><not>cert_is_oscrypto<block_start><raise>TypeError(_pretty_message('''
responder_certificate must be an instance of
asn1crypto.x509.Certificate or
oscrypto.asymmetric.Certificate, not %s
''' _type_name(responder_certificate)))<block_end><if_stmt>cert_is_oscrypto<block_start>responder_certificate=responder_certificate.asn1<block_end><if_stmt>self._certificate_status_list<is><none><block_start><raise>ValueError(_pretty_message('''
certificate_status_list must be set if the response_status is
"successful"
'''))<block_end><def_stmt>_make_extension name value<block_start><return>{'extn_id':name 'critical':<false> 'extn_value':value}<block_end>responses=[]<for_stmt>serial,status self._certificate_status_list<block_start>response_data_extensions=[]<line_sep>single_response_extensions=[]<for_stmt>name,value self._response_data_extensions.items()<block_start>response_data_extensions.append(_make_extension(name value))<block_end><if_stmt>self._nonce<block_start>response_data_extensions.append(_make_extension('nonce' self._nonce))<block_end><if_stmt><not>response_data_extensions<block_start>response_data_extensions=<none><block_end><for_stmt>name,value self._single_response_extensions.items()<block_start>single_response_extensions.append(_make_extension(name value))<block_end><if_stmt>self._certificate_issuer<block_start>single_response_extensions.append(_make_extension('certificate_issuer' [x509.GeneralName(name='directory_name' value=self._certificate_issuer.subject)]))<block_end><if_stmt><not>single_response_extensions<block_start>single_response_extensions=<none><block_end>responder_key_hash=getattr(responder_certificate.public_key self._key_hash_algo)<if_stmt>status<eq>'good'<block_start>cert_status=ocsp.CertStatus(name='good' value=core.Null())<block_end><elif_stmt>status<eq>'unknown'<block_start>cert_status=ocsp.CertStatus(name='unknown' value=core.Null())<block_end><else_stmt><block_start>reason=status<if>status<ne>'revoked'<else>'unspecified'<line_sep>cert_status=ocsp.CertStatus(name='revoked' value={'revocation_time':self._revocation_date 'revocation_reason':reason })<block_end>issuer=self._certificate_issuer<if>self._certificate_issuer<else>responder_certificate<line_sep>produced_at=datetime.now(timezone.utc).replace(microsecond=0)<if_stmt>self._this_update<is><none><block_start>self._this_update=produced_at<block_end><if_stmt>self._next_update<is><none><block_start>self._next_update=(self._this_update+timedelta(days=7)).replace(microsecond=0)<block_end>response={'cert_id':{'hash_algorithm':{'algorithm':self._key_hash_algo} 'issuer_name_hash':getattr(issuer.subject self._key_hash_algo) 'issuer_key_hash':getattr(issuer.public_key self._key_hash_algo) 'serial_number':serial } 'cert_status':cert_status 'this_update':self._this_update 'next_update':self._next_update 'single_extensions':single_response_extensions}<line_sep>responses.append(response)<block_end>response_data=ocsp.ResponseData({'responder_id':ocsp.ResponderId(name='by_key' value=responder_key_hash) 'produced_at':produced_at 'responses':responses 'response_extensions':response_data_extensions})<line_sep>signature_algo=responder_private_key.algorithm<if_stmt>signature_algo<eq>'ec'<block_start>signature_algo='ecdsa'<block_end>signature_algorithm_id='%s_%s'%(self._hash_algo signature_algo)<if_stmt>responder_private_key.algorithm<eq>'rsa'<block_start>sign_func=asymmetric.rsa_pkcs1v15_sign<block_end><elif_stmt>responder_private_key.algorithm<eq>'dsa'<block_start>sign_func=asymmetric.dsa_sign<block_end><elif_stmt>responder_private_key.algorithm<eq>'ec'<block_start>sign_func=asymmetric.ecdsa_sign<block_end><if_stmt><not>is_oscrypto<block_start>responder_private_key=asymmetric.load_private_key(responder_private_key)<block_end>signature_bytes=sign_func(responder_private_key response_data.dump() self._hash_algo)<line_sep>certs=<none><if_stmt>self._certificate_issuer<and>getattr(self._certificate_issuer.public_key 
self._key_hash_algo)<ne>responder_key_hash<block_start>certs=[responder_certificate]<block_end><return>ocsp.OCSPResponse({'response_status':self._response_status 'response_bytes':{'response_type':'basic_ocsp_response' 'response':{'tbs_response_data':response_data 'signature_algorithm':{'algorithm':signature_algorithm_id} 'signature':signature_bytes 'certs':certs }}})<block_end><block_end># Enums
<class_stmt>ResponseStatus(enum.Enum)<block_start>successful='successful'<line_sep>malformed_request='malformed_request'<line_sep>internal_error='internal_error'<line_sep>try_later='try_later'<line_sep>sign_required='sign_required'<line_sep>unauthorized='unauthorized'<block_end><class_stmt>CertificateStatus(enum.Enum)<block_start>good='good'<line_sep>revoked='revoked'<line_sep>key_compromise='key_compromise'<line_sep>ca_compromise='ca_compromise'<line_sep>affiliation_changed='affiliation_changed'<line_sep>superseded='superseded'<line_sep>cessation_of_operation='cessation_of_operation'<line_sep>certificate_hold='certificate_hold'<line_sep>remove_from_crl='remove_from_crl'<line_sep>privilege_withdrawn='privilege_withdrawn'<line_sep>unknown='unknown'<block_end># API endpoints
FAULT_REVOKED="revoked"<line_sep>FAULT_UNKNOWN="unknown"<line_sep>app=Flask(__name__)<class_stmt>OCSPResponder<block_start><def_stmt>__init__ self issuer_cert:str responder_cert:str responder_key:str fault:str next_update_seconds:int<block_start>"""
Create a new OCSPResponder instance.
:param issuer_cert: Path to the issuer certificate.
:param responder_cert: Path to the certificate of the OCSP responder
with the `OCSP Signing` extension.
:param responder_key: Path to the private key belonging to the
responder cert.
:param fault: Optional fault to simulate: ``"revoked"`` or ``"unknown"`` force the
corresponding certificate status in every response, while ``None`` reports the
certificate as good.
:param next_update_seconds: How far in the future, in seconds, the ``nextUpdate``
value written into each response should be.
"""<line_sep># Certs and keys
self._issuer_cert=asymmetric.load_certificate(issuer_cert)<line_sep>self._responder_cert=asymmetric.load_certificate(responder_cert)<line_sep>self._responder_key=asymmetric.load_private_key(responder_key)<line_sep># Next update
self._next_update_seconds=next_update_seconds<line_sep>self._fault=fault<block_end><def_stmt>_fail self status:ResponseStatus<arrow>OCSPResponse<block_start>builder=OCSPResponseBuilder(response_status=status.value)<line_sep><return>builder.build()<block_end><def_stmt>parse_ocsp_request self request_der:bytes<arrow>OCSPRequest<block_start>"""
Parse the request bytes, return an ``OCSPRequest`` instance.
"""<line_sep><return>OCSPRequest.load(request_der)<block_end><def_stmt>validate self<block_start>time=datetime(2018 1 1 1 00 00 00 timezone.utc)<if_stmt>self._fault<eq>FAULT_REVOKED<block_start><return>(CertificateStatus.revoked time)<block_end><elif_stmt>self._fault<eq>FAULT_UNKNOWN<block_start><return>(CertificateStatus.unknown <none>)<block_end><elif_stmt>self._fault<ne><none><block_start><raise>NotImplemented('Fault type could not be found')<block_end><return>(CertificateStatus.good time)<block_end><def_stmt>_build_ocsp_response self ocsp_request:OCSPRequest<arrow>OCSPResponse<block_start>"""
Create and return an OCSP response from an OCSP request.
"""<line_sep># Get the certificate serial
tbs_request=ocsp_request['tbs_request']<line_sep>request_list=tbs_request['request_list']<if_stmt>len(request_list)<l>1<block_start>logger.warning('Received OCSP request with no requests')<line_sep><raise>NotImplementedError('Empty requests not supported')<block_end>single_request=request_list[0]# TODO: Support more than one request
req_cert=single_request['req_cert']<line_sep>serial=req_cert['serial_number'].native<line_sep># Check certificate status
<try_stmt><block_start>certificate_status,revocation_date=self.validate()<block_end><except_stmt>Exception<as>e<block_start>logger.exception('Could not determine certificate status: %s' e)<line_sep><return>self._fail(ResponseStatus.internal_error)<block_end>certificate_status_list=[(serial certificate_status.value)]<line_sep># Build the response
builder=OCSPResponseBuilder(**{'response_status':ResponseStatus.successful.value 'certificate_status_list':certificate_status_list 'revocation_date':revocation_date })<line_sep># Parse extensions
<for_stmt>extension tbs_request['request_extensions']<block_start>extn_id=extension['extn_id'].native<line_sep>critical=extension['critical'].native<line_sep>value=extension['extn_value'].parsed<line_sep># This variable tracks whether any unknown extensions were encountered
unknown=<false><line_sep># Handle nonce extension
<if_stmt>extn_id<eq>'nonce'<block_start>builder.nonce=value.native<block_end># That's all we know
<else_stmt><block_start>unknown=<true><block_end># If an unknown critical extension is encountered (which should not
# usually happen, according to RFC 6960 4.1.2), we should throw our
# hands up in despair and run.
<if_stmt>unknown<is><true><and>critical<is><true><block_start>logger.warning('Could not parse unknown critical extension: %r' dict(extension.native))<line_sep><return>self._fail(ResponseStatus.internal_error)<block_end># If it's an unknown non-critical extension, we can safely ignore it.
<elif_stmt>unknown<is><true><block_start>logger.info('Ignored unknown non-critical extension: %r' dict(extension.native))<block_end><block_end># Set certificate issuer
builder.certificate_issuer=self._issuer_cert<line_sep># Set next update date
now=datetime.now(timezone.utc)<line_sep>builder.next_update=(now+timedelta(seconds=self._next_update_seconds)).replace(microsecond=0)<line_sep><return>builder.build(self._responder_key self._responder_cert)<block_end><def_stmt>build_http_response self request_der:bytes<arrow>Response<block_start><global>app<line_sep>response_der=self._build_ocsp_response(request_der).dump()<line_sep>resp=app.make_response((response_der 200))<line_sep>resp.headers['content_type']='application/ocsp-response'<line_sep><return>resp<block_end><block_end>responder=<none><def_stmt>init_responder issuer_cert:str responder_cert:str responder_key:str fault:str next_update_seconds:int<block_start><global>responder<line_sep>responder=OCSPResponder(issuer_cert=issuer_cert responder_cert=responder_cert responder_key=responder_key fault=fault next_update_seconds=next_update_seconds)<block_end><def_stmt>init port=8080 debug=<false><block_start>logger.info('Launching %sserver on port %d' 'debug'<if>debug<else>'' port)<line_sep>app.run(port=port debug=debug)<block_end>@app.route('/' methods=['GET'])<def_stmt>_handle_root <block_start><return>'ocsp-responder'<block_end>@app.route('/status/' defaults={'u_path':''} methods=['GET'])@app.route('/status/<path:u_path>' methods=['GET'])<def_stmt>_handle_get u_path<block_start><global>responder<line_sep>"""
An OCSP GET request contains the DER-in-base64 encoded OCSP request in the
HTTP request URL.
"""<line_sep>der=base64.b64decode(u_path)<line_sep>ocsp_request=responder.parse_ocsp_request(der)<line_sep><return>responder.build_http_response(ocsp_request)<block_end>@app.route('/status' methods=['POST'])<def_stmt>_handle_post <block_start><global>responder<line_sep>"""
An OCSP POST request contains the DER encoded OCSP request in the HTTP
request body.
"""<line_sep>ocsp_request=responder.parse_ocsp_request(request.data)<line_sep><return>responder.build_http_response(ocsp_request)<block_end>
|
# Generated by Django 3.0.5 on 2020-05-07 18:22
<import_stmt>django.db.models.deletion<import_stmt>django.utils.timezone<import_from_stmt>django.conf settings<import_from_stmt>django.db migrations models<class_stmt>Migration(migrations.Migration)<block_start>dependencies=[("posthog" "0050_dashboards") ]<line_sep>operations=[migrations.AddField(model_name="cohort" name="created_at" field=models.DateTimeField(blank=<true> default=django.utils.timezone.now null=<true>) ) migrations.AddField(model_name="cohort" name="created_by" field=models.ForeignKey(blank=<true> null=<true> on_delete=django.db.models.deletion.SET_NULL to=settings.AUTH_USER_MODEL ) ) migrations.AddField(model_name="cohort" name="is_calculating" field=models.BooleanField(default=<false>) ) migrations.AddField(model_name="cohort" name="last_calculation" field=models.DateTimeField(blank=<true> null=<true>) ) migrations.CreateModel(name="CohortPeople" fields=[("id" models.AutoField(auto_created=<true> primary_key=<true> serialize=<false> verbose_name="ID" ) ) ("cohort" models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to="posthog.Cohort") ) ("person" models.ForeignKey(on_delete=django.db.models.deletion.CASCADE to="posthog.Person") ) ] ) migrations.AddField(model_name="cohort" name="people" field=models.ManyToManyField(through="posthog.CohortPeople" to="posthog.Person") ) migrations.AddIndex(model_name="cohortpeople" index=models.Index(fields=["cohort_id" "person_id"] name="posthog_coh_cohort__89c25f_idx") ) ]<block_end>
|
# --------------------------------------------------------
# TRIPLET LOSS
# Copyright (c) 2015 Pinguo Tech.
# Written by <NAME>
# --------------------------------------------------------
"""The data layer used during training a VGG_FACE network by triplet loss.
"""<import_stmt>caffe<import_stmt>numpy<as>np<import_from_stmt>numpy *<import_stmt>yaml<import_from_stmt>multiprocessing Process Queue<import_from_stmt>caffe._caffe RawBlobVec<import_from_stmt>sklearn preprocessing<class_stmt>TripletLayer(caffe.Layer)<block_start><global>no_residual_list margin<def_stmt>setup self bottom top<block_start>"""Setup the TripletDataLayer."""<assert_stmt>shape(bottom[0].data)<eq>shape(bottom[1].data)<assert_stmt>shape(bottom[0].data)<eq>shape(bottom[2].data)<line_sep>layer_params=yaml.load(self.param_str_)<line_sep>self.margin=layer_params['margin']<line_sep>self.a=1<line_sep>top[0].reshape(1)<block_end><def_stmt>forward self bottom top<block_start>"""Get blobs and copy them into this layer's top blob vector."""<line_sep>anchor_minibatch_db=[]<line_sep>positive_minibatch_db=[]<line_sep>negative_minibatch_db=[]<for_stmt>i range((bottom[0]).num)<block_start>anchor_minibatch_db.append(bottom[0].data[i])<line_sep>positive_minibatch_db.append(bottom[1].data[i])<line_sep>negative_minibatch_db.append(bottom[2].data[i])<block_end>loss=float(0)<line_sep>self.no_residual_list=[]<for_stmt>i range(((bottom[0]).num))<block_start>a=np.array(anchor_minibatch_db[i])<line_sep>p=np.array(positive_minibatch_db[i])<line_sep>n=np.array(negative_minibatch_db[i])<line_sep>a_p=a-p<line_sep>a_n=a-n<line_sep>ap=np.dot(a_p a_p)<line_sep>an=np.dot(a_n a_n)<line_sep>dist=(self.margin+ap-an)<line_sep>_loss=max(dist 0.0)<if_stmt>i<eq>0<block_start>print('loss:'+' ap:'+str(ap)+' '+'an:'+str(an))<block_end><if_stmt>_loss<eq>0<block_start>self.no_residual_list.append(i)<block_end>loss<augadd>_loss<block_end>loss=(loss/(2<times>(bottom[0]).num))<line_sep>top[0].data[<ellipsis>]=loss<block_end><def_stmt>backward self top propagate_down bottom<block_start>count=0<if_stmt>propagate_down[0]<block_start><for_stmt>i range((bottom[0]).num)<block_start><if_stmt><not>i<in>self.no_residual_list<block_start>x_a=bottom[0].data[i]<line_sep>x_p=bottom[1].data[i]<line_sep>x_n=bottom[2].data[i]<line_sep>#print x_a,x_p,x_n
bottom[0].diff[i]=self.a<times>((x_n-x_p)/((bottom[0]).num))<line_sep>bottom[1].diff[i]=self.a<times>((x_p-x_a)/((bottom[0]).num))<line_sep>bottom[2].diff[i]=self.a<times>((x_a-x_n)/((bottom[0]).num))<line_sep>count<augadd>1<block_end><else_stmt><block_start>bottom[0].diff[i]=np.zeros(shape(bottom[0].data)[1])<line_sep>bottom[1].diff[i]=np.zeros(shape(bottom[0].data)[1])<line_sep>bottom[2].diff[i]=np.zeros(shape(bottom[0].data)[1])<block_end><block_end><block_end>#print 'select gradient_loss:',bottom[0].diff[0][0]
#print shape(bottom[0].diff),shape(bottom[1].diff),shape(bottom[2].diff)
<block_end><def_stmt>reshape self bottom top<block_start>"""Reshaping happens during the call to forward."""<line_sep><pass><block_end><block_end>
|
# Copyright 2015 Netflix, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
<import_from_stmt>sqlalchemy Column Integer ForeignKey<import_from_stmt>sleepypuppy db<line_sep># Database association models
user_associations=db.Table('user_associations' Column('user_id' Integer ForeignKey('users.id')) Column('assessment_id' Integer ForeignKey('assessments.id')) )<line_sep>taxonomy=db.Table('taxonomy' Column('puppyscript_id' Integer ForeignKey('puppyscript.id')) Column('payload' Integer ForeignKey('payloads.id')) )<line_sep>
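# Not part of the original file: a hedged sketch of how association tables like these are
# typically consumed by models elsewhere in the package (the model and column names below
# are assumptions for illustration, not taken from this repository):
#
#     class Assessment(db.Model):
#         __tablename__ = 'assessments'
#         id = db.Column(Integer, primary_key=True)
#         users = db.relationship('User', secondary=user_associations, backref='assessments')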
|
# Time: O(nlogn)
# Space: O(n)
<class_stmt>Solution(object)<block_start><def_stmt>relativeSortArray self arr1 arr2<block_start>"""
:type arr1: List[int]
:type arr2: List[int]
:rtype: List[int]
"""<line_sep>lookup={v:i<for>i,v enumerate(arr2)}<line_sep><return>sorted(arr1 key=<lambda>i:lookup.get(i len(arr2)+i))<block_end><block_end>
|
<import_stmt>sys<import_from_stmt>django.core.management.base BaseCommand<import_from_stmt>corehq.apps.domain.models Domain<import_from_stmt>corehq.util.couch get_db_by_doc_type<class_stmt>Command(BaseCommand)<block_start>help="Purge ALL documents of a particular type. E.g. purge_docs MyDocType,AnotherOne"<def_stmt>handle self doc_types *args **options<block_start>user_input=input('\n'.join(['\n\nReally delete documents of the following types: {}?' 'This operation is not reversible. Enter a number N to delete the first '<concat>'N found, or type "delete all" to delete everything.' '' ]).format(doc_types))<if_stmt>user_input<eq>'delete all'<block_start>remaining=<none><block_end><else_stmt><block_start><try_stmt><block_start>remaining=int(user_input)<block_end><except_stmt>ValueError<block_start>print('aborting')<line_sep>sys.exit()<block_end><block_end>doc_types=doc_types.split(',')<line_sep>deleted=0<line_sep># unfortunately the only couch view we have for this needs to go by domain
# will be a bit slow
domain_names=Domain.get_all_names()<for_stmt>doc_type doc_types<block_start>db=get_db_by_doc_type(doc_type)<if_stmt><not>db<block_start>print("Cannot find db for {}, skipping".format(doc_type))<line_sep><continue><block_end><for_stmt>domain domain_names<block_start>docs=[row['doc']<for>row db.view('by_domain_doc_type_date/view' startkey=[domain doc_type] endkey=[domain doc_type {}] reduce=<false> include_docs=<true> )][:remaining]<if_stmt>docs<block_start>count=len(docs)<line_sep>print('deleting {} {}s from {}'.format(count doc_type domain))<line_sep>db.delete_docs(docs)<line_sep>deleted<augadd>count<if_stmt>remaining<is><not><none><block_start>remaining<augsub>count<if_stmt>remaining<le>0<block_start><return><block_end><block_end><block_end><block_end><block_end>print('successfully deleted {} documents'.format(deleted))<block_end><block_end>
|
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements PeepholeLSTM Cell."""<import_stmt>tensorflow<as>tf<line_sep>@tf.keras.utils.register_keras_serializable(package="Addons")<class_stmt>PeepholeLSTMCell(tf.keras.layers.LSTMCell)<block_start>"""Equivalent to `tf.keras.layers.LSTMCell` class but adds peephole connections.
Peephole connections allow the gates to utilize the previous internal state as
well as the previous hidden state (which is what LSTMCell is limited to).
This allows PeepholeLSTMCell to better learn precise timings over LSTMCell.
From [Gers et al., 2002](
http://www.jmlr.org/papers/volume3/gers02a/gers02a.pdf):
"We find that LSTM augmented by 'peephole connections' from its internal
cells to its multiplicative gates can learn the fine distinction between
sequences of spikes spaced either 50 or 49 time steps apart without the help
of any short training exemplars."
The peephole implementation is based on:
[Sak et al., 2014](https://research.google.com/pubs/archive/43905.pdf)
Example:
>>> inputs = np.random.random([30,23,9]).astype(np.float32)
>>> LSTMCell = tfa.rnn.PeepholeLSTMCell(4)
>>> rnn = tf.keras.layers.RNN(LSTMCell, return_sequences=True, return_state=True)
>>> outputs, memory_state, carry_state = rnn(inputs)
>>> outputs.shape
TensorShape([30, 23, 4])
>>> memory_state.shape
TensorShape([30, 4])
>>> carry_state.shape
TensorShape([30, 4])
"""<def_stmt>build self input_shape<block_start>super().build(input_shape)<line_sep># The following are the weight matrices for the peephole connections. These
# are multiplied with the previous internal state during the computation of
# carry and output.
self.input_gate_peephole_weights=self.add_weight(shape=(self.units ) name="input_gate_peephole_weights" initializer=self.kernel_initializer )<line_sep>self.forget_gate_peephole_weights=self.add_weight(shape=(self.units ) name="forget_gate_peephole_weights" initializer=self.kernel_initializer )<line_sep>self.output_gate_peephole_weights=self.add_weight(shape=(self.units ) name="output_gate_peephole_weights" initializer=self.kernel_initializer )<block_end><def_stmt>_compute_carry_and_output self x h_tm1 c_tm1<block_start>x_i,x_f,x_c,x_o=x<line_sep>h_tm1_i,h_tm1_f,h_tm1_c,h_tm1_o=h_tm1<line_sep>i=self.recurrent_activation(x_i+tf.keras.backend.dot(h_tm1_i self.recurrent_kernel[: :self.units])+self.input_gate_peephole_weights<times>c_tm1)<line_sep>f=self.recurrent_activation(x_f+tf.keras.backend.dot(h_tm1_f self.recurrent_kernel[: self.units:self.units<times>2])+self.forget_gate_peephole_weights<times>c_tm1)<line_sep>c=f<times>c_tm1+i<times>self.activation(x_c+tf.keras.backend.dot(h_tm1_c self.recurrent_kernel[: self.units<times>2:self.units<times>3]))<line_sep>o=self.recurrent_activation(x_o+tf.keras.backend.dot(h_tm1_o self.recurrent_kernel[: self.units<times>3:])+self.output_gate_peephole_weights<times>c)<line_sep><return>c o<block_end><def_stmt>_compute_carry_and_output_fused self z c_tm1<block_start>z0,z1,z2,z3=z<line_sep>i=self.recurrent_activation(z0+self.input_gate_peephole_weights<times>c_tm1)<line_sep>f=self.recurrent_activation(z1+self.forget_gate_peephole_weights<times>c_tm1)<line_sep>c=f<times>c_tm1+i<times>self.activation(z2)<line_sep>o=self.recurrent_activation(z3+self.output_gate_peephole_weights<times>c)<line_sep><return>c o<block_end><block_end>
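# For reference, the gate equations implemented in _compute_carry_and_output above, with
# the default activations (sigmoid for the gates, tanh for the candidate) and "*" denoting
# element-wise products with the peephole weight vectors added in build():
#   i_t = sigmoid(x_i + h_{t-1} R_i + w_input  * c_{t-1})
#   f_t = sigmoid(x_f + h_{t-1} R_f + w_forget * c_{t-1})
#   c_t = f_t * c_{t-1} + i_t * tanh(x_c + h_{t-1} R_c)
#   o_t = sigmoid(x_o + h_{t-1} R_o + w_output * c_t)
# where R_* are slices of the recurrent kernel. Note that, unlike the input and forget
# gates, the output gate peeks at the new cell state c_t rather than the previous one.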
|
<import_stmt>mtm.util.YamlSerializer<as>YamlSerializer<import_from_stmt>mtm.util.Assert *<import_stmt>mtm.ioc.Container<as>Container<import_from_stmt>mtm.ioc.Inject Inject<import_from_stmt>mtm.ioc.Inject InjectMany<import_stmt>mtm.ioc.IocAssertions<as>Assertions<import_from_stmt>mtm.util.Platforms Platforms<import_from_stmt>prj.main.ProjenyConstants ProjectConfigFileName<import_from_stmt>prj.main.ProjectConfig ProjectConfig<class_stmt>ProjectConfigChanger<block_start>_log=Inject('Logger')<line_sep>_sys=Inject('SystemHelper')<line_sep>_packageManager=Inject('PackageManager')<line_sep>_varMgr=Inject('VarManager')<def_stmt>_getProjectConfigPath self projectName<block_start><return>self._varMgr.expandPath('[UnityProjectsDir]/{0}/{1}'.format(projectName ProjectConfigFileName))<block_end><def_stmt>_loadProjectConfig self projectName<block_start>configPath=self._getProjectConfigPath(projectName)<line_sep>yamlData=YamlSerializer.deserialize(self._sys.readFileAsText(configPath))<line_sep>result=ProjectConfig()<for_stmt>pair yamlData.__dict__.items()<block_start>result.__dict__[pair[0]]=pair[1]<block_end><return>result<block_end><def_stmt>_saveProjectConfig self projectName projectConfig<block_start>configPath=self._getProjectConfigPath(projectName)<line_sep>self._sys.writeFileAsText(configPath YamlSerializer.serialize(projectConfig))<block_end><def_stmt>addPackage self projectName packageName addToAssetsFolder<block_start><with_stmt>self._log.heading('Adding package {0} to project {1}'.format(packageName projectName))<block_start>assertThat(packageName<in>self._packageManager.getAllPackageNames() "Could not find the given package '{0}' in the UnityPackages folder" packageName)<line_sep>self._packageManager.setPathsForProjectPlatform(projectName Platforms.Windows)<line_sep>projConfig=self._loadProjectConfig(projectName)<line_sep>assertThat(packageName<not><in>projConfig.assetsFolder<and>packageName<not><in>projConfig.pluginsFolder "Given package '{0}' has already been added to project config" packageName)<if_stmt>addToAssetsFolder<block_start>projConfig.assetsFolder.append(packageName)<block_end><else_stmt><block_start>projConfig.pluginsFolder.append(packageName)<block_end>self._saveProjectConfig(projectName projConfig)<line_sep>self._log.good("Added package '{0}' to file '{1}/{2}'" packageName projectName ProjectConfigFileName)<block_end><block_end><block_end>
|
<import_from_stmt>typing Tuple<import_from_stmt>.bybit BybitInverseDataStore BybitUSDTDataStore<line_sep>__all__:Tuple[str <ellipsis>]=('BybitInverseDataStore' 'BybitUSDTDataStore' )<line_sep>
|
<def_stmt>test_before_overflow <block_start><import_from_stmt>issue_11_proto A<line_sep>a=A()<line_sep>a.a0=0x7FFFFFFF<assert_stmt>A.FromString(a.SerializeToString()).a0<eq>2147483647<block_end><def_stmt>test_after_overflow <block_start><import_from_stmt>issue_11_proto A<line_sep>a=A()<line_sep>a.a0=0x80000000<assert_stmt>A.FromString(a.SerializeToString()).a0<eq>2147483648<block_end>
|
<import_stmt>json<import_stmt>threading<import_from_stmt>queue Queue<import_from_stmt>typing Optional List Any Dict Union<import_from_stmt>platypush.message.response Response<import_from_stmt>platypush.plugins.mqtt MqttPlugin action<import_from_stmt>platypush.plugins.switch SwitchPlugin<class_stmt>ZigbeeMqttPlugin(MqttPlugin SwitchPlugin)# lgtm [py/missing-call-to-init]
<block_start>"""
This plugin allows you to interact with Zigbee devices over MQTT through any Zigbee sniffer and
`zigbee2mqtt <https://www.zigbee2mqtt.io/>`_.
In order to get started you'll need:
- A Zigbee USB adapter/sniffer (in this example I'll use the
`CC2531 <https://hackaday.io/project/163487-zigbee-cc2531-smart-home-usb-adapter>`_.
- A Zigbee debugger/emulator + downloader cable (only to flash the firmware).
Instructions:
- Install `cc-tool <https://github.com/dashesy/cc-tool>`_ either from sources or from a package manager.
- Connect the Zigbee to your PC/RaspberryPi in this way: ::
USB -> CC debugger -> downloader cable -> CC2531 -> USB
- The debugger and the adapter should be connected *at the same time*. If the ``cc-tool`` command below throws
an error, put the device in sync while connected by pressing the _Reset_ button on the debugger.
- Check where the device is mapped. On Linux it will usually be ``/dev/ttyACM0``.
- Download the latest `Z-Stack firmware <https://github.com/Koenkk/Z-Stack-firmware/tree/master/coordinator>`_
to your device. Instructions for a CC2531 device:
.. code-block:: shell
wget https://github.com/Koenkk/Z-Stack-firmware/raw/master/coordinator/Z-Stack_Home_1.2/bin/default/CC2531_DEFAULT_20201127.zip
unzip CC2531_DEFAULT_20201127.zip
[sudo] cc-tool -e -w CC2531ZNP-Prod.hex
- You can disconnect your debugger and downloader cable once the firmware is flashed.
- Install ``zigbee2mqtt``. First install a node/npm environment, then either install ``zigbee2mqtt`` manually or
through your package manager. **NOTE**: many API-breaking changes were introduced in Zigbee2MQTT 1.17.0,
therefore this integration is only compatible with version 1.17.0 of the service or higher.
Manual instructions:
.. code-block:: shell
# Clone zigbee2mqtt repository
[sudo] git clone https://github.com/Koenkk/zigbee2mqtt.git /opt/zigbee2mqtt
[sudo] chown -R pi:pi /opt/zigbee2mqtt # Or whichever is your user
# Install dependencies (as user "pi")
cd /opt/zigbee2mqtt
npm install
- You need to have an MQTT broker running somewhere. If not, you can install
`Mosquitto <https://mosquitto.org/>`_ through your package manager on any device in your network.
- Edit the ``/opt/zigbee2mqtt/data/configuration.yaml`` file to match the configuration of your MQTT broker:
.. code-block:: yaml
# MQTT settings
mqtt:
# MQTT base topic for zigbee2mqtt MQTT messages
base_topic: zigbee2mqtt
# MQTT server URL
server: 'mqtt://localhost'
# MQTT server authentication, uncomment if required:
# user: my_user
# password: <PASSWORD>
- Also make sure that ``permit_join`` is set to ``True``, in order to allow Zigbee devices to join the network
while you're configuring it. It's equally important to set ``permit_join`` to ``False`` once you have
configured your network, to prevent accidental or malicious joins from unknown Zigbee devices.
- Start the ``zigbee2mqtt`` daemon on your device (the
`official documentation <https://www.zigbee2mqtt.io/getting_started/running_zigbee2mqtt.html#5-optional-running-as-a-daemon-with-systemctl>`_
also contains instructions on how to configure it as a ``systemd`` service:
.. code-block:: shell
cd /opt/zigbee2mqtt
npm start
- If you have Zigbee devices that are paired to other bridges, unlink them or do a factory reset to pair them
to your new bridge.
- If it all goes fine, once the daemon is running and a new device is found you should see traces like this in
the output of ``zigbee2mqtt``::
zigbee2mqtt:info 2019-11-09T12:19:56: Successfully interviewed '0x00158d0001dc126a', device has
successfully been paired
- You are now ready to use this integration.
Requires:
* **paho-mqtt** (``pip install paho-mqtt``)
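Example configuration (a hedged sketch; the plugin name and attribute names below are
assumptions inferred from the constructor arguments, not taken from the official docs):
.. code-block:: yaml
zigbee.mqtt:
host: localhost
port: 1883
base_topic: zigbee2mqtt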
"""<def_stmt>__init__ self host:str='localhost' port:int=1883 base_topic:str='zigbee2mqtt' timeout:int=10 tls_certfile:Optional[str]=<none> tls_keyfile:Optional[str]=<none> tls_version:Optional[str]=<none> tls_ciphers:Optional[str]=<none> username:Optional[str]=<none> password:Optional[str]=<none> **kwargs<block_start>"""
:param host: Default MQTT broker where ``zigbee2mqtt`` publishes its messages (default: ``localhost``).
:param port: Broker listen port (default: 1883).
:param base_topic: Topic prefix, as specified in ``/opt/zigbee2mqtt/data/configuration.yaml``
(default: ``zigbee2mqtt``).
:param timeout: If the command expects a response, this timeout value will be used
(default: 10 seconds).
:param tls_cafile: If the connection requires TLS/SSL, specify the certificate authority file
(default: None)
:param tls_certfile: If the connection requires TLS/SSL, specify the certificate file (default: None)
:param tls_keyfile: If the connection requires TLS/SSL, specify the key file (default: None)
:param tls_version: If the connection requires TLS/SSL, specify the minimum TLS supported version
(default: None)
:param tls_ciphers: If the connection requires TLS/SSL, specify the supported ciphers (default: None)
:param username: If the connection requires user authentication, specify the username (default: None)
:param password: If the connection requires user authentication, specify the password (default: None)
"""<line_sep>super().__init__(host=host port=port tls_certfile=tls_certfile tls_keyfile=tls_keyfile tls_version=tls_version tls_ciphers=tls_ciphers username=username password=password **kwargs)<line_sep>self.base_topic=base_topic<line_sep>self.timeout=timeout<line_sep>self._info={'devices':{} 'groups':{} }<block_end><def_stmt>_get_network_info self **kwargs<block_start>self.logger.info('Fetching Zigbee network information')<line_sep>client=<none><line_sep>mqtt_args=self._mqtt_args(**kwargs)<line_sep>timeout=30<if_stmt>'timeout'<in>mqtt_args<block_start>timeout=mqtt_args.pop('timeout')<block_end>info={'state':<none> 'info':{} 'config':{} 'devices':[] 'groups':[] }<line_sep>info_ready_events={topic:threading.Event()<for>topic info.keys()}<def_stmt>_on_message <block_start><def_stmt>callback _ __ msg<block_start>topic=msg.topic.split('/')[-1]<if_stmt>topic<in>info<block_start>info[topic]=msg.payload.decode()<if>topic<eq>'state'<else>json.loads(msg.payload.decode())<line_sep>info_ready_events[topic].set()<block_end><block_end><return>callback<block_end><try_stmt><block_start>host=mqtt_args.pop('host')<line_sep>port=mqtt_args.pop('port')<line_sep>client=self._get_client(**mqtt_args)<line_sep>client.on_message=_on_message()<line_sep>client.connect(host port keepalive=timeout)<line_sep>client.subscribe(self.base_topic+'/bridge/#')<line_sep>client.loop_start()<for_stmt>event info_ready_events.values()<block_start>info_ready=event.wait(timeout=timeout)<if_stmt><not>info_ready<block_start><raise>TimeoutError('A timeout occurred while fetching the Zigbee network information')<block_end><block_end># Cache the new results
self._info['devices']={device.get('friendly_name' device['ieee_address']):device<for>device info.get('devices' [])}<line_sep>self._info['groups']={group.get('name'):group<for>group info.get('groups' [])}<line_sep>self.logger.info('Zigbee network configuration updated')<line_sep><return>info<block_end><finally_stmt><block_start><try_stmt><block_start>client.loop_stop()<line_sep>client.disconnect()<block_end><except_stmt>Exception<as>e<block_start>self.logger.warning('Error on MQTT client disconnection: {}'.format(str(e)))<block_end><block_end><block_end><def_stmt>_topic self topic<block_start><return>self.base_topic+'/'+topic<block_end>@staticmethod<def_stmt>_parse_response response:Union[dict Response]<arrow>dict<block_start><if_stmt>isinstance(response Response)<block_start>response=response.output<block_end><assert_stmt>response.get('status')<ne>'error' response.get('error' 'zigbee2mqtt error')<line_sep><return>response<block_end>@action<def_stmt>devices self **kwargs<arrow>List[Dict[str Any]]<block_start>"""
Get the list of devices registered to the service.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
:return: List of paired devices. Example output:
.. code-block:: json
[
{
"date_code": "20190608",
"friendly_name": "Coordinator",
"ieee_address": "0x00123456789abcde",
"network_address": 0,
"supported": false,
"type": "Coordinator",
"interviewing": false,
"interviewing_completed": true,
"definition": null,
"endpoints": {
"13": {
"bindings": [],
"clusters": {
"input": ["genOta"],
"output": []
},
"output": []
}
}
},
{
"date_code": "20180906",
"friendly_name": "<NAME>",
"ieee_address": "0x00123456789abcdf",
"network_address": 52715,
"power_source": "Mains (single phase)",
"software_build_id": "5.127.1.26581",
"model_id": "LCT001",
"supported": true,
"interviewing": false,
"interviewing_completed": true,
"type": "Router",
"definition": {
"description": "Hue white and color ambiance E26/E27/E14",
"model": "9290012573A",
"vendor": "Philips",
"exposes": [
{
"features": [
{
"access": 7,
"description": "On/off state of this light",
"name": "state",
"property": "state",
"type": "binary",
"value_off": "OFF",
"value_on": "ON",
"value_toggle": "TOGGLE"
},
{
"access": 7,
"description": "Brightness of this light",
"name": "brightness",
"property": "brightness",
"type": "numeric",
"value_max": 254,
"value_min": 0
},
{
"access": 7,
"description": "Color temperature of this light",
"name": "color_temp",
"property": "color_temp",
"type": "numeric",
"unit": "mired",
"value_max": 500,
"value_min": 150
},
{
"description": "Color of this light in the CIE 1931 color space (x/y)",
"features": [
{
"access": 7,
"name": "x",
"property": "x",
"type": "numeric"
},
{
"access": 7,
"name": "y",
"property": "y",
"type": "numeric"
}
],
"name": "color_xy",
"property": "color",
"type": "composite"
}
],
"type": "light"
},
{
"access": 2,
"description": "Triggers an effect on the light (e.g. make light blink for a few seconds)",
"name": "effect",
"property": "effect",
"type": "enum",
"values": [
"blink",
"breathe",
"okay",
"channel_change",
"finish_effect",
"stop_effect"
]
},
{
"access": 1,
"description": "Link quality (signal strength)",
"name": "linkquality",
"property": "linkquality",
"type": "numeric",
"unit": "lqi",
"value_max": 255,
"value_min": 0
}
]
},
"endpoints": {
"11": {
"bindings": [],
"clusters": {
"input": [
"genBasic",
"genIdentify",
"genGroups",
"genScenes",
"genOnOff",
"genLevelCtrl",
"touchlink",
"lightingColorCtrl",
"manuSpecificUbisysDimmerSetup"
],
"output": [
"genOta"
]
},
"configured_reportings": []
},
"242": {
"bindings": [],
"clusters": {
"input": [
"greenPower"
],
"output": [
"greenPower"
]
},
"configured_reportings": []
}
}
}
]
"""<line_sep><return>self._get_network_info(**kwargs).get('devices')<block_end>@action<def_stmt>permit_join self permit:bool=<true> timeout:Optional[float]=<none> **kwargs<block_start>"""
Allow or disallow devices to join the network. This is not persistent (will not be saved to
``configuration.yaml``).
:param permit: Set to True to allow joins, False otherwise.
:param timeout: Allow/disallow joins only for this amount of time.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""<if_stmt>timeout<block_start><return>self._parse_response(self.publish(topic=self._topic('bridge/request/permit_join') msg={'value':permit 'time':timeout} reply_topic=self._topic('bridge/response/permit_join') **self._mqtt_args(**kwargs)))<block_end><return>self.publish(topic=self._topic('bridge/request/permit_join') msg={'value':permit} **self._mqtt_args(**kwargs))<block_end>@action<def_stmt>factory_reset self **kwargs<block_start>"""
Perform a factory reset of a device connected to the network, following the procedure required by the particular
device (for instance, Hue bulbs require the Zigbee adapter to be close to the device while a button on the back
of the bulb is pressed).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""<line_sep>self.publish(topic=self._topic('bridge/request/touchlink/factory_reset') msg='' **self._mqtt_args(**kwargs))<block_end>@action<def_stmt>log_level self level:str **kwargs<block_start>"""
Change the log level at runtime. This change will not be persistent.
:param level: Possible values: 'debug', 'info', 'warn', 'error'.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/config/log_level') msg={'value':level} reply_topic=self._topic('bridge/response/config/log_level') **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>device_set_option self device:str option:str value:Any **kwargs<block_start>"""
Change the options of a device. Options can only be changed, not added or deleted.
:param device: Display name of the device.
:param option: Option name.
:param value: New value.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/device/options') reply_topic=self._topic('bridge/response/device/options') msg={'id':device 'options':{option:value }} **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>device_remove self device:str force:bool=<false> **kwargs<block_start>"""
Remove a device from the network.
:param device: Display name of the device.
:param force: Force the remove also if the removal wasn't acknowledged by the device. Note: a forced remove
only removes the entry from the internal database, but the device is likely to connect again when
restarted unless it's factory reset (default: False).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish``
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/device/remove') msg={'id':device 'force':force} reply_topic=self._topic('bridge/response/device/remove') **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>device_ban self device:str **kwargs<block_start>"""
Ban a device from the network.
:param device: Display name of the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/device/ban') reply_topic=self._topic('bridge/response/device/ban') msg={'id':device} **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>device_whitelist self device:str **kwargs<block_start>"""
Whitelist a device on the network. Note: once at least one device is whitelisted, all the other non-whitelisted
devices will be removed from the network.
:param device: Display name of the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/device/whitelist') reply_topic=self._topic('bridge/response/device/whitelist') msg={'id':device} **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>device_rename self name:str device:Optional[str]=<none> **kwargs<block_start>"""
Rename a device on the network.
:param name: New name.
:param device: Current name of the device to rename. If no device is specified then the rename will
affect the last device that joined the network.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
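Example, as a minimal sketch (the device names are hypothetical):
.. code-block:: python
    from platypush.context import get_plugin
    # Rename an existing device by its current friendly name
    get_plugin('zigbee.mqtt').device_rename('KitchenBulb', device='Bulb1')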
"""<if_stmt>name<eq>device<block_start>self.logger.info('Old and new name are the same: nothing to do')<line_sep><return><block_end># noinspection PyUnresolvedReferences
devices=self.devices().output<assert_stmt><not>[dev<for>dev devices<if>dev.get('friendly_name')<eq>name] 'A device named {} already exists on the network'.format(name)<if_stmt>device<block_start>req={'from':device 'to':name }<block_end><else_stmt><block_start>req={'last':<true> 'to':name }<block_end><return>self._parse_response(self.publish(topic=self._topic('bridge/request/device/rename') msg=req reply_topic=self._topic('bridge/response/device/rename') **self._mqtt_args(**kwargs)))<block_end>@staticmethod<def_stmt>build_device_get_request values:List[Dict[str Any]]<arrow>dict<block_start><def_stmt>extract_value value:dict root:dict<block_start><if_stmt><not>value.get('access' 1)&0x1# Property not readable
<block_start><return><block_end><if_stmt>'features'<not><in>value<block_start><if_stmt>'property'<in>value<block_start>root[value['property']]=0<if>value['type']<eq>'numeric'<else>''<block_end><return><block_end><if_stmt>'property'<in>value<block_start>root[value['property']]=root.get(value['property'] {})<line_sep>root=root[value['property']]<block_end><for_stmt>feature value['features']<block_start>extract_value(feature root)<block_end><block_end>ret={}<for_stmt>value values<block_start>extract_value(value root=ret)<block_end><return>ret<block_end># noinspection PyShadowingBuiltins
@action<def_stmt>device_get self device:str property:Optional[str]=<none> **kwargs<arrow>Dict[str Any]<block_start>"""
Get the properties of a device. The returned keys vary depending on the device. For example, a light bulb
may have the "``state``" and "``brightness``" properties, while an environment sensor may have the
"``temperature``" and "``humidity``" properties, and so on.
:param device: Display name of the device.
:param property: Name of the property that should be retrieved (default: all).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
:return: Key->value map of the device properties.
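Example, as a minimal sketch (the device name ``Bulb`` and its exposed properties are hypothetical):
.. code-block:: python
    from platypush.context import get_plugin
    plugin = get_plugin('zigbee.mqtt')
    # All the properties exposed by the device
    props = plugin.device_get('Bulb').output
    # A single property
    state = plugin.device_get('Bulb', property='state').output['state']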
"""<line_sep>kwargs=self._mqtt_args(**kwargs)<if_stmt>property<block_start>properties=self.publish(topic=self._topic(device)+'/get/'+property reply_topic=self._topic(device) msg={property:''} **kwargs).output<assert_stmt>property<in>properties 'No such property: '+property<line_sep><return>{property:properties[property]}<block_end><if_stmt>device<not><in>self._info.get('devices' {})# Refresh devices info
<block_start>self._get_network_info(**kwargs)<block_end><assert_stmt>self._info.get('devices' {}).get(device) 'No such device: '+device<line_sep>exposes=(self._info.get('devices' {}).get(device {}).get('definition' {})<or>{}).get('exposes' [])<if_stmt><not>exposes<block_start><return>{}<block_end><return>self.publish(topic=self._topic(device)+'/get' reply_topic=self._topic(device) msg=self.build_device_get_request(exposes) **kwargs)<block_end>@action<def_stmt>devices_get self devices:Optional[List[str]]=<none> **kwargs<arrow>Dict[str dict]<block_start>"""
Get the properties of the devices connected to the network.
:param devices: If set, then only the status of these devices (by friendly name) will be retrieved (default:
retrieve all).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
:return: Key->value map of the device properties:
.. code-block:: json
{
"Bulb": {
"state": "ON",
"brightness": 254
},
"Sensor": {
"temperature": 22.5
}
}
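A minimal usage sketch (the device names are hypothetical):
.. code-block:: python
    from platypush.context import get_plugin
    # The status of each device is fetched in a separate worker thread
    statuses = get_plugin('zigbee.mqtt').devices_get(['Bulb', 'Sensor']).output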
"""<line_sep>kwargs=self._mqtt_args(**kwargs)<if_stmt><not>devices# noinspection PyUnresolvedReferences
<block_start>devices=set([device['friendly_name']<or>device['ieee_address']<for>device self.devices(**kwargs).output])<block_end><def_stmt>worker device:str q:Queue# noinspection PyUnresolvedReferences
<block_start>q.put(self.device_get(device **kwargs).output)<block_end>queues={}<line_sep>workers={}<line_sep>response={}<for_stmt>device devices<block_start>queues[device]=Queue()<line_sep>workers[device]=threading.Thread(target=worker args=(device queues[device]))<line_sep>workers[device].start()<block_end><for_stmt>device devices<block_start><try_stmt><block_start>response[device]=queues[device].get(timeout=kwargs.get('timeout'))<line_sep>workers[device].join(timeout=kwargs.get('timeout'))<block_end><except_stmt>Exception<as>e<block_start>self.logger.warning('An error while getting the status of the device {}: {}'.format(device str(e)))<block_end><block_end><return>response<block_end>@action<def_stmt>status self device:Optional[str]=<none> *args **kwargs<block_start>"""
Get the status of a device (by friendly name) or of all the connected devices (it wraps :meth:`.devices_get`).
:param device: Device friendly name (default: get all devices).
"""<line_sep><return>self.devices_get([device] *args **kwargs)<block_end># noinspection PyShadowingBuiltins,DuplicatedCode
@action<def_stmt>device_set self device:str property:str value:Any **kwargs<block_start>"""
Set a property on a device. The compatible properties vary depending on the device. For example, a light bulb
may have the "``state``" and "``brightness``" properties, while an environment sensor may have the
"``temperature``" and "``humidity``" properties, and so on.
:param device: Display name of the device.
:param property: Name of the property that should be set.
:param value: New value of the property.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
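Example, as a minimal sketch (the device name and the property values are hypothetical):
.. code-block:: python
    from platypush.context import get_plugin
    plugin = get_plugin('zigbee.mqtt')
    # Turn a bulb on and dim it to about 50%
    plugin.device_set('Bulb', 'state', 'ON')
    plugin.device_set('Bulb', 'brightness', 128)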
"""<line_sep>properties=self.publish(topic=self._topic(device+'/set') reply_topic=self._topic(device) msg={property:value} **self._mqtt_args(**kwargs)).output<if_stmt>property<block_start><assert_stmt>property<in>properties 'No such property: '+property<line_sep><return>{property:properties[property]}<block_end><return>properties<block_end>@action<def_stmt>device_check_ota_updates self device:str **kwargs<arrow>dict<block_start>"""
Check if the specified device has any OTA updates available to install.
:param device: Address or friendly name of the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
:return:
.. code-block:: json
{
"id": "<device ID>",
"update_available": true,
"status": "ok"
}
"""<line_sep>ret=self._parse_response(self.publish(topic=self._topic('bridge/request/device/ota_update/check') reply_topic=self._topic('bridge/response/device/ota_update/check') msg={'id':device} **self._mqtt_args(**kwargs)))<line_sep><return>{'status':ret['status'] 'id':ret.get('data' {}).get('id') 'update_available':ret.get('data' {}).get('update_available' <false>) }<block_end>@action<def_stmt>device_install_ota_updates self device:str **kwargs<block_start>"""
Install OTA updates for a device if available.
:param device: Address or friendly name of the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/device/ota_update/update') reply_topic=self._topic('bridge/response/device/ota_update/update') msg={'id':device} **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>groups self **kwargs<arrow>List[dict]<block_start>"""
Get the groups registered on the device.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep><return>self._get_network_info(**kwargs).get('groups')<block_end>@action<def_stmt>info self **kwargs<arrow>dict<block_start>"""
Get the information, configuration and state of the network.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
:return: Example:
.. code-block:: json
{
"state": "online",
"commit": "07cdc9d",
"config": {
"advanced": {
"adapter_concurrent": null,
"adapter_delay": null,
"availability_blacklist": [],
"availability_blocklist": [],
"availability_passlist": [],
"availability_timeout": 0,
"availability_whitelist": [],
"cache_state": true,
"cache_state_persistent": true,
"cache_state_send_on_startup": true,
"channel": 11,
"elapsed": false,
"ext_pan_id": [
221,
221,
221,
221,
221,
221,
221,
221
],
"homeassistant_discovery_topic": "homeassistant",
"homeassistant_legacy_triggers": true,
"homeassistant_status_topic": "hass/status",
"last_seen": "disable",
"legacy_api": true,
"log_directory": "/opt/zigbee2mqtt/data/log/%TIMESTAMP%",
"log_file": "log.txt",
"log_level": "debug",
"log_output": [
"console",
"file"
],
"log_rotation": true,
"log_syslog": {},
"pan_id": 6754,
"report": false,
"soft_reset_timeout": 0,
"timestamp_format": "YYYY-MM-DD HH:mm:ss"
},
"ban": [],
"blocklist": [],
"device_options": {},
"devices": {
"0x00123456789abcdf": {
"friendly_name": "My Lightbulb"
}
},
"experimental": {
"output": "json"
},
"external_converters": [],
"groups": {},
"homeassistant": false,
"map_options": {
"graphviz": {
"colors": {
"fill": {
"coordinator": "#e04e5d",
"enddevice": "#fff8ce",
"router": "#4ea3e0"
},
"font": {
"coordinator": "#ffffff",
"enddevice": "#000000",
"router": "#ffffff"
},
"line": {
"active": "#009900",
"inactive": "#994444"
}
}
}
},
"mqtt": {
"base_topic": "zigbee2mqtt",
"force_disable_retain": false,
"include_device_information": false,
"server": "mqtt://localhost"
},
"passlist": [],
"permit_join": true,
"serial": {
"disable_led": false,
"port": "/dev/ttyUSB0"
},
"whitelist": []
},
"coordinator": {
"meta": {
"maintrel": 3,
"majorrel": 2,
"minorrel": 6,
"product": 0,
"revision": 20190608,
"transportrev": 2
},
"type": "zStack12"
},
"log_level": "debug",
"network": {
"channel": 11,
"extended_pan_id": "0xdddddddddddddddd",
"pan_id": 6754
},
"permit_join": true,
"version": "1.17.0"
}
"""<line_sep>info=self._get_network_info(**kwargs)<line_sep><return>{'state':info.get('state') 'info':info.get('info') }<block_end>@action<def_stmt>group_add self name:str id:Optional[int]=<none> **kwargs<block_start>"""
Add a new group.
:param name: Display name of the group.
:param id: Optional numeric ID (default: auto-generated).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep>payload=name<if>id<is><none><else>{'id':id 'friendly_name':name }<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/group/add') reply_topic=self._topic('bridge/response/group/add') msg=payload **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>group_get self group:str property:Optional[str]=<none> **kwargs<arrow>dict<block_start>"""
Get one or more properties of a group. The compatible properties vary depending on the devices in the group.
For example, a light bulb may have the "``state``" (with values ``"ON"`` and ``"OFF"``) and "``brightness``"
properties, while an environment sensor may have the "``temperature``" and "``humidity``" properties, and so on.
:param group: Display name of the group.
:param property: Name of the property to retrieve (default: all available properties).
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep>msg={}<if_stmt>property<block_start>msg={property:''}<block_end>properties=self.publish(topic=self._topic(group+'/get') reply_topic=self._topic(group) msg=msg **self._mqtt_args(**kwargs)).output<if_stmt>property<block_start><assert_stmt>property<in>properties 'No such property: '+property<line_sep><return>{property:properties[property]}<block_end><return>properties<block_end># noinspection PyShadowingBuiltins,DuplicatedCode
@action<def_stmt>group_set self group:str property:str value:Any **kwargs<block_start>"""
Set a property on a group. The compatible properties vary depending on the devices in the group.
For example, a light bulb may have the "``state``" (with values ``"ON"`` and ``"OFF"``) and "``brightness``"
properties, while an environment sensor may have the "``temperature``" and "``humidity``" properties, and so on.
:param group: Display name of the group.
:param property: Name of the property that should be set.
:param value: New value of the property.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep>properties=self.publish(topic=self._topic(group+'/set') reply_topic=self._topic(group) msg={property:value} **self._mqtt_args(**kwargs)).output<if_stmt>property<block_start><assert_stmt>property<in>properties 'No such property: '+property<line_sep><return>{property:properties[property]}<block_end><return>properties<block_end>@action<def_stmt>group_rename self name:str group:str **kwargs<block_start>"""
Rename a group.
:param name: New name.
:param group: Current name of the group to rename.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<if_stmt>name<eq>group<block_start>self.logger.info('Old and new name are the same: nothing to do')<line_sep><return><block_end># noinspection PyUnresolvedReferences
groups={group.get('friendly_name'):group<for>group self.groups().output}<assert_stmt>name<not><in>groups 'A group named {} already exists on the network'.format(name)<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/group/rename') reply_topic=self._topic('bridge/response/group/rename') msg={'from':group 'to':name}<if>group<else>name **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>group_remove self name:str **kwargs<block_start>"""
Remove a group.
:param name: Display name of the group.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/group/remove') reply_topic=self._topic('bridge/response/group/remove') msg=name **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>group_add_device self group:str device:str **kwargs<block_start>"""
Add a device to a group.
:param group: Display name of the group.
:param device: Display name of the device to be added.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
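A minimal sketch of a typical workflow (the group and device names are hypothetical):
.. code-block:: python
    from platypush.context import get_plugin
    plugin = get_plugin('zigbee.mqtt')
    plugin.group_add('LivingRoom')
    plugin.group_add_device('LivingRoom', 'LivingRoomBulb')
    # The devices in the group can now be controlled together
    plugin.group_set('LivingRoom', 'state', 'ON')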
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/group/members/add') reply_topic=self._topic('bridge/response/group/members/add') msg={'group':group 'device':device } **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>group_remove_device self group:str device:Optional[str]=<none> **kwargs<block_start>"""
Remove a device from a group.
:param group: Display name of the group.
:param device: Display name of the device to be removed. If none is specified then all the devices registered
to the specified group will be removed.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/group/members/remove{}'.format('_all'<if>device<is><none><else>'')) reply_topic=self._topic('bridge/response/group/members/remove{}'.format('_all'<if>device<is><none><else>'')) msg={'group':group 'device':device } **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>bind_devices self source:str target:str **kwargs<block_start>"""
Bind two devices. Binding makes it possible for devices to directly control each other without the
intervention of zigbee2mqtt or any home automation software. You may want to use this feature to bind
for example an IKEA/Philips Hue dimmer switch to a light bulb, or a Zigbee remote to a thermostat.
Read more on the `zigbee2mqtt binding page <https://www.zigbee2mqtt.io/information/binding.html>`_.
:param source: Name of the source device. It can also be a group name, although the support is
`still experimental <https://www.zigbee2mqtt.io/information/binding.html#binding-a-group>`_.
You can also bind a specific device endpoint - for example ``MySensor/temperature``.
:param target: Name of the target device.
You can also bind a specific device endpoint - for example ``MyLight/state``.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
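Example, as a minimal sketch (the device names are hypothetical):
.. code-block:: python
    from platypush.context import get_plugin
    # Let a dimmer switch control a bulb directly, without going through zigbee2mqtt
    get_plugin('zigbee.mqtt').bind_devices('LivingRoomDimmer', 'LivingRoomBulb')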
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/device/bind') reply_topic=self._topic('bridge/response/device/bind') msg={'from':source 'to':target} **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>unbind_devices self source:str target:str **kwargs<block_start>"""
Un-bind two devices.
:param source: Name of the source device.
You can also bind a specific device endpoint - for example ``MySensor/temperature``.
:param target: Name of the target device.
You can also bind a specific device endpoint - for example ``MyLight/state``.
:param kwargs: Extra arguments to be passed to :meth:`platypush.plugins.mqtt.MqttPlugin.publish`
(default: query the default configured device).
"""<line_sep><return>self._parse_response(self.publish(topic=self._topic('bridge/request/device/unbind') reply_topic=self._topic('bridge/response/device/unbind') msg={'from':source 'to':target} **self._mqtt_args(**kwargs)))<block_end>@action<def_stmt>on self device *args **kwargs<arrow>dict<block_start>"""
Implements :meth:`platypush.plugins.switch.plugin.SwitchPlugin.on` and turns on a Zigbee device with a writable
binary property.
"""<line_sep>switch_info=self._get_switches_info().get(device)<assert_stmt>switch_info '{} is not a valid switch'.format(device)<line_sep>props=self.device_set(device switch_info['property'] switch_info['value_on']).output<line_sep><return>self._properties_to_switch(device=device props=props switch_info=switch_info)<block_end>@action<def_stmt>off self device *args **kwargs<arrow>dict<block_start>"""
Implements :meth:`platypush.plugins.switch.plugin.SwitchPlugin.off` and turns off a Zigbee device with a
writable binary property.
"""<line_sep>switch_info=self._get_switches_info().get(device)<assert_stmt>switch_info '{} is not a valid switch'.format(device)<line_sep>props=self.device_set(device switch_info['property'] switch_info['value_off']).output<line_sep><return>self._properties_to_switch(device=device props=props switch_info=switch_info)<block_end>@action<def_stmt>toggle self device *args **kwargs<arrow>dict<block_start>"""
Implements :meth:`platypush.plugins.switch.plugin.SwitchPlugin.toggle` and toggles a Zigbee device with a
writable binary property.
"""<line_sep>switch_info=self._get_switches_info().get(device)<assert_stmt>switch_info '{} is not a valid switch'.format(device)<line_sep>props=self.device_set(device switch_info['property'] switch_info['value_toggle']).output<line_sep><return>self._properties_to_switch(device=device props=props switch_info=switch_info)<block_end>@staticmethod<def_stmt>_properties_to_switch device:str props:dict switch_info:dict<arrow>dict<block_start><return>{'on':props[switch_info['property']]<eq>switch_info['value_on'] 'friendly_name':device 'name':device **props }<block_end><def_stmt>_get_switches_info self<arrow>dict<block_start><def_stmt>switch_info device_info:dict<arrow>dict<block_start>exposes=(device_info.get('definition' {})<or>{}).get('exposes' [])<for_stmt>exposed exposes<block_start><for_stmt>feature exposed.get('features' [])<block_start><if_stmt>feature.get('type')<eq>'binary'<and>'value_on'<in>feature<and>'value_off'<in>feature<and>feature.get('access' 0)&2<block_start><return>{'property':feature['property'] 'value_on':feature['value_on'] 'value_off':feature['value_off'] 'value_toggle':feature.get('value_toggle' <none>) }<block_end><block_end><block_end><return>{}<block_end># noinspection PyUnresolvedReferences
devices=self.devices().output<line_sep>switches_info={}<for_stmt>device devices<block_start>info=switch_info(device)<if_stmt><not>info<block_start><continue><block_end>switches_info[device.get('friendly_name' device.get('ieee_address'))]=info<block_end><return>switches_info<block_end>@property<def_stmt>switches self<arrow>List[dict]<block_start>"""
Implements the :class:`platypush.plugins.switch.SwitchPlugin.switches` property and returns the state of any
device on the Zigbee network identified as a switch (a device is identified as a switch if it exposes a writable
binary property, typically ``state``, that can be set to ``ON`` or ``OFF``).
"""<line_sep>switches_info=self._get_switches_info()<line_sep># noinspection PyUnresolvedReferences
<return>[self._properties_to_switch(device=name props=switch switch_info=switches_info[name])<for>name,switch self.devices_get(list(switches_info.keys())).output.items()]<block_end><block_end># vim:sw=4:ts=4:et:
|
# TensorFlow implementation of a DCGAN model for SVHN
<import_stmt>tensorflow<as>tf<line_sep>init_kernel=tf.contrib.layers.xavier_initializer()<line_sep>image_size=32<line_sep>learning_rate=0.003<line_sep>batch_size=32<line_sep>kernel_conv_size=3<line_sep>filters_conv=64<line_sep>filters_fc=128<line_sep>strides_conv=2<def_stmt>UnPooling2x2ZeroFilled x# https://github.com/tensorflow/tensorflow/issues/2169
<block_start>out=tf.concat([x tf.zeros_like(x)] 3)<line_sep>out=tf.concat([out tf.zeros_like(out)] 2)<line_sep>sh=x.get_shape().as_list()<if_stmt><none><not><in>sh[1:]<block_start>out_size=[-1 sh[1]<times>2 sh[2]<times>2 sh[3]]<line_sep><return>tf.reshape(out out_size)<block_end><else_stmt><block_start>shv=tf.shape(x)<line_sep>ret=tf.reshape(out tf.stack([-1 shv[1]<times>2 shv[2]<times>2 sh[3]]))<line_sep><return>ret<block_end><block_end><def_stmt>network x_inp is_training=<false> getter=<none> reuse=<false><block_start>""" Network architecture in tensorflow
Encodes the input with a strided convolution and a dense layer, then reconstructs it through the
mirrored (inverse) dense, unpooling and transposed-convolution layers.
Args:
x_inp (tensor): input data for the encoder.
is_training (bool): training-mode flag (currently unused).
getter: optional custom variable getter for the scope.
reuse (bool): sharing variables or not
Returns:
x (tensor): reconstruction of the input, with the same shape as x_inp
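Example (a minimal sketch, assuming a TF1-style float placeholder shaped like the 32x32 SVHN inputs;
image_size is the module-level constant defined above):
    x_pl = tf.placeholder(tf.float32, [None, image_size, image_size, 3])
    rec = network(x_pl, is_training=True)
    # A second call with reuse=True shares the same weights (e.g. at evaluation time)
    rec_eval = network(x_pl, reuse=True)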
"""<with_stmt>tf.variable_scope('network' reuse=reuse custom_getter=getter)<block_start>kernel_conv=tf.get_variable('kernel_conv' [kernel_conv_size kernel_conv_size 3 filters_conv] initializer=init_kernel)<line_sep>conv_output_size=int(image_size<times>image_size/4/strides_conv/strides_conv<times>filters_conv)<line_sep>kernel_dense=tf.get_variable('kernel_dense' [conv_output_size filters_fc] initializer=init_kernel)<line_sep>bias_dense=tf.get_variable('bias_dense' [filters_fc])<line_sep>bias_inv_dense=tf.get_variable('bias_inv_dense' [conv_output_size])<line_sep>x=tf.nn.conv2d(x_inp kernel_conv [1 strides_conv strides_conv 1] 'SAME')<line_sep>x=tf.nn.softplus(x)<line_sep>x=tf.nn.pool(x (2 2) "MAX" "SAME" strides=(2 2))<line_sep>x=tf.contrib.layers.flatten(x)<line_sep>x=tf.nn.softplus(tf.matmul(x kernel_dense)+bias_dense)<line_sep>###INVERSE LAYERS
x=tf.nn.softplus(tf.matmul(x tf.transpose(kernel_dense))+bias_inv_dense)<line_sep>new_image_size=int(image_size/2/strides_conv)<line_sep>x=tf.reshape(x [-1 new_image_size new_image_size filters_conv])<line_sep>x=UnPooling2x2ZeroFilled(x)<line_sep>x=tf.nn.conv2d_transpose(x kernel_conv tf.shape(x_inp) [1 strides_conv strides_conv 1] 'SAME')<line_sep>x=tf.nn.softplus(x name='softplus')<block_end><return>x<block_end>
|
# Copyright 2016 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
<import_stmt>os<import_stmt>unittest<import_from_stmt>systrace decorators<import_from_stmt>systrace update_systrace_trace_viewer<line_sep>SCRIPT_DIR=os.path.dirname(os.path.abspath(__file__))<line_sep>STABLE_VIEWER_PATH=os.path.join(SCRIPT_DIR 'systrace_trace_viewer.html')<line_sep># Tests presence and content of static HTML files used not only for Python
# systrace capture, but also Java-based capture in the android SDK tools.
#
# NOTE: changes to this file should typically be accompanied by changes to the
# Android SDK's method of systrace capture.
<class_stmt>MonitorTest(unittest.TestCase)<block_start>@decorators.HostOnlyTest<def_stmt>test_systrace_trace_viewer self<block_start>self.assertEqual(STABLE_VIEWER_PATH update_systrace_trace_viewer.SYSTRACE_TRACE_VIEWER_HTML_FILE)<line_sep>update_systrace_trace_viewer.update(force_update=<true>)<with_stmt>open(STABLE_VIEWER_PATH)<as>f<block_start>content=f.read().strip()<line_sep># expect big html file
self.assertGreater(5<times>1024<times>1024 len(content))<line_sep>self.assertEqual('<' content[0])<block_end>os.remove(f.name)<block_end>@decorators.HostOnlyTest<def_stmt>test_prefix self<block_start><with_stmt>open(os.path.join(SCRIPT_DIR 'prefix.html'))<as>f<block_start>content=f.read().strip()<line_sep>self.assertTrue("<html>"<in>content)<line_sep>self.assertTrue("<title>Android System Trace</title>"<in>content)<line_sep>self.assertTrue("{{SYSTRACE_TRACE_VIEWER_HTML}}"<in>content)<block_end><block_end>@decorators.HostOnlyTest<def_stmt>test_suffix self<block_start><with_stmt>open(os.path.join(SCRIPT_DIR 'suffix.html'))<as>f<block_start>content=f.read().strip()<line_sep>self.assertTrue("</html>"<in>content)<block_end><block_end><block_end>
|
# -*- coding: utf-8 -*-
<import_from_stmt>bpy.types Panel<class_stmt>MMDBonePanel(Panel)<block_start>bl_idname='BONE_PT_mmd_tools_bone'<line_sep>bl_label='MMD Bone Tools'<line_sep>bl_space_type='PROPERTIES'<line_sep>bl_region_type='WINDOW'<line_sep>bl_context='bone'<line_sep>@classmethod<def_stmt>poll cls context<block_start><return>context.mode<eq>'EDIT_ARMATURE'<and>context.active_bone<is><not><none><or>context.mode<eq>'POSE'<and>context.active_pose_bone<is><not><none><block_end><def_stmt>draw self context<block_start><if_stmt>context.mode<eq>'EDIT_ARMATURE'<block_start>edit_bone=context.active_bone<line_sep>pose_bone=context.active_object.pose.bones[edit_bone.name]<block_end><else_stmt><block_start>pose_bone=context.active_pose_bone<block_end>layout=self.layout<line_sep>c=layout.column(align=<true>)<line_sep>c.label('Information:')<line_sep>c.prop(pose_bone.mmd_bone 'name_j')<line_sep>c.prop(pose_bone.mmd_bone 'name_e')<line_sep>c.label(text='ID: %d'%(pose_bone.mmd_bone.bone_id))<line_sep>c=layout.column(align=<true>)<line_sep>row=c.row()<line_sep>row.prop(pose_bone.mmd_bone 'transform_order')<line_sep>row.prop(pose_bone.mmd_bone 'transform_after_dynamics')<line_sep>row.prop(pose_bone.mmd_bone 'is_visible')<line_sep>row=c.row()<line_sep>row.prop(pose_bone.mmd_bone 'is_controllable')<line_sep>row.prop(pose_bone.mmd_bone 'is_tip')<line_sep>row.prop(pose_bone.mmd_bone 'enabled_local_axes')<line_sep>row=c.row()<line_sep>row.prop(pose_bone.mmd_bone 'enabled_fixed_axis')<line_sep>row.prop(pose_bone.mmd_bone 'use_tail_location')<line_sep>row=layout.row(align=<true>)<line_sep>c=row.column()<line_sep>c.prop(pose_bone.mmd_bone 'local_axis_x')<line_sep>c=row.column()<line_sep>c.prop(pose_bone.mmd_bone 'local_axis_z')<line_sep>c=layout.column()<line_sep>row=layout.row(align=<true>)<line_sep>c=row.column()<line_sep>c.prop(pose_bone.mmd_bone 'fixed_axis')<block_end><block_end><class_stmt>MMDBoneATPanel(Panel)<block_start>bl_idname='BONE_PT_mmd_tools_bone_at'<line_sep>bl_label='MMD Additional Transformation'<line_sep>bl_space_type='PROPERTIES'<line_sep>bl_region_type='WINDOW'<line_sep>bl_context='bone'<line_sep>@classmethod<def_stmt>poll cls context<block_start><return>context.mode<eq>'EDIT_ARMATURE'<and>context.active_bone<is><not><none><or>context.mode<eq>'POSE'<and>context.active_pose_bone<is><not><none><block_end><def_stmt>draw self context<block_start><if_stmt>context.mode<eq>'EDIT_ARMATURE'<block_start>edit_bone=context.active_bone<line_sep>pose_bone=context.active_object.pose.bones[edit_bone.name]<block_end><else_stmt><block_start>pose_bone=context.active_pose_bone<block_end>layout=self.layout<line_sep>c=layout.column(align=<true>)<if_stmt>pose_bone.mmd_bone.is_additional_transform_dirty<block_start>c.label(text='Changes has not been applied.' icon='ERROR')<block_end>row=c.row()<line_sep>row.prop(pose_bone.mmd_bone 'has_additional_rotation' text='Rotation')<line_sep>row.prop(pose_bone.mmd_bone 'has_additional_location' text='Location')<line_sep>c=layout.column(align=<true>)<line_sep>c.prop_search(pose_bone.mmd_bone 'additional_transform_bone' pose_bone.id_data.pose 'bones' icon='BONE_DATA' text='')<line_sep># mmd_bone = MMDBone(pose_bone)
# if mmd_bone.has_additional_transform_constraint():
# constraint = mmd_bone.get_additional_transform_constraint()
# c.prop_search(constraint, 'subtarget', constraint.target.pose, 'bones', icon='BONE_DATA', text='Additional Transform Bone')
# else:
# c.operator('mmd_tools.bone_add_additional_transform')
c.prop(pose_bone.mmd_bone 'additional_transform_influence' text='Influence')<block_end><block_end>
|
# -*- coding: utf-8 -*-
# flake8: noqa
"""Manual clustering facilities."""<import_from_stmt>._utils ClusterMeta UpdateInfo<import_from_stmt>.clustering Clustering<import_from_stmt>.supervisor Supervisor ClusterView SimilarityView<import_from_stmt>.views *# noqa
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
<import_stmt>warnings<import_stmt>pulumi<import_stmt>pulumi.runtime<import_from_stmt>typing Any Mapping Optional Sequence Union overload<import_from_stmt>.. _utilities<line_sep>__all__=['TimeSeriesInsightsEventSourceEventhubArgs' 'TimeSeriesInsightsEventSourceEventhub']<line_sep>@pulumi.input_type<class_stmt>TimeSeriesInsightsEventSourceEventhubArgs<block_start><def_stmt>__init__ __self__ * consumer_group_name:pulumi.Input[str] environment_id:pulumi.Input[str] event_source_resource_id:pulumi.Input[str] eventhub_name:pulumi.Input[str] namespace_name:pulumi.Input[str] shared_access_key:pulumi.Input[str] shared_access_key_name:pulumi.Input[str] location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> timestamp_property_name:Optional[pulumi.Input[str]]=<none><block_start>"""
The set of arguments for constructing a TimeSeriesInsightsEventSourceEventhub resource.
:param pulumi.Input[str] consumer_group_name: Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
:param pulumi.Input[str] environment_id: Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
:param pulumi.Input[str] event_source_resource_id: Specifies the resource id where events will be coming from.
:param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub which will be associated with this resource.
:param pulumi.Input[str] namespace_name: Specifies the EventHub Namespace name.
:param pulumi.Input[str] shared_access_key: Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
:param pulumi.Input[str] shared_access_key_name: Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] timestamp_property_name: Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""<line_sep>pulumi.set(__self__ "consumer_group_name" consumer_group_name)<line_sep>pulumi.set(__self__ "environment_id" environment_id)<line_sep>pulumi.set(__self__ "event_source_resource_id" event_source_resource_id)<line_sep>pulumi.set(__self__ "eventhub_name" eventhub_name)<line_sep>pulumi.set(__self__ "namespace_name" namespace_name)<line_sep>pulumi.set(__self__ "shared_access_key" shared_access_key)<line_sep>pulumi.set(__self__ "shared_access_key_name" shared_access_key_name)<if_stmt>location<is><not><none><block_start>pulumi.set(__self__ "location" location)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>tags<is><not><none><block_start>pulumi.set(__self__ "tags" tags)<block_end><if_stmt>timestamp_property_name<is><not><none><block_start>pulumi.set(__self__ "timestamp_property_name" timestamp_property_name)<block_end><block_end>@[email protected](name="consumerGroupName")<def_stmt>consumer_group_name self<arrow>pulumi.Input[str]<block_start>"""
Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
"""<line_sep><return>pulumi.get(self "consumer_group_name")<block_end>@consumer_group_name.setter<def_stmt>consumer_group_name self value:pulumi.Input[str]<block_start>pulumi.set(self "consumer_group_name" value)<block_end>@[email protected](name="environmentId")<def_stmt>environment_id self<arrow>pulumi.Input[str]<block_start>"""
Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "environment_id")<block_end>@environment_id.setter<def_stmt>environment_id self value:pulumi.Input[str]<block_start>pulumi.set(self "environment_id" value)<block_end>@[email protected](name="eventSourceResourceId")<def_stmt>event_source_resource_id self<arrow>pulumi.Input[str]<block_start>"""
Specifies the resource id where events will be coming from.
"""<line_sep><return>pulumi.get(self "event_source_resource_id")<block_end>@event_source_resource_id.setter<def_stmt>event_source_resource_id self value:pulumi.Input[str]<block_start>pulumi.set(self "event_source_resource_id" value)<block_end>@[email protected](name="eventhubName")<def_stmt>eventhub_name self<arrow>pulumi.Input[str]<block_start>"""
Specifies the name of the EventHub which will be associated with this resource.
"""<line_sep><return>pulumi.get(self "eventhub_name")<block_end>@eventhub_name.setter<def_stmt>eventhub_name self value:pulumi.Input[str]<block_start>pulumi.set(self "eventhub_name" value)<block_end>@[email protected](name="namespaceName")<def_stmt>namespace_name self<arrow>pulumi.Input[str]<block_start>"""
Specifies the EventHub Namespace name.
"""<line_sep><return>pulumi.get(self "namespace_name")<block_end>@namespace_name.setter<def_stmt>namespace_name self value:pulumi.Input[str]<block_start>pulumi.set(self "namespace_name" value)<block_end>@[email protected](name="sharedAccessKey")<def_stmt>shared_access_key self<arrow>pulumi.Input[str]<block_start>"""
Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
"""<line_sep><return>pulumi.get(self "shared_access_key")<block_end>@shared_access_key.setter<def_stmt>shared_access_key self value:pulumi.Input[str]<block_start>pulumi.set(self "shared_access_key" value)<block_end>@[email protected](name="sharedAccessKeyName")<def_stmt>shared_access_key_name self<arrow>pulumi.Input[str]<block_start>"""
Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
"""<line_sep><return>pulumi.get(self "shared_access_key_name")<block_end>@shared_access_key_name.setter<def_stmt>shared_access_key_name self value:pulumi.Input[str]<block_start>pulumi.set(self "shared_access_key_name" value)<block_end>@[email protected]<def_stmt>location self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "location")<block_end>@location.setter<def_stmt>location self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "location" value)<block_end>@[email protected]<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@[email protected]<def_stmt>tags self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>"""
A mapping of tags to assign to the resource.
"""<line_sep><return>pulumi.get(self "tags")<block_end>@tags.setter<def_stmt>tags self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "tags" value)<block_end>@[email protected](name="timestampPropertyName")<def_stmt>timestamp_property_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""<line_sep><return>pulumi.get(self "timestamp_property_name")<block_end>@timestamp_property_name.setter<def_stmt>timestamp_property_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "timestamp_property_name" value)<block_end><block_end>@pulumi.input_type<class_stmt>_TimeSeriesInsightsEventSourceEventhubState<block_start><def_stmt>__init__ __self__ * consumer_group_name:Optional[pulumi.Input[str]]=<none> environment_id:Optional[pulumi.Input[str]]=<none> event_source_resource_id:Optional[pulumi.Input[str]]=<none> eventhub_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> namespace_name:Optional[pulumi.Input[str]]=<none> shared_access_key:Optional[pulumi.Input[str]]=<none> shared_access_key_name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> timestamp_property_name:Optional[pulumi.Input[str]]=<none><block_start>"""
Input properties used for looking up and filtering TimeSeriesInsightsEventSourceEventhub resources.
:param pulumi.Input[str] consumer_group_name: Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
:param pulumi.Input[str] environment_id: Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
:param pulumi.Input[str] event_source_resource_id: Specifies the resource id where events will be coming from.
:param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub which will be associated with this resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[str] namespace_name: Specifies the EventHub Namespace name.
:param pulumi.Input[str] shared_access_key: Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
:param pulumi.Input[str] shared_access_key_name: Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] timestamp_property_name: Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""<if_stmt>consumer_group_name<is><not><none><block_start>pulumi.set(__self__ "consumer_group_name" consumer_group_name)<block_end><if_stmt>environment_id<is><not><none><block_start>pulumi.set(__self__ "environment_id" environment_id)<block_end><if_stmt>event_source_resource_id<is><not><none><block_start>pulumi.set(__self__ "event_source_resource_id" event_source_resource_id)<block_end><if_stmt>eventhub_name<is><not><none><block_start>pulumi.set(__self__ "eventhub_name" eventhub_name)<block_end><if_stmt>location<is><not><none><block_start>pulumi.set(__self__ "location" location)<block_end><if_stmt>name<is><not><none><block_start>pulumi.set(__self__ "name" name)<block_end><if_stmt>namespace_name<is><not><none><block_start>pulumi.set(__self__ "namespace_name" namespace_name)<block_end><if_stmt>shared_access_key<is><not><none><block_start>pulumi.set(__self__ "shared_access_key" shared_access_key)<block_end><if_stmt>shared_access_key_name<is><not><none><block_start>pulumi.set(__self__ "shared_access_key_name" shared_access_key_name)<block_end><if_stmt>tags<is><not><none><block_start>pulumi.set(__self__ "tags" tags)<block_end><if_stmt>timestamp_property_name<is><not><none><block_start>pulumi.set(__self__ "timestamp_property_name" timestamp_property_name)<block_end><block_end>@[email protected](name="consumerGroupName")<def_stmt>consumer_group_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
"""<line_sep><return>pulumi.get(self "consumer_group_name")<block_end>@consumer_group_name.setter<def_stmt>consumer_group_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "consumer_group_name" value)<block_end>@[email protected](name="environmentId")<def_stmt>environment_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "environment_id")<block_end>@environment_id.setter<def_stmt>environment_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "environment_id" value)<block_end>@[email protected](name="eventSourceResourceId")<def_stmt>event_source_resource_id self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the resource id where events will be coming from.
"""<line_sep><return>pulumi.get(self "event_source_resource_id")<block_end>@event_source_resource_id.setter<def_stmt>event_source_resource_id self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "event_source_resource_id" value)<block_end>@[email protected](name="eventhubName")<def_stmt>eventhub_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the name of the EventHub which will be associated with this resource.
"""<line_sep><return>pulumi.get(self "eventhub_name")<block_end>@eventhub_name.setter<def_stmt>eventhub_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "eventhub_name" value)<block_end>@[email protected]<def_stmt>location self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "location")<block_end>@location.setter<def_stmt>location self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "location" value)<block_end>@[email protected]<def_stmt>name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
"""<line_sep><return>pulumi.get(self "name")<block_end>@name.setter<def_stmt>name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "name" value)<block_end>@[email protected](name="namespaceName")<def_stmt>namespace_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the EventHub Namespace name.
"""<line_sep><return>pulumi.get(self "namespace_name")<block_end>@namespace_name.setter<def_stmt>namespace_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "namespace_name" value)<block_end>@[email protected](name="sharedAccessKey")<def_stmt>shared_access_key self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
"""<line_sep><return>pulumi.get(self "shared_access_key")<block_end>@shared_access_key.setter<def_stmt>shared_access_key self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "shared_access_key" value)<block_end>@[email protected](name="sharedAccessKeyName")<def_stmt>shared_access_key_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
"""<line_sep><return>pulumi.get(self "shared_access_key_name")<block_end>@shared_access_key_name.setter<def_stmt>shared_access_key_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "shared_access_key_name" value)<block_end>@[email protected]<def_stmt>tags self<arrow>Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>"""
A mapping of tags to assign to the resource.
"""<line_sep><return>pulumi.get(self "tags")<block_end>@tags.setter<def_stmt>tags self value:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]<block_start>pulumi.set(self "tags" value)<block_end>@[email protected](name="timestampPropertyName")<def_stmt>timestamp_property_name self<arrow>Optional[pulumi.Input[str]]<block_start>"""
Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""<line_sep><return>pulumi.get(self "timestamp_property_name")<block_end>@timestamp_property_name.setter<def_stmt>timestamp_property_name self value:Optional[pulumi.Input[str]]<block_start>pulumi.set(self "timestamp_property_name" value)<block_end><block_end><class_stmt>TimeSeriesInsightsEventSourceEventhub(pulumi.CustomResource)<block_start>@overload<def_stmt>__init__ __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> consumer_group_name:Optional[pulumi.Input[str]]=<none> environment_id:Optional[pulumi.Input[str]]=<none> event_source_resource_id:Optional[pulumi.Input[str]]=<none> eventhub_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> namespace_name:Optional[pulumi.Input[str]]=<none> shared_access_key:Optional[pulumi.Input[str]]=<none> shared_access_key_name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> timestamp_property_name:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start>"""
Manages an Azure IoT Time Series Insights EventHub Event Source.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_event_hub_namespace = azure.eventhub.EventHubNamespace("exampleEventHubNamespace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard")
example_event_hub = azure.eventhub.EventHub("exampleEventHub",
namespace_name=example_event_hub_namespace.name,
resource_group_name=example_resource_group.name,
partition_count=2,
message_retention=7)
example_consumer_group = azure.eventhub.ConsumerGroup("exampleConsumerGroup",
namespace_name=example_event_hub_namespace.name,
eventhub_name=example_event_hub.name,
resource_group_name=example_resource_group.name)
example_authorization_rule = azure.eventhub.AuthorizationRule("exampleAuthorizationRule",
namespace_name=example_event_hub_namespace.name,
eventhub_name=example_event_hub.name,
resource_group_name=example_resource_group.name,
listen=True,
send=False,
manage=False)
example_account = azure.storage.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
account_tier="Standard",
account_replication_type="LRS")
example_time_series_insights_gen2_environment = azure.iot.TimeSeriesInsightsGen2Environment("exampleTimeSeriesInsightsGen2Environment",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="L1",
id_properties=["id"],
storage=azure.iot.TimeSeriesInsightsGen2EnvironmentStorageArgs(
name=example_account.name,
key=example_account.primary_access_key,
))
example_time_series_insights_event_source_eventhub = azure.iot.TimeSeriesInsightsEventSourceEventhub("exampleTimeSeriesInsightsEventSourceEventhub",
location=example_resource_group.location,
environment_id=example_time_series_insights_gen2_environment.id,
eventhub_name=example_event_hub.name,
namespace_name=example_event_hub_namespace.name,
shared_access_key=example_authorization_rule.primary_key,
shared_access_key_name=example_authorization_rule.name,
consumer_group_name=example_consumer_group.name,
event_source_resource_id=example_event_hub.id)
```
## Import
Azure IoT Time Series Insights EventHub Event Source can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:iot/timeSeriesInsightsEventSourceEventhub:TimeSeriesInsightsEventSourceEventhub example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.TimeSeriesInsights/environments/environment1/eventSources/example
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] consumer_group_name: Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
:param pulumi.Input[str] environment_id: Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
:param pulumi.Input[str] event_source_resource_id: Specifies the resource id where events will be coming from.
:param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub which will be associated with this resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[str] namespace_name: Specifies the EventHub Namespace name.
:param pulumi.Input[str] shared_access_key: Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
:param pulumi.Input[str] shared_access_key_name: Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] timestamp_property_name: Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""<line_sep><ellipsis><block_end>@overload<def_stmt>__init__ __self__ resource_name:str args:TimeSeriesInsightsEventSourceEventhubArgs opts:Optional[pulumi.ResourceOptions]=<none><block_start>"""
Manages an Azure IoT Time Series Insights EventHub Event Source.
## Example Usage
```python
import pulumi
import pulumi_azure as azure
example_resource_group = azure.core.ResourceGroup("exampleResourceGroup", location="West Europe")
example_event_hub_namespace = azure.eventhub.EventHubNamespace("exampleEventHubNamespace",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku="Standard")
example_event_hub = azure.eventhub.EventHub("exampleEventHub",
namespace_name=example_event_hub_namespace.name,
resource_group_name=example_resource_group.name,
partition_count=2,
message_retention=7)
example_consumer_group = azure.eventhub.ConsumerGroup("exampleConsumerGroup",
namespace_name=example_event_hub_namespace.name,
eventhub_name=example_event_hub.name,
resource_group_name=example_resource_group.name)
example_authorization_rule = azure.eventhub.AuthorizationRule("exampleAuthorizationRule",
namespace_name=example_event_hub_namespace.name,
eventhub_name=example_event_hub.name,
resource_group_name=example_resource_group.name,
listen=True,
send=False,
manage=False)
example_account = azure.storage.Account("exampleAccount",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
account_tier="Standard",
account_replication_type="LRS")
example_time_series_insights_gen2_environment = azure.iot.TimeSeriesInsightsGen2Environment("exampleTimeSeriesInsightsGen2Environment",
location=example_resource_group.location,
resource_group_name=example_resource_group.name,
sku_name="L1",
id_properties=["id"],
storage=azure.iot.TimeSeriesInsightsGen2EnvironmentStorageArgs(
name=example_account.name,
key=example_account.primary_access_key,
))
example_time_series_insights_event_source_eventhub = azure.iot.TimeSeriesInsightsEventSourceEventhub("exampleTimeSeriesInsightsEventSourceEventhub",
location=example_resource_group.location,
environment_id=example_time_series_insights_gen2_environment.id,
eventhub_name=example_event_hub.name,
namespace_name=example_event_hub_namespace.name,
shared_access_key=example_authorization_rule.primary_key,
shared_access_key_name=example_authorization_rule.name,
consumer_group_name=example_consumer_group.name,
event_source_resource_id=example_event_hub.id)
```
## Import
Azure IoT Time Series Insights EventHub Event Source can be imported using the `resource id`, e.g.
```sh
$ pulumi import azure:iot/timeSeriesInsightsEventSourceEventhub:TimeSeriesInsightsEventSourceEventhub example /subscriptions/00000000-0000-0000-0000-000000000000/resourceGroups/example/providers/Microsoft.TimeSeriesInsights/environments/environment1/eventSources/example
```
:param str resource_name: The name of the resource.
:param TimeSeriesInsightsEventSourceEventhubArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""<line_sep><ellipsis><block_end><def_stmt>__init__ __self__ resource_name:str *args **kwargs<block_start>resource_args,opts=_utilities.get_resource_args_opts(TimeSeriesInsightsEventSourceEventhubArgs pulumi.ResourceOptions *args **kwargs)<if_stmt>resource_args<is><not><none><block_start>__self__._internal_init(resource_name opts **resource_args.__dict__)<block_end><else_stmt><block_start>__self__._internal_init(resource_name *args **kwargs)<block_end><block_end><def_stmt>_internal_init __self__ resource_name:str opts:Optional[pulumi.ResourceOptions]=<none> consumer_group_name:Optional[pulumi.Input[str]]=<none> environment_id:Optional[pulumi.Input[str]]=<none> event_source_resource_id:Optional[pulumi.Input[str]]=<none> eventhub_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> namespace_name:Optional[pulumi.Input[str]]=<none> shared_access_key:Optional[pulumi.Input[str]]=<none> shared_access_key_name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> timestamp_property_name:Optional[pulumi.Input[str]]=<none> __props__=<none><block_start><if_stmt>opts<is><none><block_start>opts=pulumi.ResourceOptions()<block_end><if_stmt><not>isinstance(opts pulumi.ResourceOptions)<block_start><raise>TypeError('Expected resource options to be a ResourceOptions instance')<block_end><if_stmt>opts.version<is><none><block_start>opts.version=_utilities.get_version()<block_end><if_stmt>opts.id<is><none><block_start><if_stmt>__props__<is><not><none><block_start><raise>TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')<block_end>__props__=TimeSeriesInsightsEventSourceEventhubArgs.__new__(TimeSeriesInsightsEventSourceEventhubArgs)<if_stmt>consumer_group_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'consumer_group_name'")<block_end>__props__.__dict__["consumer_group_name"]=consumer_group_name<if_stmt>environment_id<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'environment_id'")<block_end>__props__.__dict__["environment_id"]=environment_id<if_stmt>event_source_resource_id<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'event_source_resource_id'")<block_end>__props__.__dict__["event_source_resource_id"]=event_source_resource_id<if_stmt>eventhub_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'eventhub_name'")<block_end>__props__.__dict__["eventhub_name"]=eventhub_name<line_sep>__props__.__dict__["location"]=location<line_sep>__props__.__dict__["name"]=name<if_stmt>namespace_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'namespace_name'")<block_end>__props__.__dict__["namespace_name"]=namespace_name<if_stmt>shared_access_key<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'shared_access_key'")<block_end>__props__.__dict__["shared_access_key"]=shared_access_key<if_stmt>shared_access_key_name<is><none><and><not>opts.urn<block_start><raise>TypeError("Missing required property 'shared_access_key_name'")<block_end>__props__.__dict__["shared_access_key_name"]=shared_access_key_name<line_sep>__props__.__dict__["tags"]=tags<line_sep>__props__.__dict__["timestamp_property_name"]=timestamp_property_name<block_end>super(TimeSeriesInsightsEventSourceEventhub 
__self__).__init__('azure:iot/timeSeriesInsightsEventSourceEventhub:TimeSeriesInsightsEventSourceEventhub' resource_name __props__ opts)<block_end>@staticmethod<def_stmt>get resource_name:str id:pulumi.Input[str] opts:Optional[pulumi.ResourceOptions]=<none> consumer_group_name:Optional[pulumi.Input[str]]=<none> environment_id:Optional[pulumi.Input[str]]=<none> event_source_resource_id:Optional[pulumi.Input[str]]=<none> eventhub_name:Optional[pulumi.Input[str]]=<none> location:Optional[pulumi.Input[str]]=<none> name:Optional[pulumi.Input[str]]=<none> namespace_name:Optional[pulumi.Input[str]]=<none> shared_access_key:Optional[pulumi.Input[str]]=<none> shared_access_key_name:Optional[pulumi.Input[str]]=<none> tags:Optional[pulumi.Input[Mapping[str pulumi.Input[str]]]]=<none> timestamp_property_name:Optional[pulumi.Input[str]]=<none><arrow>'TimeSeriesInsightsEventSourceEventhub'<block_start>"""
Get an existing TimeSeriesInsightsEventSourceEventhub resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] consumer_group_name: Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
:param pulumi.Input[str] environment_id: Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
:param pulumi.Input[str] event_source_resource_id: Specifies the resource id where events will be coming from.
:param pulumi.Input[str] eventhub_name: Specifies the name of the EventHub which will be associated with this resource.
:param pulumi.Input[str] location: Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
:param pulumi.Input[str] name: Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
:param pulumi.Input[str] namespace_name: Specifies the EventHub Namespace name.
:param pulumi.Input[str] shared_access_key: Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
:param pulumi.Input[str] shared_access_key_name: Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
:param pulumi.Input[Mapping[str, pulumi.Input[str]]] tags: A mapping of tags to assign to the resource.
:param pulumi.Input[str] timestamp_property_name: Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""<line_sep>opts=pulumi.ResourceOptions.merge(opts pulumi.ResourceOptions(id=id))<line_sep>__props__=_TimeSeriesInsightsEventSourceEventhubState.__new__(_TimeSeriesInsightsEventSourceEventhubState)<line_sep>__props__.__dict__["consumer_group_name"]=consumer_group_name<line_sep>__props__.__dict__["environment_id"]=environment_id<line_sep>__props__.__dict__["event_source_resource_id"]=event_source_resource_id<line_sep>__props__.__dict__["eventhub_name"]=eventhub_name<line_sep>__props__.__dict__["location"]=location<line_sep>__props__.__dict__["name"]=name<line_sep>__props__.__dict__["namespace_name"]=namespace_name<line_sep>__props__.__dict__["shared_access_key"]=shared_access_key<line_sep>__props__.__dict__["shared_access_key_name"]=shared_access_key_name<line_sep>__props__.__dict__["tags"]=tags<line_sep>__props__.__dict__["timestamp_property_name"]=timestamp_property_name<line_sep><return>TimeSeriesInsightsEventSourceEventhub(resource_name opts=opts __props__=__props__)<block_end>@[email protected](name="consumerGroupName")<def_stmt>consumer_group_name self<arrow>pulumi.Output[str]<block_start>"""
Specifies the name of the EventHub Consumer Group that holds the partitions from which events will be read.
"""<line_sep><return>pulumi.get(self "consumer_group_name")<block_end>@[email protected](name="environmentId")<def_stmt>environment_id self<arrow>pulumi.Output[str]<block_start>"""
Specifies the id of the IoT Time Series Insights Environment that the Event Source should be associated with. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "environment_id")<block_end>@[email protected](name="eventSourceResourceId")<def_stmt>event_source_resource_id self<arrow>pulumi.Output[str]<block_start>"""
Specifies the resource id where events will be coming from.
"""<line_sep><return>pulumi.get(self "event_source_resource_id")<block_end>@[email protected](name="eventhubName")<def_stmt>eventhub_name self<arrow>pulumi.Output[str]<block_start>"""
Specifies the name of the EventHub which will be associated with this resource.
"""<line_sep><return>pulumi.get(self "eventhub_name")<block_end>@[email protected]<def_stmt>location self<arrow>pulumi.Output[str]<block_start>"""
Specifies the supported Azure location where the resource exists. Changing this forces a new resource to be created.
"""<line_sep><return>pulumi.get(self "location")<block_end>@[email protected]<def_stmt>name self<arrow>pulumi.Output[str]<block_start>"""
Specifies the name of the Azure IoT Time Series Insights EventHub Event Source. Changing this forces a new resource to be created. Must be globally unique.
"""<line_sep><return>pulumi.get(self "name")<block_end>@[email protected](name="namespaceName")<def_stmt>namespace_name self<arrow>pulumi.Output[str]<block_start>"""
Specifies the EventHub Namespace name.
"""<line_sep><return>pulumi.get(self "namespace_name")<block_end>@[email protected](name="sharedAccessKey")<def_stmt>shared_access_key self<arrow>pulumi.Output[str]<block_start>"""
Specifies the value of the Shared Access Policy key that grants the Time Series Insights service read access to the EventHub.
"""<line_sep><return>pulumi.get(self "shared_access_key")<block_end>@[email protected](name="sharedAccessKeyName")<def_stmt>shared_access_key_name self<arrow>pulumi.Output[str]<block_start>"""
Specifies the name of the Shared Access key that grants the Event Source access to the EventHub.
"""<line_sep><return>pulumi.get(self "shared_access_key_name")<block_end>@[email protected]<def_stmt>tags self<arrow>pulumi.Output[Optional[Mapping[str str]]]<block_start>"""
A mapping of tags to assign to the resource.
"""<line_sep><return>pulumi.get(self "tags")<block_end>@[email protected](name="timestampPropertyName")<def_stmt>timestamp_property_name self<arrow>pulumi.Output[str]<block_start>"""
Specifies the value that will be used as the event source's timestamp. This value defaults to the event creation time.
"""<line_sep><return>pulumi.get(self "timestamp_property_name")<block_end><block_end>
|
<import_stmt>matplotlib.pyplot<as>plt<import_stmt>numpy<as>np<def_stmt>draw_text ax x y<block_start>max_y=np.max(y)<for_stmt>i,v enumerate(y)<block_start>text=str(y[i])<line_sep>ax.text(x[i]-0.045<times>len(text) y[i] r'\textbf{'+text+'}')<block_end><block_end><def_stmt>draw_error_bar ax x xticks y_gpu e_gpu title ylabel<block_start>offset=0.2<line_sep>width=offset<times>2<line_sep>bar_gpu=ax.bar(x y_gpu width=width color=(0.86 0.27 0.22))<line_sep>ax.errorbar(x y_gpu yerr=e_gpu fmt='.' color=(0.96 0.71 0) capsize=10)<line_sep>ax.yaxis.grid(<true>)<line_sep>ax.set_xticks(x)<line_sep>ax.set_xticklabels(xticks)<line_sep>ax.set_title(title)<line_sep>ax.set_ylabel(ylabel)<block_end><if_stmt>__name__<eq>'__main__'<block_start>plt.rc('text' usetex=<true>)<line_sep>plt.rc('font' family='serif')<line_sep>fig,axs=plt.subplots(figsize=(5 3))<line_sep>x=np.array([1 2 3 4 5])<line_sep>labels=[r'\textit{fr2\_desktop}' r'\textit{fr3\_household}' r'\textit{lounge}' r'\textit{copyroom}' r'\textit{livingroom1}']<line_sep># mean = 8.713374, std = 1.637024
y_gpu_mc=np.array([8.71 11.63 6.28 5.18 4.07])<line_sep>e_gpu_mc=np.array([1.64 3.25 1.98 1.94 1.15])<line_sep>draw_error_bar(axs x labels y_gpu_mc e_gpu_mc r'\textbf{Marching Cubes for Voxels in Frustum}' r'\textbf{Average time per frame} (ms)')<line_sep>fig.tight_layout()<line_sep># bar_cpu_odom = plt.bar(x + offset, y_cpu_odom, width=width)
# plt.errorbar(x + offset, y_cpu_odom, yerr=e_cpu_odom, fmt='.g', capsize=20)
# for i, v in enumerate(y_cpu_odom):
# plt.text(x[i] + offset + 0.02, y_cpu_odom[i] + 5, str(y_cpu_odom[i]))
plt.savefig('mc_time.pdf' bbox_inches='tight')<line_sep>plt.show()<block_end>
|
"""
GravMag: Reduction to the pole of a total field anomaly using FFT
"""<import_from_stmt>fatiando mesher gridder utils<import_from_stmt>fatiando.gravmag prism transform<import_from_stmt>fatiando.vis mpl<line_sep># Direction of the Geomagnetic field
inc,dec=-60 0<line_sep># Make a model with only induced magnetization
model=[mesher.Prism(-100 100 -100 100 0 2000 {'magnetization':utils.ang2vec(10 inc dec)})]<line_sep>area=(-5000 5000 -5000 5000)<line_sep>shape=(100 100)<line_sep>z0=-500<line_sep>x,y,z=gridder.regular(area shape z=z0)<line_sep>tf=utils.contaminate(prism.tf(x y z model inc dec) 1 seed=0)<line_sep># Reduce to the pole using FFT. Since there is only induced magnetization, the
# magnetization direction (sinc and sdec) is the same as the geomagnetic field
pole=transform.reduce_to_pole(x y tf shape inc dec sinc=inc sdec=dec)<line_sep># Calculate the true value at the pole for comparison
true=prism.tf(x y z model 90 0 pmag=utils.ang2vec(10 90 0))<line_sep>fig,axes=mpl.subplots(1 3 figsize=(14 4))<for_stmt>ax axes<block_start>ax.set_aspect('equal')<block_end>mpl.sca(axes[0])<line_sep>mpl.title("Original total field anomaly")<line_sep>mpl.contourf(y x tf shape 30 cmap=mpl.cm.RdBu_r)<line_sep>mpl.colorbar(pad=0).set_label('nT')<line_sep>mpl.m2km()<line_sep>mpl.sca(axes[1])<line_sep>mpl.title("True value at pole")<line_sep>mpl.contourf(y x true shape 30 cmap=mpl.cm.RdBu_r)<line_sep>mpl.colorbar(pad=0).set_label('nT')<line_sep>mpl.m2km()<line_sep>mpl.sca(axes[2])<line_sep>mpl.title("Reduced to the pole")<line_sep>mpl.contourf(y x pole shape 30 cmap=mpl.cm.RdBu_r)<line_sep>mpl.colorbar(pad=0).set_label('nT')<line_sep>mpl.m2km()<line_sep>mpl.tight_layout()<line_sep>mpl.show()<line_sep>
|
# Copyright (c) ZenML GmbH 2020. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express
# or implied. See the License for the specific language governing
# permissions and limitations under the License.
"""Definition of the data Processing Backend"""<import_from_stmt>typing Optional List Text<import_from_stmt>zenml.backends BaseBackend<class_stmt>ProcessingBaseBackend(BaseBackend)<block_start>"""
Use this class to run a ZenML pipeline locally.
Every ZenML pipeline runs in backends.
A dedicated processing backend can be used to efficiently process large
amounts of incoming data in parallel, potentially distributed across
multiple machines. This can happen on local processing backends as well
as cloud-based variants like Google Cloud Dataflow. More powerful machines
with higher core counts and clock speeds can be leveraged to increase
processing throughput significantly.
"""<line_sep>BACKEND_TYPE='processing'<def_stmt>get_beam_args self pipeline_name:Text=<none> pipeline_root:Text=<none><arrow>Optional[List[Text]]<block_start>"""
Returns a list of beam args for the pipeline.
Args:
pipeline_name: Name of the pipeline.
pipeline_root: Root dir of pipeline.
"""<line_sep># TODO: [LOW] Check if multiprocessing slows pipeline down or not.
<return>[# '--direct_running_mode=multi_processing',
# # 0 means auto-detect based on the number of CPUs available
# # during execution time.
# '--direct_num_workers=0',
]<block_end><block_end>
|
<import_stmt>sys subprocess<import_stmt>os<import_stmt>os.path<import_stmt>shutil<import_stmt>re<import_stmt>glob<import_from_stmt>optparse OptionParser<line_sep>#-------------------------------------------------------------------------------
# the main function
# Below we deform the moving image segmentation by the current result as well as
# by a previously stored result. This makes this test a regression test.
#
# We could instead compare with a fixed image segmentation, but that would require
# the tested registrations to be relatively good, which they are not, in order to save time.
<def_stmt>main # usage, parse parameters
<block_start>usage="usage: %prog [options] arg"<line_sep>parser=OptionParser(usage)<line_sep># option to debug and verbose
parser.add_option("-v" "--verbose" action="store_true" dest="verbose")<line_sep># options to control files
parser.add_option("-d" "--directory" dest="directory" help="elastix output directory")<line_sep>parser.add_option("-m" "--movingsegmentation" dest="mseg" help="moving image segmentation")<line_sep>parser.add_option("-b" "--baselinetp" dest="btp" help="baseline transform parameter file")<line_sep>parser.add_option("-p" "--path" dest="path" help="path where executables can be found")<line_sep>(options args)=parser.parse_args()<line_sep># Check if option -d and -m and -b are given
<if_stmt>options.directory<eq><none><block_start>parser.error("The option directory (-d) should be given")<line_sep><block_end><if_stmt>options.mseg<eq><none><block_start>parser.error("The option movingsegmentation (-m) should be given")<line_sep><block_end><if_stmt>options.btp<eq><none><block_start>parser.error("The option baselinetp (-b) should be given")<line_sep><block_end># Get the transform parameters files
tpFileName_in=os.path.join(options.directory "TransformParameters.0.txt")<line_sep>tpFileName=os.path.join(options.directory "TransformParameters.seg.txt")<line_sep>tpFileName_b_in=options.btp<line_sep>tpFileName_b=os.path.join(options.directory "TransformParameters.baseline.seg.txt")<line_sep># Sanity checks
<if_stmt><not>os.path.exists(tpFileName_in)<block_start>print("ERROR: the file "+tpFileName_in+" does not exist")<line_sep><return>1<line_sep><block_end># Below we use programs that are compiled with elastix, and are thus available
# in the binary directory. The user of this script has to supply the path
# to the binary directory via the command line.
# In order to make sure that python is able to find these programs we add
# the paths to the local environment.
_path=os.path.dirname(options.path)<line_sep>_path<augadd>os.pathsep+os.getenv('PATH')<line_sep>os.environ['PATH']=_path<line_sep>#
# Deform the moving image segmentation by the current result
#
print("Deforming moving image segmentation using "+tpFileName_in)<line_sep># Make the transform parameters file suitable for binary images
f1=open(tpFileName_in 'r')<line_sep>f2=open(tpFileName 'w')<for_stmt>line f1<block_start>lineout=line.replace('(FinalBSplineInterpolationOrder 3)' '(FinalBSplineInterpolationOrder 0)')<line_sep>lineout=re.sub("(ResultImageFormat \"mhd\")" "ResultImageFormat \"mha\"" lineout)<line_sep>lineout=re.sub("(ResultImagePixelType \"short\")" "ResultImagePixelType \"unsigned char\"" lineout)<line_sep>lineout=re.sub("(CompressResultImage \"false\")" "CompressResultImage \"true\"" lineout)<line_sep>f2.write(lineout)<line_sep><block_end>f1.close()<line_sep>f2.close()<line_sep># Transform the moving image segmentation to mimick the baseline result
seg=os.path.join(options.directory "result.mha")<line_sep>seg_defm=os.path.join(options.directory "segmentation_deformed.mha")<line_sep>subprocess.call(["transformix" "-in" options.mseg "-out" options.directory "-tp" tpFileName] stdout=subprocess.PIPE)<line_sep><if_stmt>(os.path.exists(seg_defm))<block_start>os.remove(seg_defm)<line_sep><block_end>shutil.move(seg seg_defm)<line_sep>#
# Deform the moving image segmentation by the baseline result
#
print("Deforming moving image segmentation using "+tpFileName_b_in)<line_sep># Make the transform parameters file suitable for binary images
f1=open(tpFileName_b_in 'r')<line_sep>f2=open(tpFileName_b 'w')<for_stmt>line f1<block_start>lineout=line.replace('(FinalBSplineInterpolationOrder 3)' '(FinalBSplineInterpolationOrder 0)')<line_sep>lineout=re.sub("(ResultImageFormat \"mhd\")" "ResultImageFormat \"mha\"" lineout)<line_sep>lineout=re.sub("(ResultImagePixelType \"short\")" "ResultImagePixelType \"unsigned char\"" lineout)<line_sep>lineout=re.sub("(CompressResultImage \"false\")" "CompressResultImage \"true\"" lineout)<line_sep>f2.write(lineout)<line_sep><block_end>f1.close()<line_sep>f2.close()<line_sep># Transform the moving image segmentation to mimick the fixed image segmentation
seg_defb=os.path.join(options.directory "segmentation_baseline.mha")<line_sep>subprocess.call(["transformix" "-in" options.mseg "-out" options.directory "-tp" tpFileName_b] stdout=subprocess.PIPE)<line_sep><if_stmt>(os.path.exists(seg_defb))<block_start>os.remove(seg_defb)<line_sep><block_end>shutil.move(seg seg_defb)<line_sep># Compute the overlap between baseline segmentation and deformed moving segmentation
<try_stmt># This will work from python 2.7 on
<block_start>outputAsString=subprocess.check_output(["elxComputeOverlap" "-in" seg_defm seg_defb]).decode("utf-8")<block_end><except_stmt># Workaround for python 2.6 and lower. For MacMini specifically.
<block_start>outputAsString=subprocess.Popen(["elxComputeOverlap" "-in" seg_defm seg_defb] stdout=subprocess.PIPE).communicate()[0].decode("utf-8")<line_sep><block_end>overlap=outputAsString[outputAsString.find("Overlap"):].strip("Overlap: ")<line_sep># Report
print("The segmentation overlap between current and baseline is "+overlap)<if_stmt>float(overlap)<g>0.99<block_start>print("SUCCESS: overlap is higher than 0.99")<line_sep><return>0<block_end><else_stmt><block_start>print("FAILURE: overlap is lower than 0.99")<line_sep><return>1<line_sep><block_end><block_end>#-------------------------------------------------------------------------------
<if_stmt>__name__<eq>'__main__'<block_start>sys.exit(main())<block_end>
|
<import_from_stmt>datetime datetime timedelta<import_from_stmt>numbers Number<import_from_stmt>random choice uniform gauss random randint<import_from_stmt>typing List Union<import_from_stmt>plenum.common.metrics_collector MetricsName MetricsEvent MetricsCollector<import_from_stmt>plenum.common.value_accumulator ValueAccumulator<def_stmt>gen_metrics_name <arrow>MetricsName<block_start><return>choice(list(MetricsName))<block_end><def_stmt>gen_next_timestamp prev=<none><arrow>datetime<block_start><def_stmt>round_ts ts:datetime<arrow>datetime<block_start>us=round(ts.microsecond-500 -3)<line_sep><return>ts.replace(microsecond=us)<block_end><if_stmt>prev<is><none><block_start><return>round_ts(datetime.utcnow())<block_end><return>round_ts(prev+timedelta(seconds=uniform(0.001 10.0)))<block_end><def_stmt>generate_events num:int min_ts=<none><arrow>List[MetricsEvent]<block_start>ts=gen_next_timestamp(min_ts)<line_sep>result=[]<for_stmt>_ range(num)<block_start>ts=gen_next_timestamp(ts)<line_sep>name=gen_metrics_name()<if_stmt>random()<g>0.5<block_start>value=gauss(0.0 100.0)<block_end><else_stmt><block_start>value=ValueAccumulator([gauss(0.0 100.0)<for>_ range(randint(2 5))])<block_end>result<augadd>[MetricsEvent(ts name value)]<block_end><return>result<block_end><class_stmt>MockEvent<block_start><def_stmt>__init__ self name count sum<block_start>self.name=name<line_sep>self.count=count<line_sep>self.sum=sum<block_end><def_stmt>__eq__ self other<block_start><if_stmt><not>isinstance(other MockEvent)<block_start><return><false><block_end><if_stmt>self.name<ne>other.name<block_start><return><false><block_end><if_stmt>self.count<ne>other.count<block_start><return><false><block_end><return>self.sum<eq>other.sum<block_end>@property<def_stmt>avg self<block_start><return>self.sum/self.count<block_end><block_end><class_stmt>MockMetricsCollector(MetricsCollector)<block_start><def_stmt>__init__ self<block_start>super().__init__()<line_sep>self.events=[]<block_end><def_stmt>store_event self name:MetricsName value:Union[Number ValueAccumulator]<block_start><if_stmt>isinstance(value Number)<block_start>self.events.append(MockEvent(name 1 value))<block_end><else_stmt><block_start>self.events.append(MockEvent(name value.count value.sum))<block_end><block_end><block_end>
|
<class_stmt>Solution<block_start><def_stmt>findMaxConsecutiveOnes self nums:List[int]<arrow>int<block_start>res=cnt=0<for_stmt>i nums<block_start><if_stmt>i<block_start>cnt<augadd>1<block_end><else_stmt><block_start><if_stmt>cnt<block_start>res=max(res cnt)<line_sep>cnt=0<block_end><block_end><block_end><return>max(res cnt)<block_end><block_end>
|
<import_stmt>os<import_stmt>cv2<import_stmt>numpy<as>np<import_from_stmt>utils get_config get_csv_folds<import_from_stmt>dataset.h5like_interface H5LikeFileInterface<import_from_stmt>eval Evaluator flip<import_from_stmt>dataset.carvana_dataset CarvanaDataset<class_stmt>FullImageEvaluator(Evaluator)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><def_stmt>process_data self predicted model data prefix=""<block_start>names,samples,masks=self.get_data(data)<for_stmt>i range(len(names))<block_start>self.prev_name=names[i]<line_sep>self.full_pred=np.squeeze(predicted[i <ellipsis>])<if_stmt>samples<is><not><none><block_start>self.full_image=(samples[i <ellipsis>]<times>255).astype(np.uint8)<block_end><if_stmt>masks<is><not><none><block_start>self.full_mask=(np.squeeze(masks[i <ellipsis>])<times>255).astype(np.uint8)<block_end>self.on_image_constructed(prefix)<block_end><block_end><def_stmt>save self name prefix=""<block_start>cv2.imwrite(os.path.join(self.config.results_dir 'mask_{}'.format(name)) (self.full_pred<times>255).astype(np.uint8))<block_end><block_end><class_stmt>CarvanaEval(FullImageEvaluator)<block_start><def_stmt>__init__ self *args **kwargs<block_start>super().__init__(*args **kwargs)<block_end><def_stmt>save self name prefix=""<block_start>name,ext=os.path.splitext(name)<line_sep>cv2.imwrite(os.path.join('..' 'results' self.config.folder "{}{}.png".format(prefix name)) (self.full_pred[:1280 :1918]<times>255).astype(np.uint8))<block_end><block_end><def_stmt>eval_config config_path<block_start>test=<true><line_sep>config=get_config(config_path)<line_sep>num_workers=0<if>os.name<eq>'nt'<else>3<line_sep>root=config.dataset_path<line_sep>image_folder_name='train_hq'<if><not>test<else>'test_hq'<line_sep>c_ds=CarvanaDataset(root config.img_rows config.img_cols image_folder_name=image_folder_name apply_clahe=config.use_clahe)<line_sep>ds=H5LikeFileInterface(c_ds)<if_stmt><not>test<block_start>f='f04a.csv'<if>'f04a'<in>config.folder<else>'fma.csv'<line_sep>folds=get_csv_folds(os.path.join('..' f) os.listdir(os.path.join(root image_folder_name)))<block_end><else_stmt><block_start>folds=[([] list(range(len(c_ds))))<for>i range(5)]<block_end>keval=CarvanaEval(config ds folds test=test flips=flip.FLIP_LR num_workers=num_workers border=0)<line_sep>keval.need_dice=<true><line_sep>skip_folds=[i<for>i range(5)<if>config.fold<is><not><none><and>i<ne>int(config.fold)]<line_sep>print('skipping folds: ' skip_folds)<line_sep>keval.predict(skip_folds=skip_folds)<block_end><if_stmt>__name__<eq>"__main__"<block_start><for_stmt>config os.listdir('../configs')<block_start>eval_config(os.path.join('..' 'configs' config))<block_end><block_end>
|
<import_from_stmt>PokerRL.game._.rl_env.poker_types.DiscretizedPokerEnv DiscretizedPokerEnv<import_from_stmt>PokerRL.game._.rl_env.poker_types.LimitPokerEnv LimitPokerEnv<import_from_stmt>PokerRL.rl.rl_util get_builder_from_str get_env_cls_from_str<def_stmt>get_env_builder_rlbr t_prof<block_start>env_bldr_cls=get_builder_from_str(t_prof.env_builder_cls_str)<line_sep><return>env_bldr_cls(env_cls=get_env_cls_from_str(t_prof.game_cls_str) env_args=t_prof.module_args["rlbr"].get_rlbr_env_args(agents_env_args=t_prof.module_args["env"]))<block_end><def_stmt>reset_episode_multi_action_space rlbr_env_wrapper opponent_agent<block_start>ret=rlbr_env_wrapper.reset()<line_sep>opponent_agent.reset(deck_state_dict=rlbr_env_wrapper.env.cards_state_dict())<line_sep><return>ret<block_end><def_stmt>notify_agent_multi_action_space action_int rlbr_seat_id rlbr_env_wrapper opponent_agent<block_start>_type=type(rlbr_env_wrapper.env)<if_stmt>issubclass(_type LimitPokerEnv)<block_start>opponent_agent.notify_of_action(p_id_acted=rlbr_seat_id action_he_did=action_int)<block_end><elif_stmt>issubclass(_type DiscretizedPokerEnv)<block_start><if_stmt>action_int<ge>2<block_start>raise_frac=rlbr_env_wrapper.env.bet_sizes_list_as_frac_of_pot[action_int-2]<line_sep>opponent_agent.notify_of_raise_frac_action(p_id_acted=rlbr_seat_id frac=raise_frac)<block_end><else_stmt><block_start>opponent_agent.notify_of_action(p_id_acted=rlbr_seat_id action_he_did=action_int)<block_end><block_end><else_stmt><block_start><raise>ValueError(_type)<block_end><block_end><def_stmt>step_from_opp_action action_int opponent rlbr_env_wrapper<block_start>_type=type(rlbr_env_wrapper.env)<if_stmt>issubclass(_type LimitPokerEnv)<block_start><return>rlbr_env_wrapper.step(action=action_int)<block_end><elif_stmt>issubclass(_type DiscretizedPokerEnv)<block_start><if_stmt>action_int<ge>2<block_start>raise_frac=opponent.env_bldr.env_args.bet_sizes_list_as_frac_of_pot[action_int-2]<line_sep><return>rlbr_env_wrapper.step_raise_pot_frac(pot_frac=raise_frac)<block_end><else_stmt><block_start><return>rlbr_env_wrapper.step(action=action_int)<block_end><block_end><else_stmt><block_start><raise>ValueError(_type)<block_end><block_end>
|
<import_stmt>re<import_from_stmt>marko block HTMLRenderer<class_stmt>PhotoSet(block.BlockElement)<block_start>pattern=re.compile(r' {,3}(!\[([^\[\]\n]+)?\]\(([^)\n]+)\))'<concat>r'( {,3}(!\[([^\[\]\n]+)?\]\(([^)\n]+)\)))*[^\n\S]*$\n?' re.M)<line_sep>inline_children=<true><def_stmt>__init__ self match<block_start>self.children=match.group()<block_end>@classmethod<def_stmt>match cls source<block_start><return>source.expect_re(cls.pattern)<block_end>@classmethod<def_stmt>parse cls source<block_start>rv=source.match<line_sep>source.consume()<line_sep><return>rv<block_end><block_end><class_stmt>FlogRendererMixin<block_start><def_stmt>render_image self element<block_start>result=super().render_image(element)<line_sep>result=result.replace('<img' '<img data-original="{}"'.format(self.escape_url(element.dest)))<line_sep>caption=('<figcaption>{}</figcaption>'.format(element.title)<if>element.title<else>'')<line_sep><return>'<figure>{}{}</figure>'.format(result caption)<block_end><def_stmt>render_photo_set self element<block_start><return>'<div class="photo-set d-lg-flex">\n{}</div>\n'.format(self.render_children(element))<block_end><def_stmt>render_fenced_code self element<block_start>rv=['<div class="block-code">\n'<concat>'<div class="code-head clearfix">{}<span class="copy-code"'<concat>' title="Copy code">{}</span></div>\n'.format(element.extra element.lang.upper()) super().render_fenced_code(element) '</div>\n']<line_sep><return>''.join(rv)<block_end><def_stmt>_open_heading_group self<block_start><return>'<div class="list-group">\n'<block_end><def_stmt>_close_heading_group self<block_start><return>'</div>\n'<block_end><def_stmt>_render_toc_item self slug text<block_start><return>'<a class="list-group-item" href="#{}">{}</a>\n'.format(slug re.sub(r"<.+?>" "" text))<block_end><def_stmt>render_toc self maxlevel=3<block_start><return>super().render_toc(maxlevel).replace('<div class="list-group">' '<div class="list-group" id="table-of-content">' 1)<block_end><def_stmt>render_html_block self element# Disable tag filter, use the original render function
<block_start><return>HTMLRenderer.render_html_block(self element)<block_end><block_end><class_stmt>StrictHTMLRendererMixin<block_start><def_stmt>render_html_block self element# Disable tag filter, use the original render function
<block_start><return>self.tagfilter.sub(r'<\1' element.children)<block_end><block_end><class_stmt>Flog<block_start>elements=[PhotoSet]<line_sep>renderer_mixins=[FlogRendererMixin]<block_end><class_stmt>StrictFlog<block_start>renderer_mixins=[StrictHTMLRendererMixin]<block_end>
|
<import_stmt>pytest<import_from_stmt>click.testing CliRunner<import_from_stmt>unittest.mock MagicMock<import_from_stmt>prefect.cli.heartbeat heartbeat flow_run<import_from_stmt>prefect.utilities.configuration set_temporary_config<def_stmt>test_heartbeat_init <block_start>runner=CliRunner()<line_sep>result=runner.invoke(heartbeat)<assert_stmt>result.exit_code<eq>0<assert_stmt>"Send heartbeats back to the Prefect API."<in>result.output<block_end><def_stmt>test_heartbeat_help <block_start>runner=CliRunner()<line_sep>result=runner.invoke(heartbeat ["--help"])<assert_stmt>result.exit_code<eq>0<assert_stmt>"Send heartbeats back to the Prefect API."<in>result.output<block_end><def_stmt>test_heartbeat_flow_run patch_post cloud_api<block_start>patch_post(dict(data=dict(update_flow_run_heartbeat="success")))<with_stmt>set_temporary_config({"cloud.heartbeat_interval":0.1})<block_start>runner=CliRunner()<line_sep>result=runner.invoke(heartbeat ["flow-run" "--id" "id" "--num" "1"])<assert_stmt>result.exit_code<eq>0<block_end><block_end><def_stmt>test_heartbeat_multiple_flow_run_heartbeats patch_post cloud_api<block_start>post=patch_post(dict(data=dict(update_flow_run_heartbeat="success")))<with_stmt>set_temporary_config({"cloud.heartbeat_interval":0.1})<block_start>runner=CliRunner()<line_sep>result=runner.invoke(heartbeat ["flow-run" "--id" "id" "--num" "2"])<assert_stmt>result.exit_code<eq>0<assert_stmt>post.called<assert_stmt>post.call_count<eq>2<block_end><block_end><def_stmt>test_heartbeat_is_robust_to_exceptions cloud_api monkeypatch caplog<block_start>Client=MagicMock()<line_sep>monkeypatch.setattr("prefect.cli.heartbeat.Client" Client)<line_sep>monkeypatch.setattr("prefect.cli.heartbeat.time.sleep" MagicMock())<line_sep>Client().update_flow_run_heartbeat.side_effect=ValueError("Foo")<line_sep>runner=CliRunner()<line_sep>result=runner.invoke(heartbeat ["flow-run" "--id" "id" "--num" "2"])<assert_stmt>result.exit_code<eq>0<line_sep># Called twice despite raising errors
<assert_stmt>Client().update_flow_run_heartbeat.call_count<eq>2<assert_stmt>(f"Failed to send heartbeat with exception: {ValueError('Foo')!r}"<in>caplog.text)<assert_stmt>"Traceback"<in>caplog.text<block_end><def_stmt>test_heartbeat_does_not_ignore_base_exceptions cloud_api monkeypatch caplog<block_start>Client=MagicMock()<line_sep>monkeypatch.setattr("prefect.cli.heartbeat.Client" Client)<line_sep>monkeypatch.setattr("prefect.cli.heartbeat.time.sleep" MagicMock())<line_sep>Client().update_flow_run_heartbeat.side_effect=KeyboardInterrupt()<line_sep>runner=CliRunner()<line_sep>result=runner.invoke(heartbeat ["flow-run" "--id" "id" "--num" "2"])<assert_stmt>result.exit_code<eq>1<line_sep># Called _once_, error caused immediate exit
<assert_stmt>Client().update_flow_run_heartbeat.call_count<eq>1<assert_stmt>("Heartbeat process encountered terminal exception: KeyboardInterrupt()"<in>caplog.text)<assert_stmt>"Traceback"<in>caplog.text<block_end>@pytest.mark.parametrize("terminal_exc" [<true> <false>])<def_stmt>test_heartbeat_exceptions_are_logged_to_cloud cloud_api monkeypatch terminal_exc<block_start>Client=MagicMock()<line_sep>LOG_MANAGER=MagicMock()<line_sep>monkeypatch.setattr("prefect.cli.heartbeat.Client" Client)<line_sep>monkeypatch.setattr("prefect.utilities.logging.LOG_MANAGER" LOG_MANAGER)<line_sep>monkeypatch.setattr("prefect.cli.heartbeat.time.sleep" MagicMock())<line_sep>Client().update_flow_run_heartbeat.side_effect=(KeyboardInterrupt()<if>terminal_exc<else>ValueError("Foo"))<line_sep>runner=CliRunner()<line_sep>runner.invoke(heartbeat ["flow-run" "--id" "id" "--num" "2"])<line_sep># The exception was logged both times
log=LOG_MANAGER.enqueue.call_args[0][0]<assert_stmt>log["flow_run_id"]<eq>"id"<assert_stmt>log["name"]<eq>"prefect.subprocess_heartbeat"<assert_stmt>log["level"]<eq>"ERROR"<if_stmt>terminal_exc<block_start><assert_stmt>("Heartbeat process encountered terminal exception: KeyboardInterrupt()"<in>log["message"])<block_end><else_stmt><block_start><assert_stmt>(f"Failed to send heartbeat with exception: {ValueError('Foo')!r}"<in>log["message"])<block_end><assert_stmt>"Traceback"<in>log["message"]<block_end>
|
# Copyright Contributors to the Amundsen project.
# SPDX-License-Identifier: Apache-2.0
|
<import_from_stmt>social.backends.flickr FlickrOAuth<as>FlickrBackend<line_sep>
|
<import_from_stmt>nltk_contrib.classifier.attribute Attribute Attributes<import_from_stmt>nltk_contrib.classifier.confusionmatrix ConfusionMatrix<import_from_stmt>nltk_contrib.classifier.decisionstump DecisionStump<import_from_stmt>nltk_contrib.classifier.decisiontree DecisionTree<import_from_stmt>nltk_contrib.classifier.featureselect FeatureSelection<import_from_stmt>nltk_contrib.classifier.discretise Discretiser<import_from_stmt>nltk_contrib.classifier.instances TrainingInstances TestInstances GoldInstances<import_from_stmt>nltk_contrib.classifier.instance TrainingInstance TestInstance GoldInstance<import_from_stmt>nltk_contrib.classifier.knn IB1<import_from_stmt>nltk_contrib.classifier.naivebayes NaiveBayes<import_from_stmt>nltk_contrib.classifier.oner OneR<import_from_stmt>nltk_contrib.classifier.zeror ZeroR<import_from_stmt>nltk_contrib.classifier.format c45<line_sep>
|
<import_stmt>random<import_stmt>string<import_stmt>timeit<import_stmt>unittest<import_from_stmt>collections Hashable<import_stmt>six<import_from_stmt>six.moves xrange<import_stmt>match_counter<import_from_stmt>testutil repeat_until_passes<line_sep># Here's an alternative implementation. Unlike the simple one, it never constructs a new data
# structure, or modifies dictionary keys while iterating, but it is still slower.
<class_stmt>MatchCounterOther(object)<block_start><def_stmt>__init__ self _sample<block_start>self.sample_counts={v:0<for>v _sample}<block_end><def_stmt>count_unique self iterable<block_start><for_stmt>v iterable<block_start><try_stmt><block_start>n=self.sample_counts.get(v)<if_stmt>n<is><not><none><block_start>self.sample_counts[v]=n+1<block_end><block_end><except_stmt>TypeError<block_start><pass><block_end><block_end>matches=0<for_stmt>v,n six.iteritems(self.sample_counts)<block_start><if_stmt>n<g>0<block_start>matches<augadd>1<line_sep>self.sample_counts[v]=0<block_end><block_end><return>matches<block_end><block_end># If not for dealing with unhashable errors, `.intersection(iterable)` would be by far the
# fastest. But with the extra iteration and especially checking for Hashable, it's super slow.
<class_stmt>MatchCounterIntersection(object)<block_start><def_stmt>__init__ self _sample<block_start>self.sample=set(_sample)<block_end><def_stmt>count_unique self iterable<block_start><return>len(self.sample.intersection(v<for>v iterable<if>isinstance(v Hashable)))<block_end><block_end># This implementation doesn't measure the intersection, but it's interesting to compare its
# timings: this is still slower! Presumably because set intersection is native code that's more
# optimized than checking membership many times from Python.
<class_stmt>MatchCounterSimple(object)<block_start><def_stmt>__init__ self _sample<block_start>self.sample=set(_sample)<block_end><def_stmt>count_all self iterable<block_start><return>sum(1<for>r iterable<if>present(r self.sample))<block_end><block_end># This is much faster than using `isinstance(v, Hashable) and v in value_set`
<def_stmt>present v value_set<block_start><try_stmt><block_start><return>v<in>value_set<block_end><except_stmt>TypeError<block_start><return><false><block_end><block_end># Set up a predictable random number generator.
r=random.Random(17)<def_stmt>random_string <block_start>length=r.randint(10 20)<line_sep><return>''.join(r.choice(string.ascii_letters)<for>x xrange(length))<block_end><def_stmt>sample_with_repl population n<block_start><return>[r.choice(population)<for>x xrange(n)]<block_end># Here's some sample generated data.
sample=[random_string()<for>x xrange(200)]<line_sep>data1=sample_with_repl([random_string()<for>x xrange(20)]+r.sample(sample 5) 1000)<line_sep>data2=sample_with_repl([random_string()<for>x xrange(100)]+r.sample(sample 15) 500)<line_sep># Include an example with an unhashable value, to ensure all implementations can handle it.
data3=sample_with_repl([random_string()<for>x xrange(10)]+sample 2000)+[[1 2 3]]<class_stmt>TestMatchCounter(unittest.TestCase)<block_start><def_stmt>test_match_counter self<block_start>m=match_counter.MatchCounter(sample)<line_sep>self.assertEqual(m.count_unique(data1) 5)<line_sep>self.assertEqual(m.count_unique(data2) 15)<line_sep>self.assertEqual(m.count_unique(data3) 200)<line_sep>m=MatchCounterOther(sample)<line_sep>self.assertEqual(m.count_unique(data1) 5)<line_sep>self.assertEqual(m.count_unique(data2) 15)<line_sep>self.assertEqual(m.count_unique(data3) 200)<line_sep># Do it again to ensure that we clear out state between counting.
self.assertEqual(m.count_unique(data1) 5)<line_sep>self.assertEqual(m.count_unique(data2) 15)<line_sep>self.assertEqual(m.count_unique(data3) 200)<line_sep>m=MatchCounterIntersection(sample)<line_sep>self.assertEqual(m.count_unique(data1) 5)<line_sep>self.assertEqual(m.count_unique(data2) 15)<line_sep>self.assertEqual(m.count_unique(data3) 200)<line_sep>m=MatchCounterSimple(sample)<line_sep>self.assertGreaterEqual(m.count_all(data1) 5)<line_sep>self.assertGreaterEqual(m.count_all(data2) 15)<line_sep>self.assertGreaterEqual(m.count_all(data3) 200)<block_end>@repeat_until_passes(3)<def_stmt>test_timing self<block_start>setup='''
import match_counter
import test_match_counter as t
m1 = match_counter.MatchCounter(t.sample)
m2 = t.MatchCounterOther(t.sample)
m3 = t.MatchCounterSimple(t.sample)
m4 = t.MatchCounterIntersection(t.sample)
'''<line_sep>N=100<line_sep>t1=min(timeit.repeat(stmt='m1.count_unique(t.data1)' setup=setup number=N repeat=3))/N<line_sep>t2=min(timeit.repeat(stmt='m2.count_unique(t.data1)' setup=setup number=N repeat=3))/N<line_sep>t3=min(timeit.repeat(stmt='m3.count_all(t.data1)' setup=setup number=N repeat=3))/N<line_sep>t4=min(timeit.repeat(stmt='m4.count_unique(t.data1)' setup=setup number=N repeat=3))/N<line_sep>#print "Timings/iter data1: %.3fus %.3fus %.3fus %.3fus" % (t1 * 1e6, t2 * 1e6, t3*1e6, t4*1e6)
self.assertLess(t1 t2)<line_sep>self.assertLess(t1 t3)<line_sep>self.assertLess(t1 t4)<line_sep>t1=min(timeit.repeat(stmt='m1.count_unique(t.data2)' setup=setup number=N repeat=3))/N<line_sep>t2=min(timeit.repeat(stmt='m2.count_unique(t.data2)' setup=setup number=N repeat=3))/N<line_sep>t3=min(timeit.repeat(stmt='m3.count_all(t.data2)' setup=setup number=N repeat=3))/N<line_sep>t4=min(timeit.repeat(stmt='m4.count_unique(t.data2)' setup=setup number=N repeat=3))/N<line_sep>#print "Timings/iter data2: %.3fus %.3fus %.3fus %.3fus" % (t1 * 1e6, t2 * 1e6, t3*1e6, t4*1e6)
self.assertLess(t1 t2)<line_sep>self.assertLess(t1 t3)<line_sep>self.assertLess(t1 t4)<line_sep>t1=min(timeit.repeat(stmt='m1.count_unique(t.data3)' setup=setup number=N repeat=3))/N<line_sep>t2=min(timeit.repeat(stmt='m2.count_unique(t.data3)' setup=setup number=N repeat=3))/N<line_sep>t3=min(timeit.repeat(stmt='m3.count_all(t.data3)' setup=setup number=N repeat=3))/N<line_sep>t4=min(timeit.repeat(stmt='m4.count_unique(t.data3)' setup=setup number=N repeat=3))/N<line_sep>#print "Timings/iter data3: %.3fus %.3fus %.3fus %.3fus" % (t1 * 1e6, t2 * 1e6, t3*1e6, t4*1e6)
self.assertLess(t1 t2)<line_sep>#self.assertLess(t1, t3) # This fails on occasion, but it's a fairly pointless check.
self.assertLess(t1 t4)<block_end><block_end><if_stmt>__name__<eq>"__main__"<block_start>unittest.main()<block_end>
|
<import_stmt>argparse<import_stmt>tensorflow<as>tf<import_from_stmt>docqa.model_dir ModelDir<import_stmt>numpy<as>np<def_stmt>main <block_start>parser=argparse.ArgumentParser(description='')<line_sep>parser.add_argument("model")<line_sep>args=parser.parse_args()<line_sep>model_dir=ModelDir(args.model)<line_sep>checkpoint=model_dir.get_best_weights()<line_sep>print(checkpoint)<if_stmt>checkpoint<is><none><block_start>print("Show latest checkpoint")<line_sep>checkpoint=model_dir.get_latest_checkpoint()<block_end><else_stmt><block_start>print("Show best weights")<block_end>reader=tf.train.NewCheckpointReader(checkpoint)<line_sep>param_map=reader.get_variable_to_shape_map()<line_sep>total=0<for_stmt>k sorted(param_map)<block_start>v=param_map[k]<line_sep>print('%s: %s'%(k str(v)))<line_sep>total<augadd>np.prod(v)<block_end>print("%d total"%total)<block_end><if_stmt>__name__<eq>"__main__"<block_start>main()<block_end>
|
n=int(input("Enter a number: "))<for_stmt>i range(1 n+1)<block_start>count=0<for_stmt>j range(1 n+1)<block_start><if_stmt>i%j<eq>0<block_start>count=count+1<block_end><block_end><if_stmt>count<eq>2<block_start>print(i)<block_end><block_end>
|