# test_models.py
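"""Tests for the reviewer models: ReviewerSubscription notification emails,
ReviewerScore awarding and leaderboards, and AutoApprovalSummary scores,
weights and verdicts."""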
import json
import time
from datetime import datetime, timedelta
from unittest import mock

from django.conf import settings
from django.core import mail

from olympia import amo
from olympia.abuse.models import AbuseReport
from olympia.access.models import Group, GroupUser
from olympia.addons.models import AddonApprovalsCounter, AddonReviewerFlags, AddonUser
from olympia.amo.tests import (
    TestCase,
    addon_factory,
    user_factory,
    version_factory,
)
from olympia.blocklist.models import Block
from olympia.constants.promoted import (
    LINE,
    RECOMMENDED,
    STRATEGIC,
)
from olympia.constants.scanners import CUSTOMS, MAD
from olympia.files.models import FileValidation, WebextPermission
from olympia.promoted.models import PromotedAddon
from olympia.ratings.models import Rating
from olympia.reviewers.models import (
    AutoApprovalNoValidationResultError,
    AutoApprovalSummary,
    CannedResponse,
    ReviewerScore,
    ReviewerSubscription,
    send_notifications,
    set_reviewing_cache,
)
from olympia.users.models import UserProfile
from olympia.versions.models import Version, version_uploaded


class TestReviewerSubscription(TestCase):
    fixtures = ['base/addon_3615', 'base/users']

    def setUp(self):
        super().setUp()
        self.addon = addon_factory(name='SubscribingTest')
        self.listed_version = version_factory(addon=self.addon)
        self.unlisted_version = version_factory(
            addon=self.addon, channel=amo.RELEASE_CHANNEL_UNLISTED
        )
        self.listed_reviewer = user_factory(email='listed@reviewer')
        self.listed_reviewer_group = Group.objects.create(
            name='Listed Reviewers', rules='Addons:Review'
        )
        GroupUser.objects.create(
            group=self.listed_reviewer_group, user=self.listed_reviewer
        )
        ReviewerSubscription.objects.create(
            addon=self.addon,
            user=self.listed_reviewer,
            channel=amo.RELEASE_CHANNEL_LISTED,
        )
        self.unlisted_reviewer = user_factory(email='unlisted@reviewer')
        self.unlisted_reviewer_group = Group.objects.create(
            name='Unlisted Reviewers', rules='Addons:ReviewUnlisted'
        )
        GroupUser.objects.create(
            group=self.unlisted_reviewer_group, user=self.unlisted_reviewer
        )
        ReviewerSubscription.objects.create(
            addon=self.addon,
            user=self.unlisted_reviewer,
            channel=amo.RELEASE_CHANNEL_UNLISTED,
        )
        self.admin_reviewer = user_factory(email='admin@reviewer')
        GroupUser.objects.create(
            group=self.listed_reviewer_group, user=self.admin_reviewer
        )
        GroupUser.objects.create(
            group=self.unlisted_reviewer_group, user=self.admin_reviewer
        )
        # Don't subscribe admin to updates yet, will be done in tests.
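
    # The tests below exercise send_notification()/send_notifications(), both
    # called directly and triggered through the version_uploaded signal, and
    # check that subscribers are only emailed for the channel they can review.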

    def test_send_notification(self):
        subscription = ReviewerSubscription.objects.get(user=self.listed_reviewer)
        subscription.send_notification(self.listed_version)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].to == ['listed@reviewer']
        assert mail.outbox[0].subject == ('Mozilla Add-ons: SubscribingTest Updated')

    def test_send_notifications(self):
        another_listed_reviewer = user_factory(email='listed2@reviewer')
        GroupUser.objects.create(
            group=self.listed_reviewer_group, user=another_listed_reviewer
        )
        ReviewerSubscription.objects.create(
            addon=self.addon,
            user=another_listed_reviewer,
            channel=amo.RELEASE_CHANNEL_LISTED,
        )
        send_notifications(sender=Version, instance=self.listed_version)
        assert len(mail.outbox) == 2
        emails = sorted(o.to for o in mail.outbox)
        assert emails == [['listed2@reviewer'], ['listed@reviewer']]

    def test_notifications_setting_persists(self):
        send_notifications(Version, self.listed_version)
        assert ReviewerSubscription.objects.count() == 2
        mail.outbox = []
        send_notifications(Version, self.listed_version)
        assert len(mail.outbox) == 1
        mail.outbox = []
        send_notifications(Version, self.unlisted_version)
        assert ReviewerSubscription.objects.count() == 2
        mail.outbox = []
        send_notifications(Version, self.unlisted_version)
        assert len(mail.outbox) == 1

    def test_listed_subscription(self):
        version_uploaded.send(sender=Version, instance=self.listed_version)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].to == ['listed@reviewer']
        assert mail.outbox[0].subject == ('Mozilla Add-ons: SubscribingTest Updated')

    def test_unlisted_subscription(self):
        version_uploaded.send(sender=Version, instance=self.unlisted_version)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].to == ['unlisted@reviewer']
        assert mail.outbox[0].subject == ('Mozilla Add-ons: SubscribingTest Updated')

    def test_unlisted_subscription_listed_reviewer(self):
        ReviewerSubscription.objects.create(
            addon=self.addon,
            user=self.listed_reviewer,
            channel=amo.RELEASE_CHANNEL_UNLISTED,
        )
        version_uploaded.send(sender=Version, instance=self.unlisted_version)
        # No email should be sent to the listed reviewer, since they do not
        # have access to unlisted versions.
        assert len(mail.outbox) == 1  # Only unlisted@reviewer.
        assert mail.outbox[0].to != ['listed@reviewer']

    def test_admin_reviewer_listed_subscription(self):
        ReviewerSubscription.objects.create(
            addon=self.addon,
            user=self.admin_reviewer,
            channel=amo.RELEASE_CHANNEL_LISTED,
        )
        version_uploaded.send(sender=Version, instance=self.listed_version)
        assert len(mail.outbox) == 2
        emails = sorted(o.to for o in mail.outbox)
        assert emails == [['admin@reviewer'], ['listed@reviewer']]

        mail.outbox = []
        version_uploaded.send(sender=Version, instance=self.unlisted_version)
        assert len(mail.outbox) == 1  # Only unlisted@reviewer.
        assert mail.outbox[0].to != ['admin@reviewer']

    def test_admin_reviewer_unlisted_subscription(self):
        ReviewerSubscription.objects.create(
            addon=self.addon,
            user=self.admin_reviewer,
            channel=amo.RELEASE_CHANNEL_UNLISTED,
        )
        version_uploaded.send(sender=Version, instance=self.unlisted_version)
        assert len(mail.outbox) == 2
        emails = sorted(o.to for o in mail.outbox)
        assert emails == [['admin@reviewer'], ['unlisted@reviewer']]

        mail.outbox = []
        version_uploaded.send(sender=Version, instance=self.listed_version)
        assert len(mail.outbox) == 1  # Only listed@reviewer.
        assert mail.outbox[0].to != ['admin@reviewer']

    def test_admin_reviewer_both_subscriptions(self):
        ReviewerSubscription.objects.create(
            addon=self.addon,
            user=self.admin_reviewer,
            channel=amo.RELEASE_CHANNEL_LISTED,
        )
        ReviewerSubscription.objects.create(
            addon=self.addon,
            user=self.admin_reviewer,
            channel=amo.RELEASE_CHANNEL_UNLISTED,
        )
        version_uploaded.send(sender=Version, instance=self.listed_version)
        version_uploaded.send(sender=Version, instance=self.unlisted_version)
        assert len(mail.outbox) == 4
        emails = sorted(o.to for o in mail.outbox)
        assert emails == [
            ['admin@reviewer'],
            ['admin@reviewer'],
            ['listed@reviewer'],
            ['unlisted@reviewer'],
        ]

    def test_signal_edit(self):
        self.listed_version.save()
        self.unlisted_version.save()
        assert len(mail.outbox) == 0

    def test_signal_create(self):
        version = version_factory(addon=self.addon)
        version_uploaded.send(sender=Version, instance=version)
        assert len(mail.outbox) == 1
        assert mail.outbox[0].subject == ('Mozilla Add-ons: SubscribingTest Updated')

    def test_signal_create_twice(self):
        version = version_factory(addon=self.addon)
        version_uploaded.send(sender=Version, instance=version)
        mail.outbox = []
        version = version_factory(addon=self.addon)
        version_uploaded.send(sender=Version, instance=version)
        assert len(mail.outbox) == 1

    def test_no_email_for_ex_reviewers(self):
        self.listed_reviewer.delete()
        mail.outbox = []  # Deleting the user sends an email for the add-on.
        # Remove the deleted reviewer from the reviewers group.
        GroupUser.objects.get(
            group=self.listed_reviewer_group, user=self.listed_reviewer
        ).delete()
        send_notifications(sender=Version, instance=self.listed_version)
        assert len(mail.outbox) == 0

    def test_no_email_address_for_reviewer(self):
        self.listed_reviewer.update(email=None)
        send_notifications(sender=Version, instance=self.listed_version)
        assert len(mail.outbox) == 0


class TestReviewerScore(TestCase):
    fixtures = ['base/users']

    def setUp(self):
        super().setUp()
        self.addon = AddonReviewerFlags.objects.create(
            addon=addon_factory(status=amo.STATUS_NOMINATED),
            auto_approval_disabled=True,
        ).addon
        self.summary = AutoApprovalSummary.objects.create(
            version=self.addon.current_version,
            verdict=amo.NOT_AUTO_APPROVED,
            weight=101,
        )
        self.user = UserProfile.objects.get(email='[email protected]')

    def _give_points(self, user=None, addon=None, status=None):
        user = user or self.user
        addon = addon or self.addon
        ReviewerScore.award_points(
            user, addon, status or addon.status, version=addon.current_version
        )

    def check_event(self, type, status, event, **kwargs):
        self.addon.type = type
        assert ReviewerScore.get_event(self.addon, status, **kwargs) == event

    def test_events_addons(self):
        types = {
            amo.ADDON_ANY: None,
            amo.ADDON_EXTENSION: 'ADDON',
            amo.ADDON_DICT: 'DICT',
            amo.ADDON_LPAPP: 'LP',
            amo.ADDON_LPADDON: 'LP',
            amo.ADDON_PLUGIN: 'ADDON',
            amo.ADDON_API: 'ADDON',
            amo.ADDON_STATICTHEME: 'STATICTHEME',
        }
        statuses = {
            amo.STATUS_NULL: None,
            amo.STATUS_NOMINATED: 'FULL',
            amo.STATUS_APPROVED: 'UPDATE',
            amo.STATUS_DISABLED: None,
            amo.STATUS_DELETED: None,
        }
        for tk, tv in types.items():
            for sk, sv in statuses.items():
                try:
                    event = getattr(amo, f'REVIEWED_{tv}_{sv}')
                except AttributeError:
                    try:
                        event = getattr(amo, 'REVIEWED_%s' % tv)
                    except AttributeError:
                        event = None
                self.check_event(tk, sk, event)

    def test_events_post_review(self): se

    def test_award_points(self):
        self._give_points()
        assert ReviewerScore.objects.all()[0].score == (
            amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK]
        )

    def test_award_points_with_extra_note(self):
        ReviewerScore.award_points(
            self.user, self.addon, self.addon.status, extra_note='ÔMG!'
        )
        reviewer_score = ReviewerScore.objects.all()[0]
        assert reviewer_score.note_key == amo.REVIEWED_EXTENSION_LOW_RISK
        assert reviewer_score.score == (
            amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_LOW_RISK]
        )
        assert reviewer_score.note == 'ÔMG!'

    def test_award_points_bonus(self):
        user2 = UserProfile.objects.get(email='[email protected]')
        bonus_days = 2
        days = amo.REVIEWED_OVERDUE_LIMIT + bonus_days
        bonus_addon = addon_factory(
            status=amo.STATUS_NOMINATED,
            type=amo.ADDON_STATICTHEME,
            file_kw={'status': amo.STATUS_AWAITING_REVIEW},
        )
        bonus_addon.current_version.update(
            nomination=(datetime.now() - timedelta(days=days, minutes=5))
        )
        self._give_points(user2, bonus_addon, amo.STATUS_NOMINATED)
        score = ReviewerScore.objects.get(user=user2)
        expected = amo.REVIEWED_SCORES[amo.REVIEWED_STATICTHEME] + (
            amo.REVIEWED_OVERDUE_BONUS * bonus_days
        )
        assert score.score == expected

    def test_award_points_no_bonus_for_content_review(self):
        self.addon.update(status=amo.STATUS_APPROVED)
        self.addon.current_version.update(nomination=self.days_ago(28))
        self.summary.update(verdict=amo.AUTO_APPROVED, weight=100)
        ReviewerScore.award_points(
            self.user,
            self.addon,
            self.addon.status,
            version=self.addon.current_version,
            post_review=False,
            content_review=True,
        )
        score = ReviewerScore.objects.get(user=self.user)
        assert score.score == amo.REVIEWED_SCORES[amo.REVIEWED_CONTENT_REVIEW]

    def test_award_points_no_bonus_for_post_review(self):
        self.addon.update(status=amo.STATUS_APPROVED)
        self.addon.current_version.update(nomination=self.days_ago(29))
        self.summary.update(verdict=amo.AUTO_APPROVED, weight=101)
        ReviewerScore.award_points(
            self.user,
            self.addon,
            self.addon.status,
            version=self.addon.current_version,
            post_review=True,
            content_review=False,
        )
        score = ReviewerScore.objects.get(user=self.user)
        assert score.score == amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK]
        assert score.version == self.addon.current_version

    def test_award_points_extension_disabled_autoapproval(self):
        self.version = version_factory(
            addon=self.addon,
            version='1.1',
            file_kw={'status': amo.STATUS_AWAITING_REVIEW},
        )
        AutoApprovalSummary.objects.create(
            version=self.addon.current_version,
            verdict=amo.NOT_AUTO_APPROVED,
            weight=101,
        )
        ReviewerScore.award_points(
            self.user,
            self.addon,
            self.addon.status,
            version=self.addon.current_version,
            post_review=False,
            content_review=False,
        )
        score = ReviewerScore.objects.get(user=self.user)
        assert score.score == amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK]
        assert score.version == self.addon.current_version

    def test_award_points_langpack_post_review(self):
        langpack = amo.tests.addon_factory(
            status=amo.STATUS_APPROVED, type=amo.ADDON_LPAPP
        )
        self.version = version_factory(
            addon=langpack,
            version='1.1',
            file_kw={'status': amo.STATUS_APPROVED},
        )
        AutoApprovalSummary.objects.create(
            version=langpack.current_version, verdict=amo.AUTO_APPROVED, weight=101
        )
        ReviewerScore.award_points(
            self.user,
            langpack,
            langpack.status,
            version=langpack.current_version,
            post_review=True,
            content_review=False,
        )
        score = ReviewerScore.objects.get(user=self.user)
        assert score.score == amo.REVIEWED_SCORES[amo.REVIEWED_LP_FULL]
        assert score.version == langpack.current_version

    def test_award_points_langpack_disabled_auto_approval(self):
        langpack = amo.tests.addon_factory(
            status=amo.STATUS_NOMINATED, type=amo.ADDON_LPAPP
        )
        self.version = version_factory(
            addon=langpack,
            version='1.1',
            file_kw={'status': amo.STATUS_AWAITING_REVIEW},
        )
        AutoApprovalSummary.objects.create(
            version=langpack.current_version, verdict=amo.NOT_AUTO_APPROVED, weight=101
        )
        ReviewerScore.award_points(
            self.user,
            langpack,
            langpack.status,
            version=langpack.current_version,
            post_review=False,
            content_review=False,
        )
        score = ReviewerScore.objects.get(user=self.user)
        assert score.score == amo.REVIEWED_SCORES[amo.REVIEWED_LP_FULL]
        assert score.version == langpack.current_version

    def test_award_points_dict_post_review(self):
        dictionary = amo.tests.addon_factory(
            status=amo.STATUS_APPROVED, type=amo.ADDON_DICT
        )
        self.version = version_factory(
            addon=dictionary,
            version='1.1',
            file_kw={'status': amo.STATUS_APPROVED},
        )
        AutoApprovalSummary.objects.create(
            version=dictionary.current_version, verdict=amo.AUTO_APPROVED, weight=101
        )
        ReviewerScore.award_points(
            self.user,
            dictionary,
            dictionary.status,
            version=dictionary.current_version,
            post_review=True,
            content_review=False,
        )
        score = ReviewerScore.objects.get(user=self.user)
        assert score.score == amo.REVIEWED_SCORES[amo.REVIEWED_DICT_FULL]
        assert score.version == dictionary.current_version

    def test_award_points_dict_disabled_auto_approval(self):
        dictionary = amo.tests.addon_factory(
            status=amo.STATUS_NOMINATED, type=amo.ADDON_DICT
        )
        self.version = version_factory(
            addon=dictionary,
            version='1.1',
            file_kw={'status': amo.STATUS_AWAITING_REVIEW},
        )
        AutoApprovalSummary.objects.create(
            version=dictionary.current_version,
            verdict=amo.NOT_AUTO_APPROVED,
            weight=101,
        )
        ReviewerScore.award_points(
            self.user,
            dictionary,
            dictionary.status,
            version=dictionary.current_version,
            post_review=False,
            content_review=False,
        )
        score = ReviewerScore.objects.get(user=self.user)
        assert score.score == amo.REVIEWED_SCORES[amo.REVIEWED_DICT_FULL]
        assert score.version == dictionary.current_version

    def test_award_moderation_points(self):
        ReviewerScore.award_moderation_points(self.user, self.addon, 1)
        score = ReviewerScore.objects.all()[0]
        assert score.score == (amo.REVIEWED_SCORES.get(amo.REVIEWED_ADDON_REVIEW))
        assert score.note_key == amo.REVIEWED_ADDON_REVIEW
        assert not score.version

    def test_get_total(self):
        user2 = UserProfile.objects.get(email='[email protected]')
        self._give_points()
        self._give_points(status=amo.STATUS_APPROVED)
        self.summary.update(weight=176)
        self._give_points(user=user2, status=amo.STATUS_NOMINATED)
        assert ReviewerScore.get_total(self.user) == (
            amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK]
            + amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK]
        )
        assert ReviewerScore.get_total(user2) == (
            amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_HIGH_RISK]
        )

    def test_get_recent(self):
        self._give_points()
        time.sleep(1)  # Wait 1 sec so ordering by created is checked.
        self.summary.update(weight=176)
        self._give_points(status=amo.STATUS_APPROVED)
        self.summary.update(weight=1)
        user2 = UserProfile.objects.get(email='[email protected]')
        self._give_points(user=user2)
        scores = ReviewerScore.get_recent(self.user)
        assert len(scores) == 2
        assert scores[0].score == (
            amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_HIGH_RISK]
        )
        assert scores[1].score == (
            amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK]
        )

    def test_get_leaderboards(self):
        user2 = UserProfile.objects.get(email='[email protected]')
        self._give_points()
        self._give_points(status=amo.STATUS_APPROVED)
        self.summary.update(weight=176)
        self._give_points(user=user2, status=amo.STATUS_NOMINATED)
        leaders = ReviewerScore.get_leaderboards(self.user)
        assert leaders['user_rank'] == 1
        assert leaders['leader_near'] == []
        assert leaders['leader_top'][0]['rank'] == 1
        assert leaders['leader_top'][0]['user_id'] == self.user.id
        assert leaders['leader_top'][0]['total'] == (
            amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK]
            + amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK]
        )
        assert leaders['leader_top'][1]['rank'] == 2
        assert leaders['leader_top'][1]['user_id'] == user2.id
        assert leaders['leader_top'][1]['total'] == (
            amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_HIGH_RISK]
        )

        self._give_points(
            user=user2, addon=amo.tests.addon_factory(type=amo.ADDON_STATICTHEME)
        )
        leaders = ReviewerScore.get_leaderboards(
            self.user, addon_type=amo.ADDON_STATICTHEME
        )
        assert len(leaders['leader_top']) == 1
        assert leaders['leader_top'][0]['user_id'] == user2.id

    def test_only_active_reviewers_in_leaderboards(self):
        user2 = UserProfile.objects.create(username='former-reviewer')
        self._give_points()
        self._give_points(status=amo.STATUS_APPROVED)
        self._give_points(user=user2, status=amo.STATUS_NOMINATED)
        leaders = ReviewerScore.get_leaderboards(self.user)
        assert leaders['user_rank'] == 1
        assert leaders['leader_near'] == []
        assert leaders['leader_top'][0]['user_id'] == self.user.id
        assert len(leaders['leader_top']) == 1  # Only the reviewer is here.
        assert user2.id not in [
            leader['user_id'] for leader in leaders['leader_top']
        ], 'Unexpected non-reviewer user found in leaderboards.'

    def test_no_admins_or_staff_in_leaderboards(self):
        user2 = UserProfile.objects.get(email='[email protected]')
        self._give_points()
        self._give_points(status=amo.STATUS_APPROVED)
        self._give_points(user=user2, status=amo.STATUS_NOMINATED)
        leaders = ReviewerScore.get_leaderboards(self.user)
        assert leaders['user_rank'] == 1
        assert leaders['leader_near'] == []
        assert leaders['leader_top'][0]['user_id'] == self.user.id
        assert len(leaders['leader_top']) == 1  # Only the reviewer is here.
        assert user2.id not in [
            leader['user_id'] for leader in leaders['leader_top']
        ], 'Unexpected admin user found in leaderboards.'

    def test_get_leaderboards_last(self):
        users = []
        for i in range(6):
            user = UserProfile.objects.create(username='user-%s' % i)
            GroupUser.objects.create(group_id=50002, user=user)
            users.append(user)
        last_user = users.pop(len(users) - 1)
        for u in users:
            self._give_points(user=u)
        # Last user gets lower points by reviewing a theme.
        addon = self.addon
        addon.type = amo.ADDON_STATICTHEME
        self._give_points(user=last_user, addon=addon)
        leaders = ReviewerScore.get_leaderboards(last_user)
        assert leaders['user_rank'] == 6
        assert len(leaders['leader_top']) == 3
        assert len(leaders['leader_near']) == 2

    def test_leaderboard_score_when_in_multiple_reviewer_groups(self):
        group_reviewers = Group.objects.create(
            name='Reviewers: Addons', rules='Addons:Review'
        )
        group_content_reviewers = Group.objects.create(
            name='Reviewers: Content', rules='Addons:ContentReview'
        )
        GroupUser.objects.create(group=group_reviewers, user=self.user)
        GroupUser.objects.create(group=group_content_reviewers, user=self.user)

        self.summary.update(verdict=amo.AUTO_APPROVED, weight=101)
        ReviewerScore.award_points(
            self.user,
            self.addon,
            self.addon.status,
            version=self.addon.current_version,
            post_review=True,
            content_review=False,
        )
        assert ReviewerScore._leaderboard_list() == [
            (
                self.user.id,
                self.user.name,
                amo.REVIEWED_SCORES[amo.REVIEWED_EXTENSION_MEDIUM_RISK],
            )
        ]

    def test_all_users_by_score(self):
        user2 = UserProfile.objects.create(
            username='otherreviewer', email='[email protected]'
        )
        self.grant_permission(user2, 'Addons:ThemeReview', name='Reviewers: Themes')
        amo.REVIEWED_LEVELS[0]['points'] = 180
        self._give_points()
        self._give_points(status=amo.STATUS_APPROVED)
        self._give_points(user=user2, status=amo.STATUS_NOMINATED)
        users = ReviewerScore.all_users_by_score()
        assert len(users) == 2
        # First user.
        assert users[0]['total'] == 180
        assert users[0]['user_id'] == self.user.id
        assert users[0]['level'] == amo.REVIEWED_LEVELS[0]['name']
        # Second user.
        assert users[1]['total'] == 90
        assert users[1]['user_id'] == user2.id
        assert users[1]['level'] == ''

    def test_caching(self):
        self._give_points()
        with self.assertNumQueries(1):
            ReviewerScore.get_total(self.user)
        with self.assertNumQueries(0):
            ReviewerScore.get_total(self.user)

        with self.assertNumQueries(1):
            ReviewerScore.get_recent(self.user)
        with self.assertNumQueries(0):
            ReviewerScore.get_recent(self.user)

        with self.assertNumQueries(2):
            ReviewerScore.get_leaderboards(self.user)
        with self.assertNumQueries(0):
            ReviewerScore.get_leaderboards(self.user)

        with self.assertNumQueries(1):
            ReviewerScore.get_breakdown(self.user)
        with self.assertNumQueries(0):
            ReviewerScore.get_breakdown(self.user)

        # New points invalidate all caches.
        self._give_points()
        with self.assertNumQueries(1):
            ReviewerScore.get_total(self.user)
        with self.assertNumQueries(1):
            ReviewerScore.get_recent(self.user)
        with self.assertNumQueries(2):
            ReviewerScore.get_leaderboards(self.user)
        with self.assertNumQueries(1):
            ReviewerScore.get_breakdown(self.user)


class TestAutoApprovalSummary(TestCase):
    def setUp(self):
        self.addon = addon_factory(
            average_daily_users=666, version_kw={'version': '1.0'}
        )
        AutoApprovalSummary.objects.create(
            version=self.addon.current_version,
            verdict=amo.AUTO_APPROVED,
            confirmed=True,
        )
        self.current_file_validation = FileValidation.objects.create(
            file=self.addon.current_version.file, validation='{}'
        )
        self.version = version_factory(
            addon=self.addon,
            version='1.1',
            file_kw={'status': amo.STATUS_AWAITING_REVIEW},
        )
        self.file = self.version.file
        self.file_validation = FileValidation.objects.create(
            file=self.version.file, validation='{}'
        )
        AddonApprovalsCounter.objects.create(addon=self.addon, counter=1)

    def test_calculate_score_no_scanner_results(self):
        summary = AutoApprovalSummary.objects.create(version=self.version)
        assert not self.version.scannerresults.exists()
        assert summary.calculate_score() == 0
        assert summary.score == 0

        # Make one on a non-MAD scanner, it should be ignored.
        self.version.scannerresults.create(scanner=CUSTOMS, score=0.9)
        assert summary.calculate_score() == 0
        assert summary.score == 0

    def test_calculate_score_negative_score_on_scanner_result(self):
        self.version.scannerresults.create(scanner=MAD, score=-1)
        summary = AutoApprovalSummary.objects.create(version=self.version)
        assert summary.calculate_score() == 0
        assert summary.score == 0

    def test_calculate_score(self):
        self.version.scannerresults.create(scanner=MAD, score=0.738)
        summary = AutoApprovalSummary.objects.create(version=self.version)
        assert summary.calculate_score() == 73
        assert summary.score == 73

        self.version.scannerresults.update(score=0.858)
        assert summary.calculate_score() == 85
        assert summary.score == 85

    def test_negative_weight(self):
        summary = AutoApprovalSummary.objects.create(version=self.version, weight=-300)
        summary = AutoApprovalSummary.objects.get(pk=summary.pk)
        assert summary.weight == -300

    def test_calculate_weight(self):
        summary = AutoApprovalSummary(version=self.version)
        assert summary.weight_info == {}
        weight_info = summary.calculate_weight()
        expected_result = {}
        assert weight_info == expected_result
        assert summary.weight_info == weight_info

    def test_calculate_weight_admin_code_review(self):
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_code_review=True
        )
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 100
        assert summary.metadata_weight == 100
        assert summary.code_weight == 0
        assert weight_info['admin_code_review'] == 100

    def test_calculate_weight_abuse_reports(self):
        # Extra abuse report for a different add-on, does not count.
        AbuseReport.objects.create(addon=addon_factory())

        # Extra abuse report for a different user, does not count.
        AbuseReport.objects.create(user=user_factory())

        # Extra old abuse report, does not count either.
        old_report = AbuseReport.objects.create(addon=self.addon)
        old_report.update(created=self.days_ago(43))

        # Recent abuse reports.
        AbuseReport.objects.create(addon=self.addon)
        recent_report = AbuseReport.objects.create(addon=self.addon)
        recent_report.update(created=self.days_ago(41))

        # Recent abuse report for one of the developers of the add-on.
        author = user_factory()
        AddonUser.objects.create(addon=self.addon, user=author)
        AbuseReport.objects.create(user=author)

        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 45
        assert summary.metadata_weight == 45
        assert summary.code_weight == 0
        assert weight_info['abuse_reports'] == 45

        # Should be capped at 100. We're already at 45, adding 4 more should
        # result in a weight of 100 instead of 105.
        for i in range(0, 4):
            AbuseReport.objects.create(addon=self.addon)
        weight_info = summary.calculate_weight()
        assert summary.weight == 100
        assert weight_info['abuse_reports'] == 100

    def test_calculate_weight_abuse_reports_use_created_from_instance(self):
        # Create an abuse report 60 days in the past. It would be ignored if
        # we were calculating from today, but use an AutoApprovalSummary
        # instance that is 20 days old, making the abuse report count.
        report = AbuseReport.objects.create(addon=self.addon)
        report.update(created=self.days_ago(60))
        summary = AutoApprovalSummary.objects.create(version=self.version)
        summary.update(created=self.days_ago(20))
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 15
        assert summary.metadata_weight == 15
        assert summary.code_weight == 0
        assert weight_info['abuse_reports'] == 15

    def test_calculate_weight_negative_ratings(self):
        # Positive rating, does not count.
        Rating.objects.create(
            user=user_factory(), addon=self.addon, version=self.version, rating=5
        )

        # Negative rating, but too old, does not count.
        old_rating = Rating.objects.create(
            user=user_factory(), addon=self.addon, version=self.version, rating=1
        )
        old_rating.update(created=self.days_ago(370))

        # Negative rating on a different add-on, does not count either.
        extra_addon = addon_factory()
        Rating.objects.create(
            user=user_factory(),
            addon=extra_addon,
            version=extra_addon.current_version,
            rating=1,
        )

        # Recent negative ratings.
        ratings = [
            Rating(
                user=user_factory(), addon=self.addon, version=self.version, rating=3
            )
            for i in range(0, 49)
        ]
        Rating.objects.bulk_create(ratings)
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 0  # Not enough negative ratings yet...
        assert summary.metadata_weight == 0
        assert summary.code_weight == 0
        assert weight_info == {}

        # Create one more to get to weight == 1.
        Rating.objects.create(
            user=user_factory(), addon=self.addon, version=self.version, rating=2
        )
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 1
        assert summary.metadata_weight == 1
        assert summary.code_weight == 0
        assert weight_info == {'negative_ratings': 1}

        # Create 5000 more (sorry!) to make sure it's capped at 100.
        ratings = [
            Rating(
                user=user_factory(), addon=self.addon, version=self.version, rating=3
            )
            for i in range(0, 5000)
        ]
        Rating.objects.bulk_create(ratings)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 100
        assert summary.metadata_weight == 100
        assert summary.code_weight == 0
        assert weight_info == {'negative_ratings': 100}

    def test_calculate_weight_reputation(self):
        summary = AutoApprovalSummary(version=self.version)
        self.addon.update(reputation=0)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 0
        assert summary.metadata_weight == 0
        assert summary.code_weight == 0
        assert weight_info == {}

        self.addon.update(reputation=3)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == -300
        assert summary.metadata_weight == -300
        assert summary.code_weight == 0
        assert weight_info == {'reputation': -300}

        self.addon.update(reputation=1000)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == -300
        assert summary.metadata_weight == -300
        assert summary.code_weight == 0
        assert weight_info == {'reputation': -300}

        self.addon.update(reputation=-1000)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 0
        assert summary.metadata_weight == 0
        assert summary.code_weight == 0
        assert weight_info == {}

    def test_calculate_weight_average_daily_users(self):
        self.addon.update(average_daily_users=142444)
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 14
        assert summary.metadata_weight == 14
        assert summary.code_weight == 0
        assert weight_info == {'average_daily_users': 14}

        self.addon.update(average_daily_users=1756567658)
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 100
        assert summary.metadata_weight == 100
        assert summary.code_weight == 0
        assert weight_info == {'average_daily_users': 100}

    def test_calculate_weight_past_rejection_history(self):
        # Old rejected version, does not count.
        version_factory(
            addon=self.addon,
            file_kw={'reviewed': self.days_ago(370), 'status': amo.STATUS_DISABLED},
        )

        # Version disabled by the developer, not Mozilla (original_status
        # is set to something different than STATUS_NULL), does not count.
        version_factory(
            addon=self.addon,
            file_kw={
                'reviewed': self.days_ago(15),
                'status': amo.STATUS_DISABLED,
                'original_status': amo.STATUS_APPROVED,
            },
        )

        # Rejected version.
        version_factory(
            addon=self.addon,
            file_kw={'reviewed': self.days_ago(14), 'status': amo.STATUS_DISABLED},
        )

        # Another rejected version.
        version_factory(
            addon=self.addon,
            file_kw={
                'reviewed': self.days_ago(13),
                'status': amo.STATUS_DISABLED,
            },
        )

        # Rejected version on a different add-on, does not count.
        version_factory(
            addon=addon_factory(),
            file_kw={'reviewed': self.days_ago(12), 'status': amo.STATUS_DISABLED},
        )

        # Approved version, does not count.
        new_approved_version = version_factory(
            addon=self.addon, file_kw={'reviewed': self.days_ago(11)}
        )
        FileValidation.objects.create(file=new_approved_version.file, validation='{}')

        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 20
        assert summary.metadata_weight == 20
        assert summary.code_weight == 0
        assert weight_info == {'past_rejection_history': 20}

        # Should be capped at 100.
        for i in range(0, 10):
            version_factory(
                addon=self.addon,
                version=str(i),
                file_kw={
                    'reviewed': self.days_ago(10),
                    'status': amo.STATUS_DISABLED,
                },
            )
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 100
        assert summary.metadata_weight == 100
        assert summary.code_weight == 0
        assert weight_info == {'past_rejection_history': 100}

    def test_calculate_weight_uses_eval_or_document_write(self):
        validation_data = {
            'messages': [
                {
                    'id': ['DANGEROUS_EVAL'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 50
        assert summary.metadata_weight == 0
        assert summary.code_weight == 50
        assert weight_info == {'uses_eval_or_document_write': 50}

        validation_data = {
            'messages': [
                {
                    'id': ['NO_DOCUMENT_WRITE'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 50
        assert summary.metadata_weight == 0
        assert summary.code_weight == 50
        assert weight_info == {'uses_eval_or_document_write': 50}

        # Still only 50 if both appear.
        validation_data = {
            'messages': [
                {
                    'id': ['DANGEROUS_EVAL'],
                },
                {
                    'id': ['NO_DOCUMENT_WRITE'],
                },
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 50
        assert summary.metadata_weight == 0
        assert summary.code_weight == 50
        assert weight_info == {'uses_eval_or_document_write': 50}

    def test_calculate_weight_uses_implied_eval(self):
        validation_data = {
            'messages': [
                {
                    'id': ['NO_IMPLIED_EVAL'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 5
        assert summary.metadata_weight == 0
        assert summary.code_weight == 5
        assert weight_info == {'uses_implied_eval': 5}

    def test_calculate_weight_uses_innerhtml(self):
        validation_data = {
            'messages': [
                {
                    'id': ['UNSAFE_VAR_ASSIGNMENT'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 50
        assert summary.metadata_weight == 0
        assert summary.code_weight == 50
        assert weight_info == {'uses_innerhtml': 50}

    def test_calculate_weight_uses_innerhtml_multiple_times(self):
        validation_data = {
            'messages': [
                {
                    'id': ['UNSAFE_VAR_ASSIGNMENT'],
                },
                {
                    'id': ['IGNORE_ME'],
                },
                {
                    'id': ['UNSAFE_VAR_ASSIGNMENT'],
                },
                {
                    'id': ['UNSAFE_VAR_ASSIGNMENT'],
                },
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        # 50 base, + 10 per additional instance.
        assert summary.weight == 70
        assert summary.metadata_weight == 0
        assert summary.code_weight == 70
        assert weight_info == {'uses_innerhtml': 70}

    def test_calculate_weight_uses_custom_csp(self):
        validation_data = {
            'messages': [
                {
                    'id': ['MANIFEST_CSP'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 90
        assert summary.metadata_weight == 0
        assert summary.code_weight == 90
        assert weight_info == {'uses_custom_csp': 90}

    def test_calculate_weight_uses_native_messaging(self):
        WebextPermission.objects.create(file=self.file, permissions=['nativeMessaging'])
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 100
        assert summary.metadata_weight == 0
        assert summary.code_weight == 100
        assert weight_info == {'uses_native_messaging': 100}

    def test_calculate_weight_uses_remote_scripts(self):
        validation_data = {
            'messages': [
                {
                    'id': ['REMOTE_SCRIPT'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 100
        assert summary.metadata_weight == 0
        assert summary.code_weight == 100
        assert weight_info == {'uses_remote_scripts': 100}

    def test_calculate_weight_violates_mozilla_conditions_of_use(self):
        validation_data = {
            'messages': [
                {
                    'id': ['MOZILLA_COND_OF_USE'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 20
        assert summary.metadata_weight == 0
        assert summary.code_weight == 20
        assert weight_info == {'violates_mozilla_conditions': 20}

    def test_calculate_weight_uses_unknown_minified_code_nothing(self):
        validation_data = {
            'metadata': {'unknownMinifiedFiles': []}  # Empty list: no weight.
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 0
        assert summary.metadata_weight == 0
        assert summary.code_weight == 0
        assert weight_info == {}

        validation_data = {
            'metadata': {
                # Missing property: no weight.
            }
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 0
        assert summary.metadata_weight == 0
        assert summary.code_weight == 0
        assert weight_info == {}

        validation_data = {
            # Missing metadata: no weight.
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 0
        assert summary.metadata_weight == 0
        assert summary.code_weight == 0
        assert weight_info == {}

    def test_calculate_weight_uses_unknown_minified_code(self):
        validation_data = {'metadata': {'unknownMinifiedFiles': ['something']}}
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 100
        assert summary.metadata_weight == 0
        assert summary.code_weight == 100
        assert weight_info == {'uses_unknown_minified_code': 100}

    def test_calculate_weight_uses_unknown_minified_code_multiple_times(self):
        validation_data = {
            'metadata': {'unknownMinifiedFiles': ['something', 'foobar', 'another']}
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        # 100 base, + 20 per additional instance.
        assert summary.weight == 120
        assert summary.metadata_weight == 0
        assert summary.code_weight == 120
        assert weight_info == {'uses_unknown_minified_code': 120}

    def test_calculate_size_of_code_changes_no_current_validation(self):
        # Delete the validation for the previously confirmed version and reload
        # the version we're testing (otherwise the file validation has already
        # been loaded and is still attached to the instance...)
        self.current_file_validation.delete()
        self.version = Version.objects.get(pk=self.version.pk)
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 500
        assert summary.metadata_weight == 0
        assert summary.code_weight == 500
        assert weight_info == {'no_validation_result': 500}

    def test_calculate_size_of_code_changes_no_new_validation(self):
        # Delete the validation for the new version and reload that version
        # (otherwise the file validation has already been loaded and is still
        # attached to the instance...)
        self.file_validation.delete()
        self.version = Version.objects.get(pk=self.version.pk)
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 500
        assert summary.metadata_weight == 0
        assert summary.code_weight == 500
        assert weight_info == {'no_validation_result': 500}

    def test_calculate_size_of_code_changes_no_reported_size(self):
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.calculate_size_of_code_changes() == 0
        assert summary.weight == 0
        assert summary.metadata_weight == 0
        assert summary.code_weight == 0
        assert weight_info == {}

    def test_calculate_size_of_code_changes_no_previous_version_size(self):
        validation_data = {
            'metadata': {
                'totalScannedFileSize': 15000,
            }
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        assert summary.calculate_size_of_code_changes() == 15000
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 3
        assert summary.metadata_weight == 0
        assert summary.code_weight == 3
        assert weight_info == {'size_of_code_changes': 3}

    def test_calculate_size_of_code_changes(self):
        old_validation_data = {
            'metadata': {
                'totalScannedFileSize': 5000,
            }
        }
        self.current_file_validation.update(validation=json.dumps(old_validation_data))
        new_validation_data = {
            'metadata': {
                'totalScannedFileSize': 15000,
            }
        }
        self.file_validation.update(validation=json.dumps(new_validation_data))
        summary = AutoApprovalSummary(version=self.version)
        assert summary.calculate_size_of_code_changes() == 10000
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 2
        assert summary.metadata_weight == 0
        assert summary.code_weight == 2
        assert weight_info == {'size_of_code_changes': 2}

    def test_calculate_size_of_code_change_use_previously_confirmed(self):
        old_validation_data = {
            'metadata': {
                'totalScannedFileSize': 5000,
            }
        }
        self.current_file_validation.update(validation=json.dumps(old_validation_data))
        new_validation_data = {
            'metadata': {
                'totalScannedFileSize': 15000,
            }
        }
        self.file_validation.update(validation=json.dumps(new_validation_data))

        # Add a new current_version, unconfirmed. This version will be ignored
        # for the comparison as all we care about is the previous confirmed
        # version.
        self.addon.current_version.update(created=self.days_ago(2))
        new_version = version_factory(addon=self.addon)
        self.addon.reload()
        assert self.addon.current_version == new_version
        AutoApprovalSummary.objects.create(
            version=new_version, verdict=amo.AUTO_APPROVED
        )
        new_validation_data = {
            'metadata': {
                'totalScannedFileSize': 14999,
            }
        }
        FileValidation.objects.create(
            file=new_version.file, validation=json.dumps(new_validation_data)
        )

        summary = AutoApprovalSummary(version=self.version)
        # Size of code changes should be 10000 and not 1, proving that it
        # compared with the old, confirmed version.
        assert summary.calculate_size_of_code_changes() == 10000
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 2
        assert summary.metadata_weight == 0
        assert summary.code_weight == 2
        assert weight_info == {'size_of_code_changes': 2}

    def test_calculate_size_of_code_changes_no_negative(self):
        old_validation_data = {
            'metadata': {
                'totalScannedFileSize': 20000,
            }
        }
        self.current_file_validation.update(validation=json.dumps(old_validation_data))
        new_validation_data = {
            'metadata': {
                'totalScannedFileSize': 5000,
            }
        }
        self.file_validation.update(validation=json.dumps(new_validation_data))
        summary = AutoApprovalSummary(version=self.version)
        assert summary.calculate_size_of_code_changes() == 15000
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 3
        assert summary.metadata_weight == 0
        assert summary.code_weight == 3
        assert weight_info == {'size_of_code_changes': 3}

    def test_calculate_size_of_code_changes_max(self):
        old_validation_data = {
            'metadata': {
                'totalScannedFileSize': 50000000,
            }
        }
        self.current_file_validation.update(validation=json.dumps(old_validation_data))
        new_validation_data = {
            'metadata': {
                'totalScannedFileSize': 0,
            }
        }
        self.file_validation.update(validation=json.dumps(new_validation_data))
        summary = AutoApprovalSummary(version=self.version)
        assert summary.calculate_size_of_code_changes() == 50000000
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 100
        assert summary.metadata_weight == 0
        assert summary.code_weight == 100
        assert weight_info == {'size_of_code_changes': 100}

    def test_calculate_weight_sum(self):
        validation_data = {
            'messages': [
                {'id': ['MANIFEST_CSP']},
                {'id': ['UNSAFE_VAR_ASSIGNMENT']},
                {'id': ['NO_IMPLIED_EVAL']},
                {'id': ['DANGEROUS_EVAL']},
                {'id': ['UNSAFE_VAR_ASSIGNMENT']},  # Another one.
                {'id': ['NOTHING_TO_SEE_HERE_MOVE_ON']},
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.metadata_weight == 0
        assert summary.code_weight == 205
        assert summary.weight == 205
        expected_result = {
            'uses_custom_csp': 90,
            'uses_eval_or_document_write': 50,
            'uses_implied_eval': 5,
            'uses_innerhtml': 60,  # There is one extra.
        }
        assert weight_info == expected_result

    def test_count_uses_custom_csp(self):
        assert AutoApprovalSummary.count_uses_custom_csp(self.version) == 0

        validation_data = {
            'messages': [
                {
                    'id': ['MANIFEST_CSP'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        assert AutoApprovalSummary.count_uses_custom_csp(self.version) == 1

    def test_count_uses_custom_csp_file_validation_missing(self):
        self.file_validation.delete()
        self.version.file.refresh_from_db()
        with self.assertRaises(AutoApprovalNoValidationResultError):
            AutoApprovalSummary.count_uses_custom_csp(self.version)

    def test_check_uses_native_messaging(self):
        assert AutoApprovalSummary.check_uses_native_messaging(self.version) == 0

        webext_permissions = WebextPermission.objects.create(
            file=self.file, permissions=['foobar']
        )
        del self.file.permissions
        assert AutoApprovalSummary.check_uses_native_messaging(self.version) == 0

        webext_permissions.update(permissions=['nativeMessaging', 'foobar'])
        del self.file.permissions
        assert AutoApprovalSummary.check_uses_native_messaging(self.version) == 1

    def test_calculate_weight_uses_coinminer(self):
        validation_data = {
            'messages': [
                {
                    'id': ['COINMINER_USAGE_DETECTED'],
                }
            ]
        }
        self.file_validation.update(validation=json.dumps(validation_data))
        summary = AutoApprovalSummary(version=self.version)
        weight_info = summary.calculate_weight()
        assert summary.weight_info == weight_info
        assert summary.weight == 2000
        assert weight_info['uses_coinminer'] == 2000

    def test_get_pretty_weight_info(self):
        summary = AutoApprovalSummary(version=self.version)
        assert summary.weight_info == {}
        pretty_weight_info = summary.get_pretty_weight_info()
        assert pretty_weight_info == ['Weight breakdown not available.']

        summary.weight_info = {
            'key1': 666,
            'key2': None,
            'key3': 0,
            'key4': -1,
        }
        pretty_weight_info = summary.get_pretty_weight_info()
        assert pretty_weight_info == ['key1: 666', 'key4: -1']

    def test_check_has_auto_approval_disabled(self):
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags = AddonReviewerFlags.objects.create(addon=self.addon)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags.update(auto_approval_disabled=True)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is True
        )

        # The auto_approval_disabled flag only applies to listed.
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

    def test_check_has_auto_approval_disabled_unlisted(self):
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags = AddonReviewerFlags.objects.create(addon=self.addon)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags.update(auto_approval_disabled_unlisted=True)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is True
        )

        # The auto_approval_disabled_unlisted flag only applies to unlisted.
        self.version.update(channel=amo.RELEASE_CHANNEL_LISTED)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

    def test_check_has_auto_approval_disabled_until_next_approval(self):
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags = AddonReviewerFlags.objects.create(addon=self.addon)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags.update(auto_approval_disabled_until_next_approval=True)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is True
        )

        # That flag only applies to listed.
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

    def test_check_has_auto_approval_disabled_until_next_approval_unlisted(self):
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags = AddonReviewerFlags.objects.create(addon=self.addon)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags.update(auto_approval_disabled_until_next_approval_unlisted=True)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is True
        )

        # That flag only applies to unlisted.
        self.version.update(channel=amo.RELEASE_CHANNEL_LISTED)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

    def test_check_has_auto_approval_delayed_until(self):
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        flags = AddonReviewerFlags.objects.create(addon=self.addon)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        past_date = datetime.now() - timedelta(hours=1)
        flags.update(auto_approval_delayed_until=past_date)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is False
        )

        future_date = datetime.now() + timedelta(hours=1)
        flags.update(auto_approval_delayed_until=future_date)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is True
        )

        # *That* flag applies to both listed and unlisted.
        self.version.update(channel=amo.RELEASE_CHANNEL_UNLISTED)
        assert (
            AutoApprovalSummary.check_has_auto_approval_disabled(self.version) is True
        )

    def test_check_is_promoted_prereview(self):
        assert AutoApprovalSummary.check_is_promoted_prereview(self.version) is False

        promoted = PromotedAddon.objects.create(addon=self.addon)
        assert AutoApprovalSummary.check_is_promoted_prereview(self.version) is False

        promoted.update(group_id=RECOMMENDED.id)
        assert AutoApprovalSummary.check_is_promoted_prereview(self.version) is True

        promoted.update(group_id=STRATEGIC.id)  # STRATEGIC isn't prereview.
        assert AutoApprovalSummary.check_is_promoted_prereview(self.version) is False

        promoted.update(group_id=LINE.id)  # LINE is though.
        assert AutoApprovalSummary.check_is_promoted_prereview(self.version) is True

    def test_check_should_be_delayed(self):
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

        # Delete current_version, making self.version the first listed version
        # submitted, and make the add-on creation date recent.
        self.addon.current_version.delete()
        self.addon.update(created=datetime.now())
        self.addon.update_status()

        # First test with somehow no nomination date at all. The add-on
        # creation date is used as a fallback, and it was created recently
        # so it should be delayed.
        assert self.version.nomination is None
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is True

        # Still using the add-on creation date as fallback: if it's old enough
        # it should not be delayed.
        self.addon.update(created=self.days_ago(2))
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

        # Now add a recent nomination date. It should be delayed.
        self.version.update(nomination=datetime.now() - timedelta(hours=22))
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is True

        # Update the nomination date into the past, it should no longer be
        # delayed.
        self.version.update(nomination=self.days_ago(2))
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

        # Unlisted shouldn't be affected.
        self.version.update(
            nomination=datetime.now() - timedelta(hours=22),
            channel=amo.RELEASE_CHANNEL_UNLISTED,
        )
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

    def test_check_should_be_delayed_only_until_first_content_review(self):
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

        # Delete current_version, making self.version the first listed version
        # submitted, and make the add-on creation date recent.
        self.addon.current_version.delete()
        self.addon.update(created=datetime.now())
        self.addon.update_status()

        # Also remove AddonApprovalsCounter to start fresh.
        self.addon.addonapprovalscounter.delete()

        # Set a recent nomination date. It should be delayed.
        self.version.update(nomination=datetime.now() - timedelta(hours=12))
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is True

        # Add AddonApprovalsCounter with default values, it should still be
        # delayed.
        self.addon.addonapprovalscounter = AddonApprovalsCounter.objects.create(
            addon=self.addon
        )
        assert self.addon.addonapprovalscounter.last_content_review is None
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is True

        # Once there is a content review, it should no longer be delayed.
        self.addon.addonapprovalscounter.update(last_content_review=datetime.now())
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

    def test_check_should_be_delayed_langpacks_are_exempted(self):
        self.addon.update(type=amo.ADDON_LPAPP)
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

        # Delete current_version, making self.version the first listed version
        # submitted, and make the add-on creation date recent.
        self.addon.current_version.delete()
        self.addon.update(created=datetime.now())
        self.addon.update_status()
        assert self.version.nomination is None
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

        self.addon.update(created=self.days_ago(2))
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

        self.version.update(nomination=datetime.now() - timedelta(hours=22))
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

        self.version.update(nomination=self.days_ago(2))
        assert AutoApprovalSummary.check_should_be_delayed(self.version) is False

    def test_check_is_blocked(self):
        assert AutoApprovalSummary.check_is_blocked(self.version) is False

        block = Block.objects.create(addon=self.addon, updated_by=user_factory())
        del self.version.addon.block
        assert AutoApprovalSummary.check_is_blocked(self.version) is True

        block.update(min_version='9999999')
        del self.version.addon.block
        assert AutoApprovalSummary.check_is_blocked(self.version) is False

        block.update(min_version='0')
        del self.version.addon.block
        assert AutoApprovalSummary.check_is_blocked(self.version) is True

    def test_check_is_locked(self):
        assert AutoApprovalSummary.check_is_locked(self.version) is False

        set_reviewing_cache(self.version.addon.pk, settings.TASK_USER_ID)
        assert AutoApprovalSummary.check_is_locked(self.version) is False

        set_reviewing_cache(self.version.addon.pk, settings.TASK_USER_ID + 42)
        assert AutoApprovalSummary.check_is_locked(self.version) is True

        # Langpacks are never considered locked.
        self.addon.update(type=amo.ADDON_LPAPP)
        assert AutoApprovalSummary.check_is_locked(self.version) is False

    @mock.patch.object(AutoApprovalSummary, 'calculate_weight', spec=True)
    @mock.patch.object(AutoApprovalSummary, 'calculate_verdict', spec=True)
    def test_create_summary_for_version(
        self, calculate_verdict_mock, calculate_weight_mock
    ):
        def create_dynamic_patch(name):
            patcher = mock.patch.object(
                AutoApprovalSummary, name, spec=getattr(AutoApprovalSummary, name)
            )
            thing = patcher.start()
            thing.return_value = False
            self.addCleanup(patcher.stop)
            return thing

        calculate_verdict_mock.return_value = {'dummy_verdict': True}
        dynamic_mocks = [
            create_dynamic_patch(f'check_{field}')
            for field in AutoApprovalSummary.auto_approval_verdict_fields
        ]

        summary, info = AutoApprovalSummary.create_summary_for_version(
            self.version,
        )

        for mocked_method in dynamic_mocks:
            assert mocked_method.call_count == 1
            mocked_method.assert_called_with(self.version)
        assert calculate_weight_mock.call_count == 1
        assert calculate_verdict_mock.call_count == 1
        assert calculate_verdict_mock.call_args == (
            {
                'dry_run': False,
            },
        )
        assert summary.pk
        assert summary.version == self.version
        assert info == {'dummy_verdict': True}

    def test_create_summary_for_version_no_mocks(self):
        AddonReviewerFlags.objects.create(
            addon=self.addon, needs_admin_code_review=True
        )
        self.file_validation.update(
            validation=json.dumps(
                {
                    'messages': [
                        {'id': ['DANGEROUS_EVAL']},
                    ]
                }
            )
        )
        self.version.scannerresults.create(
            scanner=MAD, score=0.95, version=self.version
        )

        summary, info = AutoApprovalSummary.create_summary_for_version(
            self.version,
        )
        assert summary.verdict == amo.AUTO_APPROVED
        assert summary.score == 95
        assert summary.weight == 150
        assert summary.code_weight == 50
        assert summary.metadata_weight == 100
        assert summary.weight_info == {
            'admin_code_review': 100,
            'uses_eval_or_document_write': 50,
        }

    @mock.patch.object(AutoApprovalSummary, 'calculate_verdict', spec=True)
    def test_create_summary_no_previously_approved_versions(
        self,
calculate_verdict_mock ): AddonApprovalsCounter.objects.all().delete() self.version.reload() calculate_verdict_mock.return_value = {'dummy_verdict': True} summary, info = AutoApprovalSummary.create_summary_for_version(self.version) assert summary.pk assert info == {'dummy_verdict': True} def test_create_summary_already_existing(self): # Create a dummy summary manually, then call the method to create a # real one. It should have just updated the previous instance. summary = AutoApprovalSummary.objects.create( version=self.version, is_locked=True ) assert summary.pk assert summary.version == self.version assert summary.verdict == amo.NOT_AUTO_APPROVED previous_summary_pk = summary.pk summary, info = AutoApprovalSummary.create_summary_for_version(self.version) assert summary.pk == previous_summary_pk assert summary.version == self.version assert summary.is_locked is False assert summary.verdict == amo.AUTO_APPROVED assert info == { 'has_auto_approval_disabled': False, 'is_locked': False, 'is_promoted_prereview': False, 'should_be_delayed': False, 'is_blocked': False, } def test_calculate_verdict_failure_dry_run(self): summary = AutoApprovalSummary.objects.create( version=self.version, is_locked=True ) info = summary.calculate_verdict(dry_run=True) assert info == { 'has_auto_approval_disabled': False, 'is_locked': True, 'is_promoted_prereview': False, 'should_be_delayed': False, 'is_blocked': False, } assert summary.verdict == amo.WOULD_NOT_HAVE_BEEN_AUTO_APPROVED def test_calculate_verdict_failure(self): summary = AutoApprovalSummary.objects.create( version=self.version, is_locked=True ) info = summary.calculate_verdict() assert info == { 'has_auto_approval_disabled': False, 'is_locked': True, 'is_promoted_prereview': False, 'should_be_delayed': False, 'is_blocked': False, } assert summary.verdict == amo.NOT_AUTO_APPROVED def test_calculate_verdict_success(self): summary = AutoApprovalSummary.objects.create(version=self.version) info = summary.calculate_verdict() assert info == { 'has_auto_approval_disabled': False, 'is_locked': False, 'is_promoted_prereview': False, 'should_be_delayed': False, 'is_blocked': False, } assert summary.verdict == amo.AUTO_APPROVED def test_calculate_verdict_success_dry_run(self): summary = AutoApprovalSummary.objects.create(version=self.version) info = summary.calculate_verdict(dry_run=True) assert info == { 'has_auto_approval_disabled': False, 'is_locked': False, 'is_promoted_prereview': False, 'should_be_delayed': False, 'is_blocked': False, } assert summary.verdict == amo.WOULD_HAVE_BEEN_AUTO_APPROVED def test_calculate_verdict_has_auto_approval_disabled(self): summary = AutoApprovalSummary.objects.create( version=self.version, has_auto_approval_disabled=True ) info = summary.calculate_verdict() assert info == { 'has_auto_approval_disabled': True, 'is_locked': False, 'is_promoted_prereview': False, 'should_be_delayed': False, 'is_blocked': False, } assert summary.verdict == amo.NOT_AUTO_APPROVED def test_calculate_verdict_is_promoted_prereview(self): summary = AutoApprovalSummary.objects.create( version=self.version, is_promoted_prereview=True ) info = summary.calculate_verdict() assert info == { 'has_auto_approval_disabled': False, 'is_locked': False, 'is_promoted_prereview': True, 'should_be_delayed': False, 'is_blocked': False, } assert summary.verdict == amo.NOT_AUTO_APPROVED def test_calculate_verdict_is_blocked(self): summary = AutoApprovalSummary.objects.create( version=self.version, is_blocked=True ) info = summary.calculate_verdict() assert info == { 
'has_auto_approval_disabled': False, 'is_locked': False, 'is_promoted_prereview': False, 'should_be_delayed': False, 'is_blocked': True, } assert summary.verdict == amo.NOT_AUTO_APPROVED def test_calculate_verdict_should_be_delayed(self): summary = AutoApprovalSummary.objects.create( version=self.version, should_be_delayed=True ) info = summary.calculate_verdict() assert info == { 'has_auto_approval_disabled': False, 'is_locked': False, 'is_promoted_prereview': False, 'should_be_delayed': True, 'is_blocked': False, } assert summary.verdict == amo.NOT_AUTO_APPROVED def test_verdict_info_prettifier(self): verdict_info = { 'has_auto_approval_disabled': True, 'is_locked': True, 'is_promoted_prereview': True, 'should_be_delayed': True, 'is_blocked': True, } result = list(AutoApprovalSummary.verdict_info_prettifier(verdict_info)) assert result == [ 'Has auto-approval disabled/delayed flag set', 'Version string and guid match a blocklist Block', 'Is locked by a reviewer', 'Is in a promoted addon group that requires pre-review', "Delayed because it's the first listed version", ] result = list(AutoApprovalSummary.verdict_info_prettifier({})) assert result == [] class TestCannedResponse(TestCase): def test_basic(self): response = CannedResponse.objects.create( name='Terms of services', response='test', category=amo.CANNED_RESPONSE_CATEGORY_OTHER, type=amo.CANNED_RESPONSE_TYPE_ADDON, ) assert response.name == 'Terms of services' assert response.response == 'test' assert response.category == amo.CANNED_RESPONSE_CATEGORY_OTHER assert response.type == amo.CANNED_RESPONSE_TYPE_ADDON def test_category_default(self): response = CannedResponse.objects.create( name='Terms of services', response='test', type=amo.CANNED_RESPONSE_TYPE_ADDON, ) assert response.category == amo.CANNED_RESPONSE_CATEGORY_OTHER
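The expected strings in test_verdict_info_prettifier above come back in sorted key order, which is why the is_blocked message ('Version string and guid match a blocklist Block') appears second. A minimal standalone sketch of a prettifier consistent with those assertions (the message table is inferred from the test expectations, not lifted from the real implementation):

def verdict_info_prettifier(verdict_info):
    # Human-readable strings keyed by verdict flag; only truthy flags are
    # reported, in sorted key order, matching the expectations above.
    messages = {
        'has_auto_approval_disabled': 'Has auto-approval disabled/delayed flag set',
        'is_blocked': 'Version string and guid match a blocklist Block',
        'is_locked': 'Is locked by a reviewer',
        'is_promoted_prereview': (
            'Is in a promoted addon group that requires pre-review'
        ),
        'should_be_delayed': "Delayed because it's the first listed version",
    }
    return (messages[key] for key in sorted(verdict_info) if verdict_info[key])

assert list(verdict_info_prettifier({})) == []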
        self.addon.update(status=amo.STATUS_APPROVED)
        base_args = (self.addon, self.addon.status)
        # No version.
        assert (
            ReviewerScore.get_event(*base_args, version=None, post_review=True)
            == amo.REVIEWED_EXTENSION_LOW_RISK
        )
        # Now with a summary... low risk.
        self.summary.update(verdict=amo.AUTO_APPROVED, weight=-10)
        assert (
            ReviewerScore.get_event(
                *base_args, version=self.addon.current_version, post_review=True
            )
            is amo.REVIEWED_EXTENSION_LOW_RISK
        )
        # Medium risk.
        self.summary.update(weight=91)
        assert (
            ReviewerScore.get_event(
                *base_args, version=self.addon.current_version, post_review=True
            )
            is amo.REVIEWED_EXTENSION_MEDIUM_RISK
        )
        # High risk.
        self.summary.update(weight=176)
        assert (
            ReviewerScore.get_event(
                *base_args, version=self.addon.current_version, post_review=True
            )
            is amo.REVIEWED_EXTENSION_HIGH_RISK
        )
        # Highest risk.
        self.summary.update(weight=276)
        assert (
            ReviewerScore.get_event(
                *base_args, version=self.addon.current_version, post_review=True
            )
            is amo.REVIEWED_EXTENSION_HIGHEST_RISK
        )
        # Highest risk again.
        self.summary.update(weight=65535)
        assert (
            ReviewerScore.get_event(
                *base_args, version=self.addon.current_version, post_review=True
            )
            is amo.REVIEWED_EXTENSION_HIGHEST_RISK
        )
        # Content review is always the same.
        assert (
            ReviewerScore.get_event(
                *base_args,
                version=self.addon.current_version,
                post_review=True,
                content_review=True,
            )
            == amo.REVIEWED_CONTENT_REVIEW
        )
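The weights exercised above pin the risk tiers only at the sampled points (-10 low, 91 medium, 176 high, 276 and 65535 highest). A standalone sketch reproducing those boundaries; the exact cutoff values are an assumption read off the assertions, not the real ReviewerScore logic:

def risk_tier(weight):
    # Cutoffs inferred from the test values above; hypothetical, not AMO's.
    if weight < 91:
        return 'low'
    if weight < 176:
        return 'medium'
    if weight < 276:
        return 'high'
    return 'highest'

assert [risk_tier(w) for w in (-10, 91, 176, 276, 65535)] == [
    'low', 'medium', 'high', 'highest', 'highest',
]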
upload-avatar.dto.ts
export class UploadAvatarDto { titleUrl?: string; }
zz_generated.deepcopy.go
// +build !ignore_autogenerated /* Copyright 2017 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // This file was autogenerated by deepcopy-gen. Do not edit it manually! package certificates import ( v1 "k8s.io/apimachinery/pkg/apis/meta/v1" conversion "k8s.io/apimachinery/pkg/conversion" runtime "k8s.io/apimachinery/pkg/runtime" reflect "reflect" ) func
init
() { SchemeBuilder.Register(RegisterDeepCopies) } // RegisterDeepCopies adds deep-copy functions to the given scheme. Public // to allow building arbitrary schemes. func RegisterDeepCopies(scheme *runtime.Scheme) error { return scheme.AddGeneratedDeepCopyFuncs( conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequest, InType: reflect.TypeOf(&CertificateSigningRequest{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestCondition, InType: reflect.TypeOf(&CertificateSigningRequestCondition{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestList, InType: reflect.TypeOf(&CertificateSigningRequestList{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestSpec, InType: reflect.TypeOf(&CertificateSigningRequestSpec{})}, conversion.GeneratedDeepCopyFunc{Fn: DeepCopy_certificates_CertificateSigningRequestStatus, InType: reflect.TypeOf(&CertificateSigningRequestStatus{})}, ) } func DeepCopy_certificates_CertificateSigningRequest(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*CertificateSigningRequest) out := out.(*CertificateSigningRequest) *out = *in if newVal, err := c.DeepCopy(&in.ObjectMeta); err != nil { return err } else { out.ObjectMeta = *newVal.(*v1.ObjectMeta) } if err := DeepCopy_certificates_CertificateSigningRequestSpec(&in.Spec, &out.Spec, c); err != nil { return err } if err := DeepCopy_certificates_CertificateSigningRequestStatus(&in.Status, &out.Status, c); err != nil { return err } return nil } } func DeepCopy_certificates_CertificateSigningRequestCondition(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*CertificateSigningRequestCondition) out := out.(*CertificateSigningRequestCondition) *out = *in if newVal, err := c.DeepCopy(&in.LastUpdateTime); err != nil { return err } else { out.LastUpdateTime = *newVal.(*v1.Time) } return nil } } func DeepCopy_certificates_CertificateSigningRequestList(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*CertificateSigningRequestList) out := out.(*CertificateSigningRequestList) *out = *in if in.Items != nil { in, out := &in.Items, &out.Items *out = make([]CertificateSigningRequest, len(*in)) for i := range *in { if err := DeepCopy_certificates_CertificateSigningRequest(&(*in)[i], &(*out)[i], c); err != nil { return err } } } return nil } } func DeepCopy_certificates_CertificateSigningRequestSpec(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*CertificateSigningRequestSpec) out := out.(*CertificateSigningRequestSpec) *out = *in if in.Request != nil { in, out := &in.Request, &out.Request *out = make([]byte, len(*in)) copy(*out, *in) } if in.Usages != nil { in, out := &in.Usages, &out.Usages *out = make([]KeyUsage, len(*in)) copy(*out, *in) } if in.Groups != nil { in, out := &in.Groups, &out.Groups *out = make([]string, len(*in)) copy(*out, *in) } if in.Extra != nil { in, out := &in.Extra, &out.Extra *out = make(map[string]ExtraValue) for key, val := range *in { if newVal, err := c.DeepCopy(&val); err != nil { return err } else { (*out)[key] = *newVal.(*ExtraValue) } } } return nil } } func DeepCopy_certificates_CertificateSigningRequestStatus(in interface{}, out interface{}, c *conversion.Cloner) error { { in := in.(*CertificateSigningRequestStatus) out := out.(*CertificateSigningRequestStatus) *out = *in if in.Conditions != nil { in, out := &in.Conditions, &out.Conditions *out = 
make([]CertificateSigningRequestCondition, len(*in)) for i := range *in { if err := DeepCopy_certificates_CertificateSigningRequestCondition(&(*in)[i], &(*out)[i], c); err != nil { return err } } } if in.Certificate != nil { in, out := &in.Certificate, &out.Certificate *out = make([]byte, len(*in)) copy(*out, *in) } return nil } }
checkout-receipt.component.spec.ts
import { ComponentFixture, TestBed, async } from '@angular/core/testing'; import { TranslateModule } from '@ngx-translate/core'; import { MockComponent } from 'ng-mocks'; import { User } from 'ish-core/models/user/user.model'; import { BasketMockData } from 'ish-core/utils/dev/basket-mock-data'; import { ContentIncludeComponent } from 'ish-shared/cms/components/content-include/content-include.component'; import { AddressComponent } from 'ish-shared/components/address/address/address.component'; import { BasketCostSummaryComponent } from 'ish-shared/components/basket/basket-cost-summary/basket-cost-summary.component'; import { LineItemListComponent } from 'ish-shared/components/basket/line-item-list/line-item-list.component'; import { InfoBoxComponent } from 'ish-shared/components/common/info-box/info-box.component'; import { ModalDialogLinkComponent } from 'ish-shared/components/common/modal-dialog-link/modal-dialog-link.component'; import { CheckoutReceiptComponent } from './checkout-receipt.component'; describe('Checkout Receipt Component', () => { let component: CheckoutReceiptComponent; let fixture: ComponentFixture<CheckoutReceiptComponent>; let element: HTMLElement; beforeEach(async(() => { TestBed.configureTestingModule({ declarations: [ CheckoutReceiptComponent, MockComponent(AddressComponent), MockComponent(BasketCostSummaryComponent), MockComponent(ContentIncludeComponent), MockComponent(InfoBoxComponent), MockComponent(LineItemListComponent), MockComponent(ModalDialogLinkComponent), ], imports: [TranslateModule.forRoot()], }).compileComponents(); })); beforeEach(() => { fixture = TestBed.createComponent(CheckoutReceiptComponent); component = fixture.componentInstance; element = fixture.nativeElement; component.order = BasketMockData.getOrder(); component.user = { email: '[email protected]' } as User; }); it('should be created', () => { expect(component).toBeTruthy(); expect(element).toBeTruthy(); expect(() => fixture.detectChanges()).not.toThrow(); });
it('should display the document number after creation', () => { fixture.detectChanges(); expect(element.querySelector('[data-testing-id="order-document-number"]').innerHTML.trim()).toEqual('12345678'); }); it('should display the home link after creation', () => { fixture.detectChanges(); expect(element.querySelector('[data-testing-id="home-link"]')).toBeTruthy(); }); it('should display the my account link after creation', () => { fixture.detectChanges(); expect(element.querySelector('[data-testing-id="myaccount-link"]')).toBeTruthy(); }); });
postbuild.js
const fs = require('fs');

const DIST_LIB_PATH = 'dist/payment-request/';
const README_PATH = 'README.md';
const ASSETS_PATH = 'projects/demo/src/assets';
const PATH_TO_README = DIST_LIB_PATH + README_PATH;

copyExtraFiles();

function copyExtraFiles() {
    if (!fs.existsSync(README_PATH)) {
        throw new Error('Requested files do not exist');
    } else {
        copyReadmeIntoDistFolder(README_PATH, PATH_TO_README);
    }
}

function copyReadmeIntoDistFolder(srcPath, toPath) {
    const fileBody = fs.readFileSync(srcPath).toString();
    const withoutLogos = fileBody
        .replace(`![ng-web-apis logo](${ASSETS_PATH}/logo.svg) `, '')
        .replace(`<img src="${ASSETS_PATH}/web-api.svg" align="top"> `, '');

    fs.writeFileSync(toPath, withoutLogos);
}
memory_redundancy.rs
// Rust test file autogenerated with cargo build (src/build_spectests.rs). // Please do NOT modify it by hand, as it will be reseted on next build. // Test based on spectests/memory_redundancy.wast #![allow( warnings, dead_code )] use std::panic; use wabt::wat2wasm; use crate::webassembly::{instantiate, compile, ImportObject, ResultObject, Instance, Export}; use super::_common::{ spectest_importobject, NaNCheck, }; // Line 5 fn create_module_1() -> ResultObject { let module_str = "(module (type (;0;) (func)) (type (;1;) (func (result i32))) (type (;2;) (func (result f32))) (type (;3;) (func (param i32) (result i32))) (func (;0;) (type 0) i32.const 0 i32.const 0 i32.store i32.const 4 i32.const 0 i32.store i32.const 8 i32.const 0 i32.store i32.const 12 i32.const 0 i32.store) (func (;1;) (type 1) (result i32) i32.const 8 i32.const 0 i32.store i32.const 5 f32.const -0x0p+0 (;=-0;) f32.store i32.const 8 i32.load) (func (;2;) (type 1) (result i32) (local i32 i32) i32.const 8 i32.load set_local 0 i32.const 5 i32.const -2147483648 i32.store i32.const 8 i32.load set_local 1 get_local 0 get_local 1 i32.add) (func (;3;) (type 2) (result f32) (local f32) i32.const 8 i32.const 589505315 i32.store i32.const 11 f32.load set_local 0 i32.const 8 i32.const 0 i32.store get_local 0) (func (;4;) (type 3) (param i32) (result i32) i32.const 16) (func (;5;) (type 1) (result i32) (local i32 i32) i32.const 4 call 4 set_local 0 i32.const 4 call 4 set_local 1 get_local 0 i32.const 42 i32.store get_local 1 i32.const 43 i32.store get_local 0 i32.load) (memory (;0;) 1 1) (export \"zero_everything\" (func 0)) (export \"test_store_to_load\" (func 1)) (export \"test_redundant_load\" (func 2)) (export \"test_dead_store\" (func 3)) (export \"malloc\" (func 4)) (export \"malloc_aliasing\" (func 5))) "; let wasm_binary = wat2wasm(module_str.as_bytes()).expect("WAST not valid or malformed"); instantiate(wasm_binary, spectest_importobject()).expect("WASM can't be instantiated") } fn start_module_1(result_object: &ResultObject) { result_object.instance.start(); } // Line 59 fn c1_l59_action_invoke(result_object: &ResultObject) { println!("Executing function {}", "c1_l59_action_invoke"); let func_index = match result_object.module.info.exports.get("test_store_to_load") { Some(&Export::Function(index)) => index, _ => panic!("Function not found"), }; let invoke_fn: fn(&Instance) -> i32 = get_instance_function!(result_object.instance, func_index); let result = invoke_fn(&result_object.instance); assert_eq!(result, 128 as i32); } // Line 60 fn c2_l60_action_invoke(result_object: &ResultObject) { println!("Executing function {}", "c2_l60_action_invoke"); let func_index = match result_object.module.info.exports.get("zero_everything") { Some(&Export::Function(index)) => index, _ => panic!("Function not found"), }; let invoke_fn: fn(&Instance) = get_instance_function!(result_object.instance, func_index); let result = invoke_fn(&result_object.instance); } // Line 61 fn c3_l61_action_invoke(result_object: &ResultObject) { println!("Executing function {}", "c3_l61_action_invoke"); let func_index = match result_object.module.info.exports.get("test_redundant_load") { Some(&Export::Function(index)) => index, _ => panic!("Function not found"), }; let invoke_fn: fn(&Instance) -> i32 = get_instance_function!(result_object.instance, func_index); let result = invoke_fn(&result_object.instance); assert_eq!(result, 128 as i32); } // Line 62 fn c4_l62_action_invoke(result_object: &ResultObject) { println!("Executing function {}", 
"c4_l62_action_invoke"); let func_index = match result_object.module.info.exports.get("zero_everything") { Some(&Export::Function(index)) => index, _ => panic!("Function not found"), }; let invoke_fn: fn(&Instance) = get_instance_function!(result_object.instance, func_index); let result = invoke_fn(&result_object.instance); } // Line 63 fn c5_l63_action_invoke(result_object: &ResultObject) { println!("Executing function {}", "c5_l63_action_invoke"); let func_index = match result_object.module.info.exports.get("test_dead_store") { Some(&Export::Function(index)) => index, _ => panic!("Function not found"), }; let invoke_fn: fn(&Instance) -> f32 = get_instance_function!(result_object.instance, func_index); let result = invoke_fn(&result_object.instance); assert_eq!(result, 0.000000000000000000000000000000000000000000049 as f32); } // Line 64 fn c6_l64_action_invoke(result_object: &ResultObject) { println!("Executing function {}", "c6_l64_action_invoke"); let func_index = match result_object.module.info.exports.get("zero_everything") { Some(&Export::Function(index)) => index, _ => panic!("Function not found"),
    };
    let invoke_fn: fn(&Instance) = get_instance_function!(result_object.instance, func_index);
    let result = invoke_fn(&result_object.instance);
}

// Line 65
fn c7_l65_action_invoke(result_object: &ResultObject) {
    println!("Executing function {}", "c7_l65_action_invoke");
    let func_index = match result_object.module.info.exports.get("malloc_aliasing") {
        Some(&Export::Function(index)) => index,
        _ => panic!("Function not found"),
    };
    let invoke_fn: fn(&Instance) -> i32 = get_instance_function!(result_object.instance, func_index);
    let result = invoke_fn(&result_object.instance);
    assert_eq!(result, 43 as i32);
}

#[test]
fn test_module_1() {
    let result_object = create_module_1();
    // We group the calls together
    start_module_1(&result_object);
    c1_l59_action_invoke(&result_object);
    c2_l60_action_invoke(&result_object);
    c3_l61_action_invoke(&result_object);
    c4_l62_action_invoke(&result_object);
    c5_l63_action_invoke(&result_object);
    c6_l64_action_invoke(&result_object);
    c7_l65_action_invoke(&result_object);
}
Filter.tsx
import { Color } from 'color/Color'; import { HSL } from 'color/HSL'; import { RGB } from 'color/RGB'; export class
Filter
implements Color { filterName: string; baseColor!: HSL; constructor(filterName: string) { this.filterName = filterName; } toString(): string { return "url('#" + this.filterName + "')"; } clone(): Color { return new Filter(this.filterName); } shiftHue(shift: number): void {} increaseSaturation(increasePercentage: number): void {} increaseLuminance(increasePercentage: number): void {} decreaseSaturation(decreasePercentage: number): void {} decreaseLuminance(decreasePercentage: number): void {} toHsl(): HSL { if (this.baseColor === undefined) { throw new Error('No base color available'); } return this.baseColor; } toFilter() {} toRGB(): RGB { throw new Error('Method not implemented.'); } }
StakksRates.js
import React from 'react'
import PropTypes from 'prop-types'
import FetchPonyfill from 'fetch-ponyfill'

const fetch = FetchPonyfill().fetch

const FEED_URL = 'https://api.coinmarketcap.com/v1/ticker/payshares/'
const UPDATE_INTERVAL = 5 * 60 * 1000

class StakksRatesContainer extends React.PureComponent {
  componentDidMount() {
    this.updatePrice()
    // The callback must actually invoke updatePrice on each tick; the arrow
    // function already preserves `this`, so no bind() is needed.
    this.intervalId = setInterval(() => this.updatePrice(), UPDATE_INTERVAL)
  }

  componentWillUnmount() {
    clearInterval(this.intervalId)
  }

  updatePrice() {
    fetch(FEED_URL)
      .then(rsp => rsp.json())
      .then(rspJson => {
        const stakks = rspJson[0]
        const newState = {
          change: stakks.percent_change_24h,
          usd: stakks.price_usd,
        }
        this.setState(newState)
      })
      .catch(err => {
        console.error(`Failed to fetch price: [${err}]`)
        console.error(`stack: [${err.stack}]`)
      })
  }

  render() {
    if (!this.state) return null
    return <StakksRates {...this.state} />
  }
}

class StakksRates extends React.PureComponent {
  isPositive(changeNumStr) {
    const asFloat = Number.parseFloat(changeNumStr)
    return Number.isNaN(asFloat) === false && Number(asFloat) >= 0
  }

  renderChange(change) {
    const positive = this.isPositive(change)
    const valueStr = `${positive ? '+' : ''}${this.props.change}%`
    const style = {
      color: positive ? '#00c292' : '#fb9678',
    }
    return <span style={style}>{valueStr}</span>
  }

  render() {
    return (
      <span>
        XPS/USD: {this.props.usd} {this.renderChange(this.props.change)}
      </span>
    )
  }
}

StakksRates.propTypes = {
  change: PropTypes.string.isRequired,
  usd: PropTypes.string.isRequired,
}

export {StakksRatesContainer as default, StakksRates}
dbChevronDoubleUpSmall.d.ts
import { DbIconDefinition } from "@digibearapp/digibear-common-types";
declare const dbChevronDoubleUpSmall: DbIconDefinition; export default dbChevronDoubleUpSmall;
routes.py
#!/usr/bin/env python import os import hashlib from flask import Flask, request, make_response, abort from flask.ext.sqlalchemy import SQLAlchemy from flask import jsonify from sqlalchemy import desc, asc, Table, insert # Author: Christian Charukiewicz # Email: [email protected] app = Flask(__name__) db = SQLAlchemy(app) # MySQL configuration app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://username:password@localhost/beer_manager' # Database Models class User(db.Model): __tablename__ = 'users' id = db.Column(db.Integer, primary_key = True) username = db.Column(db.String(128)) email = db.Column(db.String(128)) password = db.Column(db.String(128)) salt = db.Column(db.String(128)) beer_added_today = db.Column(db.Integer) class Beer(db.Model): __tablename__ = 'beers' id = db.Column(db.Integer, primary_key = True) name = db.Column(db.String(128)) ibu = db.Column(db.Integer) calories = db.Column(db.Integer) abv = db.Column(db.Float(6)) style = db.Column(db.String(128)) brewery_location = db.Column(db.String(128)) glass_type = db.Column(db.Integer) class Glass(db.Model): __tablename__ = 'glasses' id = db.Column(db.Integer, primary_key = True) name = db.Column(db.String(128)) class Review(db.Model): __tablename__ = 'reviews' id = db.Column(db.Integer, primary_key = True) created = db.Column(db.DateTime) user_id = db.Column(db.Integer) beer_id = db.Column(db.Integer) aroma = db.Column(db.Float(6)) appearance = db.Column(db.Float(6)) taste = db.Column(db.Float(6)) palate = db.Column(db.Float(6)) bottle_style = db.Column(db.Float(6)) overall = db.Column(db.Float(6)) posted_this_week = db.Column(db.Integer) class Favorite(db.Model): __tablename__ = 'favorites' id = db.Column(db.Integer, primary_key = True) user_id = db.Column(db.Integer) beer_id = db.Column(db.Integer) # End of Database Models # Make the 500 error more relevant to our application @app.errorhandler(500) def missing_data(error): return make_response(jsonify({'error': 'Missing or Invalid Input Data'}), 500) # Begin API route paths @app.route('/') def index():
    dbsession = db.session()

    users = dbsession.query(User.id).count()
    beers = dbsession.query(Beer.id).count()
    reviews = dbsession.query(Review.id).count()
    glasses = dbsession.query(Glass.id).count()
    favorites = dbsession.query(Favorite.id).count()

    d = {
        "total_users": users,
        "total_beers": beers,
        "total_reviews": reviews,
        "total_beer_glasses": glasses,
        "total_favorites": favorites,
    }

    return jsonify(statistics = d)
@app.route('/users/', methods=['GET', 'POST']) def users(): if request.method == 'GET': sort = request.args.get('sort', 'asc') limit = request.args.get('limit') if sort == 'desc': results = User.query.order_by(desc('users.username')).limit(limit).all() elif sort == 'asc': results = User.query.order_by(asc('users.username')).limit(limit).all() json_results = [] for result in results: d = { 'id': result.id, 'username': result.username, 'email': result.email, 'beer_added_today': result.beer_added_today } json_results.append(d) return jsonify(users=json_results) if request.method == 'POST': username = request.get_json().get('username') email = request.get_json().get('email') password = request.get_json().get('password') # Create password salt and hash salt+password salt = os.urandom(16) salt = str(salt).encode('utf-8') password = password.encode('utf-8') passhash = hashlib.sha256() passhash.update(salt+password) if username == "" or username == None: abort(500) if email == "" or email == None: abort(500) if password == "" or password == None: abort(500) newUser = User( username = username, email = email, password = passhash.hexdigest(), salt = salt, beer_added_today = 0) dbsession = db.session() dbsession.add(newUser) dbsession.commit() success = "User Created" return jsonify(response = success) @app.route('/users/<int:uid>/', methods=['GET', 'PUT', 'DELETE']) def user(uid): if request.method == 'GET': result = User.query.filter_by(id = uid).first() reviews_posted = [] if result != None: review_results = Review.query.filter_by(user_id = uid).all() for review_result in review_results: r = { 'review_id': review_result.id, 'created': review_result.created, 'beer_id': review_result.beer_id, 'aroma': review_result.aroma, 'appearance': review_result.appearance, 'taste': review_result.taste, 'palate': review_result.palate, 'bottle_style': review_result.bottle_style, 'overall': review_result.overall } reviews_posted.append(r) favorites = [] if result != None: favorite_results = Review.query.filter_by(user_id = uid).all() for favorite_result in favorite_results: r = { 'favorite_id': favorite_result.id, 'beer_id': favorite_result.beer_id} favorites.append(r) d = { 'id': result.id, 'username': result.username, 'email': result.email, 'beer_added_today': result.beer_added_today, 'password': result.password, 'salt': result.salt, 'reviews_posted': reviews_posted, 'favorite_beers': favorites } return jsonify(user = d) if request.method == 'PUT': username = request.get_json().get('username') email = request.get_json().get('email') password = request.get_json().get('password') if password != None: salt = os.urandom(16) salt = str(salt).encode('utf-8') password = password.encode('utf-8') passhash = hashlib.sha256() passhash.update(salt+password) dbsession = db.session() query = dbsession.query(User) query = query.filter(User.id == uid) record = query.one() if username != None: record.username = username if email != None: record.email = email if password != None: record.password = passhash.hexdigest() record.salt = salt dbsession.commit() success = "User Updated" return jsonify(response = success) if request.method == 'DELETE': dbsession = db.session() dbsession.query(User).filter_by(id = uid).delete() dbsession.commit() success = "User Deleted" return jsonify(response = success) @app.route('/beers/', methods=['GET', 'POST']) def beers(): if request.method == 'GET': sort = request.args.get('sort', 'asc') limit = request.args.get('limit') stat = request.args.get('stat') if stat != None: if sort == 'desc': results 
= Beer.query.order_by(desc('beers.' + stat)).limit(limit).all() elif sort == 'asc': results = Beer.query.order_by(asc('beers.' + stat)).limit(limit).all() else: results = Beer.query.limit(limit).all() json_results = [] for result in results: d = { 'id': result.id, 'name': result.name, 'ibu': result.ibu, 'calories': result.calories, 'abv': result.abv, 'style': result.style, 'brewery_location': result.brewery_location, 'glass_type': result.glass_type } json_results.append(d) return jsonify(beers=json_results) if request.method == 'POST': name = request.get_json().get('name') ibu = request.get_json().get('ibu') calories = request.get_json().get('calories') abv = request.get_json().get('abv') style = request.get_json().get('style') brewery_location = request.get_json().get('brewery_location') glass_type = request.get_json().get('glass_type') user_id = request.get_json().get('user_id') if name == "" or name == None: abort(500) if ibu == "" or ibu == None: abort(500) if calories == "" or calories == None: abort(500) if abv == "" or abv == None: abort(500) if style == "" or style == None: abort(500) if brewery_location == "" or brewery_location == None: abort(500) if glass_type == "" or glass_type == None: abort(500) result = User.query.filter_by(id = user_id).first() if result.beer_added_today == 1: error = "This user has already added a beer today" return jsonify(response = error) glasscheck = Glass.query.filter_by(id = glass_type).first() if glasscheck == None: error = "Invalid beer glass type" return jsonify(response = error) dbsession = db.session() query = dbsession.query(User) query = query.filter(User.id == user_id) record = query.one() record.beer_added_today = 1 dbsession.commit() newBeer = Beer( name = name, ibu = ibu, calories = calories, abv = abv, style = style, brewery_location = brewery_location, glass_type = glass_type) dbsession = db.session() dbsession.add(newBeer) dbsession.commit() success = "Beer Created" return jsonify(response = success) @app.route('/beers/<int:beer_id>/', methods=['GET', 'DELETE']) def beer(beer_id): if request.method == 'GET': result = Beer.query.filter_by(id = beer_id).first() # Retrieve all reviews associated with the specified beer all_results = [] average_overall = [] if result != None: review_results = Review.query.filter_by(beer_id = beer_id).all() for review_result in review_results: r = { 'review_id': review_result.id, 'created': review_result.created, 'user_id': review_result.user_id, 'beer_id': review_result.beer_id, 'aroma': review_result.aroma, 'appearance': review_result.appearance, 'taste': review_result.taste, 'palate': review_result.palate, 'bottle_style': review_result.bottle_style, 'overall': review_result.overall} average_overall.append(float(review_result.overall)) all_results.append(r) # Average of overall ratings for all reviews overall_rating = sum(average_overall) / float(len(average_overall)) d = { 'id': result.id, 'name': result.name, 'ibu': result.ibu, 'calories': result.calories, 'abv': result.abv, 'style': result.style, 'brewery_location': result.brewery_location, 'glass_type': result.glass_type, 'reviews': all_results, 'overall_rating': overall_rating } return jsonify(beer = d) if request.method == 'DELETE': dbsession = db.session() dbsession.query(Beer).filter_by(id = beer_id).delete() dbsession.query(Review).filter_by(beer_id = beer_id).delete() dbsession.query(Favorite).filter_by(beer_id = beer_id).delete() dbsession.commit() success = "Beer Deleted" return jsonify(response = success) @app.route('/glasses/', 
methods=['GET', 'POST']) def glasses(): if request.method == 'GET': sort = request.args.get('sort', 'asc') limit = request.args.get('limit') if sort == 'desc': results = Glass.query.order_by(desc('glasses.name')).limit(limit).all() elif sort == 'asc': results = Glass.query.order_by(asc('glasses.name')).limit(limit).all() json_results = [] for result in results: d = { 'id': result.id, 'name': result.name } json_results.append(d) return jsonify(glasses=json_results) if request.method == 'POST': name = request.get_json().get('name') if name == "" or name == None: abort(500) newGlass = Glass( name = name) dbsession = db.session() dbsession.add(newGlass) dbsession.commit() success = "Beer Glass Created" return jsonify(response = success) @app.route('/glasses/<int:glass_id>/', methods=['DELETE']) def glass(glass_id): if request.method == 'DELETE': dbsession = db.session() dbsession.query(Glass).filter_by(id = glass_id).delete() dbsession.commit() success = "Beer Glass Deleted" return jsonify(response = success) @app.route('/reviews/', methods=['GET', 'POST']) def reviews(): if request.method == 'GET': sort = request.args.get('sort', 'asc') limit = request.args.get('limit') stat = request.args.get('stat') if stat != None: if sort == 'desc': results = Review.query.order_by(desc('reviews.' + stat)).limit(limit).all() elif sort == 'asc': results = Review.query.order_by(asc('reviews.' + stat)).limit(limit).all() else: results = Review.query.limit(limit).all() json_results = [] for result in results: d = { 'id': result.id, 'created': result.created, 'user_id': result.user_id, 'beer_id': result.beer_id, 'aroma': result.aroma, 'appearance': result.appearance, 'taste': result.taste, 'palate': result.palate, 'bottle_style': result.bottle_style, 'overall': result.overall } json_results.append(d) return jsonify(review=json_results) if request.method == 'POST': user_id = request.get_json().get('user_id') beer_id = request.get_json().get('beer_id') aroma = request.get_json().get('aroma') appearance = request.get_json().get('appearance') taste = request.get_json().get('taste') palate = request.get_json().get('palate') bottle_style = request.get_json().get('bottle_style') if user_id == "" or user_id == None: abort(500) if beer_id == "" or beer_id == None: abort(500) if aroma == "" or aroma == None: abort(500) if appearance == "" or appearance == None: abort(500) if taste == "" or taste == None: abort(500) if palate == "" or palate == None: abort(500) if bottle_style == "" or bottle_style == None: abort(500) errors = [] error_flag = False if float(aroma) < 0 or float(aroma) > 5: error = { 'error' : "Aroma must be between 0.0 and 5.0" } errors.append(error) if float(appearance) < 0 or float(appearance) > 5: error = { 'error' : "Appearance must be between 0.0 and 5.0" } errors.append(error) if float(taste) < 0 or float(taste) > 10: error = { 'error' : "Taste must be between 0.0 and 10.0" } errors.append(error) if float(palate) < 0 or float(palate) > 5: error = { 'error' : "Palate must be between 0.0 and 5.0" } errors.append(error) if float(bottle_style) < 0 or float(bottle_style) > 5: error = { 'error' : "Bottle Style must be between 0.0 and 5.0" } errors.append(error) if len(errors) > 0: return jsonify(response = errors) usercheck = User.query.filter_by(id = user_id).first() if usercheck == None: error = "The user does not exist" return jsonify(response = error) beercheck = Beer.query.filter_by(id = beer_id).first() if beercheck == None: error = "The beer does not exist" return jsonify(response = error) 
weeklycheck = Review.query.filter_by(user_id = user_id, beer_id = beer_id, posted_this_week = 1).first() if weeklycheck != None: error = "This user has already reviewed this beer this week" return jsonify(response = error) ratings = [float(aroma), float(appearance), float(taste), float(palate), float(bottle_style)] overall = sum(ratings) / float(len(ratings)) newReview = Review( user_id = user_id, beer_id = beer_id, aroma = aroma, appearance = appearance, taste = taste, palate = palate, bottle_style = bottle_style, overall = overall, posted_this_week = 1) dbsession = db.session() dbsession.add(newReview) dbsession.commit() success = "Review Created" return jsonify(response = success) @app.route('/reviews/<int:review_id>/', methods=['GET', 'DELETE']) def review(review_id): if request.method == 'GET': result = Review.query.filter_by(id = review_id).first() d = { 'id': result.id, 'created': result.created, 'user_id': result.user_id, 'beer_id': result.beer_id, 'aroma': result.aroma, 'appearance': result.appearance, 'taste': result.taste, 'palate': result.palate, 'bottle_style': result.bottle_style, 'overall': result.overall } return jsonify(review = d) if request.method == 'DELETE': dbsession = db.session() dbsession.query(Review).filter_by(id = review_id).delete() dbsession.commit() success = "Review Deleted" return jsonify(response = success) @app.route('/favorites/', methods=['POST']) def favorites(): if request.method == 'POST': user_id = request.get_json().get('user_id') beer_id = request.get_json().get('beer_id') if user_id == "" or user_id == None: abort(500) if beer_id == "" or beer_id == None: abort(500) usercheck = User.query.filter_by(id = user_id).first() if usercheck == None: error = "The user does not exist" return jsonify(response = error) beercheck = Beer.query.filter_by(id = beer_id).first() if beercheck == None: error = "The beer does not exist" return jsonify(response = error) newFavorite = Favorite( user_id = user_id, beer_id = beer_id) dbsession = db.session() dbsession.add(newFavorite) dbsession.commit() success = "Favorite Created" return jsonify(response = success) @app.route('/favorites/<int:favorite_id>/', methods=['DELETE']) def favorite(favorite_id): if request.method == 'DELETE': dbsession = db.session() dbsession.query(Favorite).filter_by(id = favorite_id).delete() dbsession.commit() success = "Favorite Deleted" return jsonify(response = success) @app.route('/cronjobs/', methods=['GET']) def cronjobs(): if request.method == 'GET': job_type = request.args.get('jobtype') if job_type == "daily": dbsession = db.session() query = dbsession.query(User) records = query.all() for record in records: record.beer_added_today = 0 dbsession.commit() success = "Daily records cleared" return jsonify(response = success) if job_type == "weekly": dbsession = db.session() query = dbsession.query(Review) records = query.all() for record in records: record.posted_this_week = 0 dbsession.commit() success = "Weekly records cleared" return jsonify(response = success) if __name__ == '__main__': app.run(debug=False)
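A quick end-to-end exercise of the endpoints above (a sketch: it assumes the app is running locally on Flask's default port with the MySQL tables created, and the user/beer ids are made up for illustration):

import requests

BASE = 'http://127.0.0.1:5000'

# Create a user, then post a review for beer 1. The rating ranges follow the
# validation above: aroma/appearance/palate/bottle_style are 0-5, taste is 0-10.
requests.post(BASE + '/users/', json={
    'username': 'alice',
    'email': 'alice@example.com',
    'password': 'hunter2',
})
resp = requests.post(BASE + '/reviews/', json={
    'user_id': 1,
    'beer_id': 1,
    'aroma': 4.0,
    'appearance': 3.5,
    'taste': 8.0,
    'palate': 4.0,
    'bottle_style': 3.0,
})
print(resp.json())  # {'response': 'Review Created'} on success
print(requests.get(BASE + '/').json())  # site-wide statistics from index()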
15.2.3.6-4-492.js
/// Copyright (c) 2009 Microsoft Corporation /// /// Redistribution and use in source and binary forms, with or without modification, are permitted provided /// that the following conditions are met: /// * Redistributions of source code must retain the above copyright notice, this list of conditions and /// the following disclaimer. /// * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and /// the following disclaimer in the documentation and/or other materials provided with the distribution. /// * Neither the name of Microsoft nor the names of its contributors may be used to /// endorse or promote products derived from this software without specific prior written permission. /// /// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR
/// IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
/// FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
/// FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT /// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS /// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, /// OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF /// ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ES5Harness.registerTest({ id: "15.2.3.6-4-492", path: "TestCases/chapter15/15.2/15.2.3/15.2.3.6/15.2.3.6-4-492.js", description: "ES5 Attributes - fail to update [[Configurable]] attribute of accessor property ([[Get]] is undefined, [[Set]] is a Function, [[Enumerable]] is false, [[Configurable]] is false) to different value", test: function testcase() { var obj = {}; var verifySetFunc = "data"; var setFunc = function (value) { verifySetFunc = value; }; Object.defineProperty(obj, "prop", { get: undefined, set: setFunc, enumerable: false, configurable: false }); var desc1 = Object.getOwnPropertyDescriptor(obj, "prop"); try { Object.defineProperty(obj, "prop", { configurable: true }); return false; } catch (e) { var desc2 = Object.getOwnPropertyDescriptor(obj, "prop"); delete obj.prop; return desc1.configurable === false && desc2.configurable === false && obj.hasOwnProperty("prop") && e instanceof TypeError; } }, precondition: function prereq() { return fnExists(Object.defineProperty) && fnExists(Object.getOwnPropertyDescriptor); } });
0011_auto_20200515_1115.py
# Generated by Django 2.2.12 on 2020-05-15 14:15 from django.db import migrations, models class Migration(migrations.Migration): dependencies = [ ("datasets", "0010_auto_20200515_0959"), ] operations = [ migrations.AlterField( model_name="citycouncilagenda", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="citycouncilagenda", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="citycouncilagenda", name="created_at", field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="citycouncilagenda", name="date", field=models.DateField(verbose_name="Data"), ), migrations.AlterField( model_name="citycouncilagenda", name="details", field=models.TextField(blank=True, null=True, verbose_name="Detalhes"), ), migrations.AlterField( model_name="citycouncilagenda", name="event_type", field=models.CharField( choices=[ ("sessao_ordinaria", "Sessão Ordinária"), ("ordem_do_dia", "Ordem do Dia"), ("sessao_solene", "Sessão Solene"), ("sessao_especial", "Sessão Especial"), ("audiencia_publica", "Audiência Pública"), ], max_length=20, verbose_name="Tipo do evento", ), ), migrations.AlterField( model_name="citycouncilagenda", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="citycouncilagenda", name="title", field=models.CharField( blank=True, max_length=100, null=True, verbose_name="Título" ), ), migrations.AlterField( model_name="citycouncilagenda", name="updated_at", field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"), ), migrations.AlterField( model_name="citycouncilattendancelist", name="council_member", field=models.CharField(max_length=200, verbose_name="Vereador"), ), migrations.AlterField( model_name="citycouncilattendancelist", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="citycouncilattendancelist", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="citycouncilattendancelist", name="created_at", field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="citycouncilattendancelist", name="date", field=models.DateField(verbose_name="Data"), ), migrations.AlterField( model_name="citycouncilattendancelist", name="description", field=models.CharField( blank=True, max_length=200, null=True, verbose_name="Descrição" ), ), migrations.AlterField( model_name="citycouncilattendancelist", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="citycouncilattendancelist", name="status", field=models.CharField( choices=[ ("presente", "Presente"), ("falta_justificada", "Falta Justificada"), ("licenca_justificada", "Licença Justificada"), ("ausente", "Ausente"), ], max_length=20, verbose_name="Situação", ), ), migrations.AlterField( model_name="citycouncilattendancelist", name="updated_at", field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"), ), migrations.AlterField( model_name="citycouncilcontract", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="citycouncilcontract", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="citycouncilcontract", name="created_at", 
field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="citycouncilcontract", name="excluded", field=models.BooleanField(default=False, verbose_name="Excluído?"), ), migrations.AlterField( model_name="citycouncilcontract", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="citycouncilcontract", name="updated_at", field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"), ), migrations.AlterField( model_name="citycouncilexpense", name="budget_unit", field=models.PositiveIntegerField( default=101, verbose_name="Unidade orçamentária" ), ), migrations.AlterField( model_name="citycouncilexpense", name="company_or_person", field=models.TextField( blank=True, null=True, verbose_name="Empresa ou pessoa" ), ), migrations.AlterField( model_name="citycouncilexpense", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="citycouncilexpense", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="citycouncilexpense", name="created_at", field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="citycouncilexpense", name="date", field=models.DateField(verbose_name="Data"), ), migrations.AlterField( model_name="citycouncilexpense", name="document", field=models.CharField( blank=True, max_length=50, null=True, verbose_name="CNPJ ou CPF" ), ), migrations.AlterField( model_name="citycouncilexpense", name="excluded", field=models.BooleanField(default=False, verbose_name="Excluído?"), ), migrations.AlterField( model_name="citycouncilexpense", name="external_file_code", field=models.CharField( blank=True, max_length=50, null=True, verbose_name="Código do arquivo (externo)", ), ), migrations.AlterField( model_name="citycouncilexpense", name="external_file_line", field=models.CharField( blank=True, max_length=50, null=True, verbose_name="Linha do arquivo (externo)", ), ), migrations.AlterField( model_name="citycouncilexpense", name="function", field=models.CharField( blank=True, max_length=50, null=True, verbose_name="Função" ), ), migrations.AlterField( model_name="citycouncilexpense", name="group", field=models.CharField( blank=True, max_length=100, null=True, verbose_name="Grupo" ), ), migrations.AlterField( model_name="citycouncilexpense", name="legal_status", field=models.CharField( blank=True, max_length=200, null=True, verbose_name="Natureza" ), ), migrations.AlterField( model_name="citycouncilexpense", name="modality", field=models.CharField( blank=True, choices=[ ("convenio", "Convênio"), ("tomada_de_precos", "Tomada de Preço"), ("pregao", "Pregão"), ("inexigibilidade", "Inexigibilidade"), ("convite", "Convite"), ("concorrencia", "Concorrência"), ("dispensa", "Dispensa"), ("isento", "Isento"), ], max_length=50, null=True, verbose_name="Modalidade", ), ), migrations.AlterField( model_name="citycouncilexpense", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="citycouncilexpense", name="number", field=models.CharField( blank=True, max_length=50, null=True, verbose_name="Número" ), ), migrations.AlterField( model_name="citycouncilexpense", name="phase", field=models.CharField( choices=[ ("empenho", "Empenho"), ("liquidacao", "Liquidação"), ("pagamento", "Pagamento"), ], max_length=20, verbose_name="Fase", ), ), migrations.AlterField( 
model_name="citycouncilexpense", name="phase_code", field=models.CharField( blank=True, max_length=20, null=True, verbose_name="Código da fase" ), ), migrations.AlterField( model_name="citycouncilexpense", name="process_number", field=models.CharField( blank=True, max_length=50, null=True, verbose_name="Número do processo" ), ), migrations.AlterField( model_name="citycouncilexpense", name="published_at", field=models.DateField(blank=True, null=True, verbose_name="Publicado em"), ), migrations.AlterField( model_name="citycouncilexpense", name="resource", field=models.CharField( blank=True, max_length=200, null=True, verbose_name="Fonte" ), ), migrations.AlterField( model_name="citycouncilexpense", name="subfunction", field=models.CharField( blank=True, max_length=50, null=True, verbose_name="Subfunção" ), ), migrations.AlterField( model_name="citycouncilexpense", name="subgroup", field=models.CharField( blank=True, max_length=100, null=True, verbose_name="Subgrupos" ), ), migrations.AlterField( model_name="citycouncilexpense", name="summary", field=models.TextField(blank=True, null=True, verbose_name="Descrição"), ), migrations.AlterField( model_name="citycouncilexpense", name="updated_at", field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"), ), migrations.AlterField( model_name="citycouncilminute", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="citycouncilminute", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="citycouncilminute", name="created_at", field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="citycouncilminute", name="date", field=models.DateField(verbose_name="Data"), ), migrations.AlterField( model_name="citycouncilminute", name="event_type", field=models.CharField( choices=[ ("sessao_ordinaria", "Sessão Ordinária"), ("ordem_do_dia", "Ordem do Dia"), ("sessao_solene", "Sessão Solene"), ("sessao_especial", "Sessão Especial"), ("audiencia_publica", "Audiência Pública"), ], max_length=20, verbose_name="Tipo de evento", ), ), migrations.AlterField( model_name="citycouncilminute", name="file_content", field=models.TextField( blank=True, null=True, verbose_name="Conteúdo do arquivo" ), ), migrations.AlterField( model_name="citycouncilminute", name="file_url", field=models.URLField(blank=True, null=True, verbose_name="Endereço (URL)"), ), migrations.AlterField( model_name="citycouncilminute", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="citycouncilminute", name="title", field=models.CharField( blank=True, max_length=300, null=True, verbose_name="Título" ), ), migrations.AlterField( model_name="citycouncilminute", name="updated_at", field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"), ), migrations.AlterField( model_name="cityhallbid", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="cityhallbid", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="cityhallbid", name="created_at", field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="cityhallbid", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="cityhallbid", name="updated_at", 
field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"),
        ),
        migrations.AlterField(
model_name="cityhallbidevent", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="cityhallbidevent", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="cityhallbidevent", name="created_at", field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="cityhallbidevent", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="cityhallbidevent", name="updated_at", field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"), ), migrations.AlterField( model_name="gazette", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="gazette", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="gazette", name="created_at", field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="gazette", name="date", field=models.DateField(null=True, verbose_name="Data"), ), migrations.AlterField( model_name="gazette", name="file_content", field=models.TextField( blank=True, null=True, verbose_name="Conteúdo do arquivo" ), ), migrations.AlterField( model_name="gazette", name="file_url", field=models.URLField(blank=True, null=True, verbose_name="Endereço (URL)"), ), migrations.AlterField( model_name="gazette", name="is_legacy", field=models.BooleanField(default=False, verbose_name="É do site antigo?"), ), migrations.AlterField( model_name="gazette", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="gazette", name="power", field=models.CharField( choices=[ ("executivo", "Poder Executivo"), ("legislativo", "Poder Legislativo"), ], max_length=25, verbose_name="Poder", ), ), migrations.AlterField( model_name="gazette", name="updated_at", field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"), ), migrations.AlterField( model_name="gazette", name="year_and_edition", field=models.CharField(max_length=100, verbose_name="Ano e edição"), ), migrations.AlterField( model_name="gazetteevent", name="crawled_at", field=models.DateTimeField(verbose_name="Coletado em"), ), migrations.AlterField( model_name="gazetteevent", name="crawled_from", field=models.URLField(verbose_name="Fonte"), ), migrations.AlterField( model_name="gazetteevent", name="created_at", field=models.DateTimeField(auto_now_add=True, verbose_name="Criado em"), ), migrations.AlterField( model_name="gazetteevent", name="notes", field=models.TextField(blank=True, null=True, verbose_name="Anotações"), ), migrations.AlterField( model_name="gazetteevent", name="published_on", field=models.CharField( blank=True, max_length=100, null=True, verbose_name="Publicado em" ), ), migrations.AlterField( model_name="gazetteevent", name="secretariat", field=models.CharField( blank=True, max_length=100, null=True, verbose_name="Secretaria" ), ), migrations.AlterField( model_name="gazetteevent", name="summary", field=models.TextField(blank=True, null=True, verbose_name="Sumário"), ), migrations.AlterField( model_name="gazetteevent", name="title", field=models.CharField( blank=True, max_length=300, null=True, verbose_name="Título" ), ), migrations.AlterField( model_name="gazetteevent", name="updated_at", field=models.DateTimeField(auto_now=True, verbose_name="Atualizado em"), ), ]
routes_test.go
package routes
import (
"net/http" "net/http/cookiejar" "net/http/httptest" "net/url" "os" "testing" "github.com/mikestefanello/pagoda/config" "github.com/mikestefanello/pagoda/services" "github.com/PuerkitoBio/goquery" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) var ( srv *httptest.Server c *services.Container ) func TestMain(m *testing.M) { // Set the environment to test config.SwitchEnvironment(config.EnvTest) // Start a new container c = services.NewContainer() // Start a test HTTP server BuildRouter(c) srv = httptest.NewServer(c.Web) // Run tests exitVal := m.Run() // Shutdown the container and test server if err := c.Shutdown(); err != nil { panic(err) } srv.Close() os.Exit(exitVal) } type httpRequest struct { route string client http.Client body url.Values t *testing.T } func request(t *testing.T) *httpRequest { jar, err := cookiejar.New(nil) require.NoError(t, err) r := httpRequest{ t: t, body: url.Values{}, client: http.Client{ Jar: jar, }, } return &r } func (h *httpRequest) setClient(client http.Client) *httpRequest { h.client = client return h } func (h *httpRequest) setRoute(route string, params ...interface{}) *httpRequest { h.route = srv.URL + c.Web.Reverse(route, params) return h } func (h *httpRequest) setBody(body url.Values) *httpRequest { h.body = body return h } func (h *httpRequest) get() *httpResponse { resp, err := h.client.Get(h.route) require.NoError(h.t, err) r := httpResponse{ t: h.t, Response: resp, } return &r } func (h *httpRequest) post() *httpResponse { // Make a get request to get the CSRF token doc := h.get(). assertStatusCode(http.StatusOK). toDoc() // Extract the CSRF and include it in the POST request body csrf := doc.Find(`input[name="csrf"]`).First() token, exists := csrf.Attr("value") assert.True(h.t, exists) h.body["csrf"] = []string{token} // Make the POST requests resp, err := h.client.PostForm(h.route, h.body) require.NoError(h.t, err) r := httpResponse{ t: h.t, Response: resp, } return &r } type httpResponse struct { *http.Response t *testing.T } func (h *httpResponse) assertStatusCode(code int) *httpResponse { assert.Equal(h.t, code, h.Response.StatusCode) return h } func (h *httpResponse) assertRedirect(t *testing.T, route string, params ...interface{}) *httpResponse { assert.Equal(t, c.Web.Reverse(route, params), h.Header.Get("Location")) return h } func (h *httpResponse) toDoc() *goquery.Document { doc, err := goquery.NewDocumentFromReader(h.Body) require.NoError(h.t, err) err = h.Body.Close() assert.NoError(h.t, err) return doc }
import (
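A sketch of how the fluent helpers above are meant to be used from a test; the route name "home" is an assumption, not taken from this file:

func TestHomePage(t *testing.T) {
	// Assumes a route registered under the name "home"
	request(t).
		setRoute("home").
		get().
		assertStatusCode(http.StatusOK)
}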
cal_clever.py
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ clever.py Compute CLEVER score using collected Lipschitz constants Copyright (C) 2017-2018, IBM Corp. Copyright (C) 2017, Lily Weng <[email protected]> and Huan Zhang <[email protected]> This program is licenced under the Apache 2.0 licence, contained in the LICENCE file in this directory. """ from clever import clever_score dataset='mnist' models=['vgg13'] attacks=['oritest'] istarget='target' #clever_score(data_folder='lipschitz_mat/target/mnist_lenet1')
for attack in attacks: clever_score(data_folder='lipschitz_mat/'+istarget+'/'+dataset+'/'+model+'/'+attack+'/'+dataset+'_'+model, untargeted=False)
for model in models:
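Read together, the two loop fragments above nest model over attack; a sketch of the assembled loop:

for model in models:
    for attack in attacks:
        clever_score(data_folder='lipschitz_mat/' + istarget + '/' + dataset
                     + '/' + model + '/' + attack + '/' + dataset + '_' + model,
                     untargeted=False)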
satuan.js
$(document).ready(function() { pageLoad(1) }) $('#search').on('keypress', function(e) { if (e.which == 13) { pageLoad(1); } }); function pageLoad(page=1) { var search = $('#search').val(); var limit = $('#limit').val(); var id_th = $('#hidden_id_th').val(); var column_name = $('#hidden_column_name').val(); var sort_type = $('#hidden_sort_type').val(); $.ajax({ url: base_url + "/Satuan/fetch_data", type: 'GET', dataType: 'html', data: { page : page, sortby : column_name, sorttype : sort_type, limit : limit, search : search, }, beforeSend: function() {}, success: function(result) { $('#list').html(result); $('#hidden_page').val(page); sort_finish(id_th,sort_type); } }); } function sort_table(id, column){ var sort = $(id).attr("data-sort"); $('#hidden_id_th').val(id); $('#hidden_column_name').val(column);
if(sort=="asc"){ sort = 'desc'; }else if(sort=="desc"){ sort = 'asc'; }else{ sort = 'asc'; } $('#hidden_sort_type').val(sort); $('#hidden_page').val(1); pageLoad(1); } $('#btn-create').on('click', function() { $.ajax({ url: base_url + "/Satuan/load_modal", type: 'POST', data : {}, dataType: 'html', beforeSend: function() {}, success: function(result) { $('#div_modal').html(result); $('#modalTitleAdd').show(); $('#modeform').val('ADD'); $('#formModal').modal('show'); } }); }); $(document).on('click', '.btn-edit', function(event) { event.preventDefault(); var id = $(this).attr('data-id'); $.ajax({ url: base_url + "/Satuan/load_modal", type: 'POST', dataType: 'html', data:{id:id}, beforeSend: function () {}, success: function (result) { $('#div_modal').html(result); $('#modalTitleEdit').show(); $('#modeform').val('UPDATE'); $('#formModal').modal('show'); } }); }); $(document).on('click', '.btn-delete', function(e) { var id = $(this).attr('data-id'); var title = $(this).attr('data-name'); var page = $('#hidden_page').val(); Swal.fire({ title: 'Hapus Satuan', text: "Apakah Anda yakin menghapus data ?", icon: 'warning', showCancelButton: true, confirmButtonColor: '#d33', cancelButtonColor: '#95a5a6', confirmButtonText: 'Hapus', cancelButtonText: 'Batal', showLoaderOnConfirm: true, preConfirm: function () { return new Promise(function (resolve) { $.ajax({ method: 'GET', dataType: 'json', url: base_url + "/Satuan/delete/" + id, data: {}, success: function (data) { if (data.success === true) { Toast.fire({ icon: 'success', title: data.message }); swal.hideLoading() pageLoad(page); } else { Swal.fire({ icon: 'error', title: 'Oops...', text: data.message }); } }, fail: function (e) { alert(e); } }); }); }, allowOutsideClick: false }); e.preventDefault(); }); $(document).on('submit', '#formData', function(event) { event.preventDefault(); var modeform = $('#modeform').val(); var page = (modeform=='UPDATE') ? $('#hidden_page').val() : 1; $.ajax({ url: base_url + "/Satuan/save", method: 'POST', dataType: 'json', data: new FormData($('#formData')[0]), async: true, processData: false, contentType: false, success: function (data) { if (data.success == true) { Toast.fire({ icon: 'success', title: data.message }); $('#formModal').modal('hide'); pageLoad(page); } else { Swal.fire({icon: 'error',title: 'Oops...',text: data.message}); } }, fail: function (event) { alert(event); } }); });
normal.py
import math import torch from pixelflow.distributions import Distribution from pixelflow.utils import sum_except_batch from torch.distributions import Normal class StandardNormal(Distribution): """A multivariate Normal with zero mean and unit covariance.""" def
(self, shape): super(StandardNormal, self).__init__() self.shape = torch.Size(shape) self.register_buffer('base_measure', - 0.5 * self.shape.numel() * torch.log(torch.tensor(2 * math.pi))) def log_prob(self, x): return self.base_measure - 0.5 * sum_except_batch(x**2) def sample(self, num_samples): return torch.randn((num_samples,) + self.shape, device=self.base_measure.device, dtype=self.base_measure.dtype) def sample_shape(self, num_samples): return (num_samples,) + self.shape
__init__
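A quick sanity check (a sketch, assuming the StandardNormal class above is in scope): the closed-form log-density should agree with torch.distributions.Normal summed over event dimensions.

import torch
from torch.distributions import Normal

dist = StandardNormal((3, 4))
x = torch.randn(2, 3, 4)
# -0.5 * sum(x^2) - (D/2) * log(2*pi), computed per batch element
reference = Normal(0.0, 1.0).log_prob(x).sum(dim=(1, 2))
assert torch.allclose(dist.log_prob(x), reference, atol=1e-5)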
custom_dsstore.py
#!/usr/bin/env python3 # Copyright (c) 2013-2018 The Bitcoin Core developers # Distributed under the MIT software license, see the accompanying # file COPYING or http://www.opensource.org/licenses/mit-license.php. import biplist from ds_store import DSStore from mac_alias import Alias import sys output_file = sys.argv[1] package_name_ns = sys.argv[2] ds = DSStore.open(output_file, 'w+') ds['.']['bwsp'] = { 'ShowStatusBar': False, 'WindowBounds': '{{300, 280}, {500, 343}}', 'ContainerShowSidebar': False, 'SidebarWidth': 0, 'ShowTabView': False, 'PreviewPaneVisibility': False, 'ShowToolbar': False, 'ShowSidebar': False, 'ShowPathbar': True } icvp = { 'gridOffsetX': 0.0,
'backgroundImageAlias': b'\x00\x00\x00\x00\x02\x1e\x00\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xd1\x94\\\xb0H+\x00\x05\x00\x00\x00\x98\x0fbackground.tiff\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x99\xd19\xb0\xf8\x00\x00\x00\x00\x00\x00\x00\x00\xff\xff\xff\xff\x00\x00\r\x02\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x0b.background\x00\x00\x10\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x11\x00\x08\x00\x00\xd19\xb0\xf8\x00\x00\x00\x01\x00\x04\x00\x00\x00\x98\x00\x0e\x00 \x00\x0f\x00b\x00a\x00c\x00k\x00g\x00r\x00o\x00u\x00n\x00d\x00.\x00t\x00i\x00f\x00f\x00\x0f\x00\x02\x00\x00\x00\x12\x00\x1c/.background/background.tiff\x00\x14\x01\x06\x00\x00\x00\x00\x01\x06\x00\x02\x00\x00\x0cMacintosh HD\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\xce\x97\xab\xc3H+\x00\x00\x01\x88[\x88\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x02u\xab\x8d\xd1\x94\\\xb0devrddsk\xff\xff\xff\xff\x00\x00\t \x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x07bitcoin\x00\x00\x10\x00\x08\x00\x00\xce\x97\xab\xc3\x00\x00\x00\x11\x00\x08\x00\x00\xd1\x94\\\xb0\x00\x00\x00\x01\x00\x14\x01\x88[\x88\x00\x16\xa9\t\x00\x08\xfaR\x00\x08\xfaQ\x00\x02d\x8e\x00\x0e\x00\x02\x00\x00\x00\x0f\x00\x1a\x00\x0c\x00M\x00a\x00c\x00i\x00n\x00t\x00o\x00s\x00h\x00 \x00H\x00D\x00\x13\x00\x01/\x00\x00\x15\x00\x02\x00\x14\xff\xff\x00\x00\xff\xff\x00\x00', 'backgroundColorBlue': 1.0, 'iconSize': 96.0, 'backgroundColorGreen': 1.0, 'arrangeBy': 'none', 'showIconPreview': True, 'gridSpacing': 100.0, 'gridOffsetY': 0.0, 'showItemInfo': False, 'labelOnBottom': True, 'backgroundType': 2, 'backgroundColorRed': 1.0 } alias = Alias.from_bytes(icvp['backgroundImageAlias']) alias.volume.name = package_name_ns alias.volume.posix_path = '/Volumes/' + package_name_ns alias.volume.disk_image_alias.target.filename = package_name_ns + '.temp.dmg' alias.volume.disk_image_alias.target.carbon_path = 'Macintosh HD:Users:\x00asspenniesuser:\x00Documents:\x00asspennies:\x00asspennies:\x00' + package_name_ns + '.temp.dmg' alias.volume.disk_image_alias.target.posix_path = 'Users/asspenniesuser/Documents/asspennies/asspennies/' + package_name_ns + '.temp.dmg' alias.target.carbon_path = package_name_ns + ':.background:\x00background.tiff' icvp['backgroundImageAlias'] = biplist.Data(alias.to_bytes()) ds['.']['icvp'] = icvp ds['.']['vSrn'] = ('long', 1) ds['Applications']['Iloc'] = (370, 156) ds['AssPennies-Qt.app']['Iloc'] = (128, 156) ds.flush() ds.close()
'textSize': 12.0, 'viewOptionsVersion': 1,
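A minimal sketch reusing only the ds_store calls exercised above; the output path and app name here are hypothetical:

ds = DSStore.open('/tmp/example_DS_Store', 'w+')
ds['Applications']['Iloc'] = (370, 156)   # icon position for the /Applications symlink
ds['Example.app']['Iloc'] = (128, 156)    # icon position for the bundled app
ds.flush()
ds.close()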
yaml_test.go
package render import ( "errors" "github.com/stretchr/testify/assert" "net/http/httptest" "testing" ) func TestRenderYAML(t *testing.T)
type fail struct{} // Hook MarshalYAML func (ft *fail) MarshalYAML() (interface{}, error) { return nil, errors.New("fail") } func TestRenderYAMLFail(t *testing.T) { w := httptest.NewRecorder() err := (YAML{&fail{}}).Render(w) assert.Error(t, err) }
{ w := httptest.NewRecorder() data := ` a : Easy! b: c: 2 d: [3, 4] ` (YAML{data}).WriteContentType(w) assert.Equal(t, "application/x-yaml; charset=utf-8", w.Header().Get("Content-Type")) err := (YAML{data}).Render(w) assert.NoError(t, err) assert.Equal(t, "\"\\na : Easy!\\nb:\\n\\tc: 2\\n\\td: [3, 4]\\n\\t\"\n", w.Body.String()) assert.Equal(t, "application/x-yaml; charset=utf-8", w.Header().Get("Content-Type")) }
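For context, this renderer is normally reached through Gin's request context rather than instantiated directly; a sketch (imports from github.com/gin-gonic/gin assumed):

func handler(c *gin.Context) {
	// Serializes the value to YAML and sets Content-Type: application/x-yaml
	c.YAML(http.StatusOK, gin.H{"a": "Easy!", "b": gin.H{"c": 2}})
}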
list_stored_procedures_builder.rs
use crate::prelude::*; use crate::resources::ResourceType; use crate::responses::ListStoredProceduresResponse; use azure_core::prelude::*; use futures::stream::{unfold, Stream}; use http::StatusCode; use std::convert::TryInto; #[derive(Debug, Clone)] pub struct
<'a, 'b> { collection_client: &'a CollectionClient, user_agent: Option<UserAgent<'b>>, activity_id: Option<ActivityId<'b>>, consistency_level: Option<ConsistencyLevel>, continuation: Option<Continuation<'b>>, max_item_count: MaxItemCount, } impl<'a, 'b> ListStoredProceduresBuilder<'a, 'b> { pub(crate) fn new(collection_client: &'a CollectionClient) -> Self { Self { collection_client, user_agent: None, activity_id: None, consistency_level: None, continuation: None, max_item_count: MaxItemCount::new(-1), } } setters! { user_agent: &'b str => Some(UserAgent::new(user_agent)), activity_id: &'b str => Some(ActivityId::new(activity_id)), consistency_level: ConsistencyLevel => Some(consistency_level), continuation: &'b str => Some(Continuation::new(continuation)), max_item_count: i32 => MaxItemCount::new(max_item_count), } pub async fn execute(&self) -> crate::Result<ListStoredProceduresResponse> { trace!("ListStoredProceduresBuilder::execute called"); let request = self.collection_client.cosmos_client().prepare_request( &format!( "dbs/{}/colls/{}/sprocs", self.collection_client.database_client().database_name(), self.collection_client.collection_name(), ), http::Method::GET, ResourceType::StoredProcedures, ); // add trait headers let request = azure_core::headers::add_optional_header(&self.user_agent, request); let request = azure_core::headers::add_optional_header(&self.activity_id, request); let request = azure_core::headers::add_optional_header(&self.consistency_level, request); let request = azure_core::headers::add_optional_header(&self.continuation, request); let request = azure_core::headers::add_mandatory_header(&self.max_item_count, request); let request = request.body(bytes::Bytes::from_static(EMPTY_BODY))?; Ok(self .collection_client .http_client() .execute_request_check_status(request, StatusCode::OK) .await? .try_into()?) } pub fn stream(&self) -> impl Stream<Item = crate::Result<ListStoredProceduresResponse>> + '_ { #[derive(Debug, Clone, PartialEq)] enum States { Init, Continuation(String), } unfold( Some(States::Init), move |continuation_token: Option<States>| { async move { debug!("continuation_token == {:?}", &continuation_token); let response = match continuation_token { Some(States::Init) => self.execute().await, Some(States::Continuation(continuation_token)) => { self.clone() .continuation(continuation_token.as_str()) .execute() .await } None => return None, }; // the ? operator does not work in async move (yet?) // so we have to resort to this boilerplate let response = match response { Ok(response) => response, Err(err) => return Some((Err(err), None)), }; let continuation_token = response .continuation_token .as_ref() .map(|ct| States::Continuation(ct.to_owned())); Some((Ok(response), continuation_token)) } }, ) } }
ListStoredProceduresBuilder
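A sketch of draining the paginated stream with futures::StreamExt; the list_stored_procedures() constructor and the stored_procedures response field are assumptions about the surrounding crate, not confirmed by this file:

use futures::StreamExt;

async fn count_sprocs(collection_client: &CollectionClient) -> crate::Result<usize> {
    let builder = collection_client.list_stored_procedures();
    let mut stream = builder.stream();
    let mut total = 0;
    // Each yielded item is one page; the unfold above chases continuation tokens.
    while let Some(page) = stream.next().await {
        total += page?.stored_procedures.len();
    }
    Ok(total)
}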
tcp.go
package tcp import "encoding/binary" type Header struct { SrcPort uint16 DstPort uint16 Seq uint32 Ack uint32 OffsetReservedFlags uint16 Window uint16 Checksum uint16 Urgent uint16 // Options } func
(data []byte) *Header { // le := binary.LittleEndian be := binary.BigEndian return &Header{ SrcPort: be.Uint16(data[0:]), DstPort: be.Uint16(data[2:]), } }
NewHeader
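A sketch of decoding the remaining fixed-size fields with the same big-endian reads; offsets follow the standard TCP header layout, and parseFull is a hypothetical name:

func parseFull(data []byte) *Header {
	be := binary.BigEndian
	return &Header{
		SrcPort:             be.Uint16(data[0:]),
		DstPort:             be.Uint16(data[2:]),
		Seq:                 be.Uint32(data[4:]),
		Ack:                 be.Uint32(data[8:]),
		OffsetReservedFlags: be.Uint16(data[12:]),
		Window:              be.Uint16(data[14:]),
		Checksum:            be.Uint16(data[16:]),
		Urgent:              be.Uint16(data[18:]),
	}
}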
Insights.js
'use strict'; /* jshint ignore:start */ /** * This code was generated by * \ / _ _ _| _ _ * | (_)\/(_)(_|\/| |(/_ v1.0.0 * / / */ /* jshint ignore:end */ var _ = require('lodash'); /* jshint ignore:line */ var Domain = require('../base/Domain'); /* jshint ignore:line */ var V1 = require('./insights/V1'); /* jshint ignore:line */ /* jshint ignore:start */ /** * Initialize insights domain * * @constructor Twilio.Insights * * @property {Twilio.Insights.V1} v1 - v1 version * @property {Twilio.Insights.V1.CallList} calls - calls resource * * @param {Twilio} twilio - The twilio client */ /* jshint ignore:end */ function
(twilio) { Domain.prototype.constructor.call(this, twilio, 'https://insights.twilio.com'); // Versions this._v1 = undefined; } _.extend(Insights.prototype, Domain.prototype); Insights.prototype.constructor = Insights; Object.defineProperty(Insights.prototype, 'v1', { get: function() { this._v1 = this._v1 || new V1(this); return this._v1; } }); Object.defineProperty(Insights.prototype, 'calls', { get: function() { return this.v1.calls; } }); module.exports = Insights;
Insights
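What the memoized getters above buy the caller (a sketch; construction of the Twilio client is assumed): repeated access resolves to a single cached V1 instance.

// Both accesses go through the lazy `v1` getter; the second reuses the
// cached `_v1` rather than constructing a new version object.
var callsA = client.insights.calls;
var callsB = client.insights.calls;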
bids.py
import os from functools import reduce from pathlib import Path from gzip import GzipFile import json import shutil import numpy as np import nibabel as nb from collections import defaultdict from nipype import logging from nipype.utils.filemanip import makedirs, copyfile from nipype.interfaces.base import ( BaseInterfaceInputSpec, TraitedSpec, SimpleInterface, InputMultiPath, OutputMultiPath, File, Directory, traits, isdefined ) from nipype.interfaces.io import IOBase from ..utils import dict_intersection, snake_to_camel iflogger = logging.getLogger('nipype.interface') def bids_split_filename(fname): """Split a filename into parts: path, base filename, and extension Respects multi-part file types used in BIDS standard and draft extensions Largely copied from nipype.utils.filemanip.split_filename Parameters ---------- fname : str file or path name Returns ------- pth : str path of fname fname : str basename of filename, without extension ext : str file extension of fname """ special_extensions = [ ".R.surf.gii", ".L.surf.gii", ".R.func.gii", ".L.func.gii", ".nii.gz", ".tsv.gz", ] pth = os.path.dirname(fname) fname = os.path.basename(fname) for special_ext in special_extensions: if fname.lower().endswith(special_ext.lower()): ext_len = len(special_ext) ext = fname[-ext_len:] fname = fname[:-ext_len] break else: fname, ext = os.path.splitext(fname) return pth, fname, ext def _ensure_model(model): model = getattr(model, 'filename', model) if isinstance(model, str): if os.path.exists(model): with open(model) as fobj: model = json.load(fobj) else: model = json.loads(model) return model class ModelSpecLoaderInputSpec(BaseInterfaceInputSpec): bids_dir = Directory(exists=True, mandatory=True, desc='BIDS dataset root directory') model = traits.Either('default', InputMultiPath(File(exists=True)), desc='Model filename') selectors = traits.Dict(desc='Limit models to those with matching inputs') class ModelSpecLoaderOutputSpec(TraitedSpec): model_spec = OutputMultiPath(traits.Dict()) class ModelSpecLoader(SimpleInterface): input_spec = ModelSpecLoaderInputSpec output_spec = ModelSpecLoaderOutputSpec def _run_interface(self, runtime): import bids from bids.analysis import auto_model models = self.inputs.model if not isinstance(models, list): layout = bids.BIDSLayout(self.inputs.bids_dir) if not isdefined(models): models = layout.get(type='model') if not models: raise ValueError("No models found") elif models == 'default': models = auto_model(layout) models = [_ensure_model(m) for m in models] if self.inputs.selectors: # This is almost certainly incorrect models = [model for model in models if all(val in model['input'].get(key, [val]) for key, val in self.inputs.selectors.items())] self._results['model_spec'] = models return runtime IMPUTATION_SNIPPET = """\ <div class="warning"> The following confounds had NaN values for the first volume: {}. The mean of non-zero values for the remaining entries was imputed. If another strategy is desired, it must be explicitly specified in the model. 
</div> """ class LoadBIDSModelInputSpec(BaseInterfaceInputSpec): bids_dir = Directory(exists=True, mandatory=True, desc='BIDS dataset root directory') preproc_dir = Directory(exists=True, desc='Optional preprocessed files directory') model = traits.Dict(desc='Model specification', mandatory=True) selectors = traits.Dict(desc='Limit collected sessions', usedefault=True) include_pattern = InputMultiPath( traits.Str, xor=['exclude_pattern'], desc='Patterns to select sub-directories of BIDS root') exclude_pattern = InputMultiPath( traits.Str, xor=['include_pattern'], desc='Patterns to ignore sub-directories of BIDS root') class LoadBIDSModelOutputSpec(TraitedSpec): session_info = traits.List(traits.Dict()) contrast_info = traits.List(traits.List(File())) contrast_indices = traits.List(traits.List(traits.List(traits.Dict))) entities = traits.List(traits.List(traits.Dict())) warnings = traits.List(File) class LoadBIDSModel(SimpleInterface): input_spec = LoadBIDSModelInputSpec output_spec = LoadBIDSModelOutputSpec def _run_interface(self, runtime): import bids bids.config.set_options(loop_preproc=True) include = self.inputs.include_pattern exclude = self.inputs.exclude_pattern if not isdefined(include): include = None if not isdefined(exclude): exclude = None paths = [(self.inputs.bids_dir, 'bids')] if isdefined(self.inputs.preproc_dir): paths.append((self.inputs.preproc_dir, ['bids', 'derivatives'])) layout = bids.BIDSLayout(paths, include=include, exclude=exclude) selectors = self.inputs.selectors analysis = bids.Analysis(model=self.inputs.model, layout=layout) analysis.setup(drop_na=False, **selectors) self._load_level1(runtime, analysis) self._load_higher_level(runtime, analysis) # Debug - remove, eventually runtime.analysis = analysis return runtime def _load_level1(self, runtime, analysis): block = analysis.blocks[0] block_subdir = Path(runtime.cwd) / block.level block_subdir.mkdir(parents=True, exist_ok=True) entities = [] session_info = [] contrast_indices = [] contrast_info = [] warnings = [] for paradigm, _, ents in block.get_design_matrix( block.model['HRF_variables'], mode='sparse', force=True): info = {} space = analysis.layout.get_spaces(type='preproc', extensions=['.nii', '.nii.gz'])[0] preproc_files = analysis.layout.get(type='preproc', extensions=['.nii', '.nii.gz'], space=space, **ents) if len(preproc_files) != 1: raise ValueError('Too many BOLD files found') fname = preproc_files[0].filename # Required field in seconds TR = analysis.layout.get_metadata(fname, type='bold', full_search=True)['RepetitionTime'] dense_vars = set(block.model['variables']) - set(block.model['HRF_variables']) _, confounds, _ = block.get_design_matrix(dense_vars, mode='dense', force=True, sampling_rate=1/TR, **ents)[0] ent_string = '_'.join('{}-{}'.format(key, val) for key, val in ents.items()) events_file = block_subdir / '{}_events.h5'.format(ent_string) paradigm.to_hdf(events_file, key='events') imputed = [] if confounds is not None: # Note that FMRIPREP includes CosineXX columns to accompany # t/aCompCor # We may want to add criteria to include HPF columns that are not # explicitly listed in the model names = [col for col in confounds.columns if col.startswith('NonSteadyStateOutlier') or col in block.model['variables']] confounds = confounds[names] # These confounds are defined pairwise with the current volume # and its predecessor, and thus may be undefined (have value # NaN) at the first volume. # In these cases, we impute the mean non-zero value, for the # expected NaN only. 
# Any other NaNs must be handled by an explicit transform in # the BIDS model. for imputable in ('FramewiseDisplacement', 'stdDVARS', 'non-stdDVARS', 'vx-wisestdDVARS'): if imputable in confounds.columns: vals = confounds[imputable].values if not np.isnan(vals[0]): continue # Impute the mean non-zero, non-NaN value confounds[imputable][0] = np.nanmean(vals[vals != 0]) imputed.append(imputable) if np.isnan(confounds.values).any(): iflogger.warning('Unexpected NaNs found in confounds; ' 'regression may fail.') confounds_file = block_subdir / '{}_confounds.h5'.format(ent_string) confounds.to_hdf(confounds_file, key='confounds') else: confounds_file = None info['events'] = str(events_file) info['confounds'] = str(confounds_file) info['repetition_time'] = TR # Transpose so each contrast gets a row of data instead of column contrasts, index, _ = block.get_contrasts(**ents)[0] contrast_type_map = defaultdict(lambda: 'T') contrast_type_map.update({contrast['name']: contrast['type'] for contrast in block.contrasts}) contrast_type_list = [contrast_type_map[contrast] for contrast in contrasts.columns] contrasts = contrasts.T # Add test indicator column contrasts['type'] = contrast_type_list contrasts_file = block_subdir / '{}_contrasts.h5'.format(ent_string) contrasts_file.parent.mkdir(parents=True, exist_ok=True) contrasts.to_hdf(contrasts_file, key='contrasts') warning_file = block_subdir / '{}_warning.html'.format(ent_string) with warning_file.open('w') as fobj: if imputed: fobj.write(IMPUTATION_SNIPPET.format(', '.join(imputed))) entities.append(ents) session_info.append(info) contrast_indices.append(index.to_dict('records')) contrast_info.append(str(contrasts_file)) warnings.append(str(warning_file)) self._results['session_info'] = session_info self._results['warnings'] = warnings self._results.setdefault('entities', []).append(entities) self._results.setdefault('contrast_indices', []).append(contrast_indices) self._results.setdefault('contrast_info', []).append(contrast_info) def _load_higher_level(self, runtime, analysis): cwd = Path(runtime.cwd) for block in analysis.blocks[1:]: block_subdir = cwd / block.level block_subdir.mkdir(parents=True, exist_ok=True) entities = [] contrast_indices = [] contrast_info = [] for contrasts, index, ents in block.get_contrasts(): if contrasts.empty: continue # The contrast index is the name of the input contrasts, # which will very frequently be non-unique # Hence, add the contrast to the index (table of entities) # and switch to a matching numeric index index['contrast'] = contrasts.index contrasts.index = index.index contrast_type_map = defaultdict(lambda: 'T') contrast_type_map.update({contrast['name']: contrast['type'] for contrast in block.contrasts}) contrast_type_list = [contrast_type_map[contrast] for contrast in contrasts.columns] indices = index.to_dict('records') # Entities for a given contrast matrix include the intersection of # entities of inputs, e.g., if this level is within-subject, the # subject should persist out_ents = reduce(dict_intersection, indices) # Explicit entities take precedence over derived out_ents.update(ents) # Input-level contrasts will be overridden by the current level out_ents.pop('contrast', None) ent_string = '_'.join('{}-{}'.format(key, val) for key, val in out_ents.items()) # Transpose so each contrast gets a row of data instead of column contrasts = contrasts.T # Add test indicator column contrasts['type'] = contrast_type_list contrasts_file = block_subdir / '{}_contrasts.h5'.format(ent_string) 
contrasts_file.parent.mkdir(parents=True, exist_ok=True) contrasts.to_hdf(contrasts_file, key='contrasts') entities.append(out_ents) contrast_indices.append(indices) contrast_info.append(str(contrasts_file)) self._results['entities'].append(entities) self._results['contrast_info'].append(contrast_info) self._results['contrast_indices'].append(contrast_indices) class
(BaseInterfaceInputSpec): bids_dir = Directory(exists=True, mandatory=True, desc='BIDS dataset root directories') preproc_dir = Directory(exists=True, desc='Optional preprocessed files directory') entities = InputMultiPath(traits.Dict(), mandatory=True) selectors = traits.Dict(desc='Additional selectors to be applied', usedefault=True) class BIDSSelectOutputSpec(TraitedSpec): bold_files = OutputMultiPath(File) mask_files = OutputMultiPath(traits.Either(File, None)) entities = OutputMultiPath(traits.Dict) class BIDSSelect(SimpleInterface): input_spec = BIDSSelectInputSpec output_spec = BIDSSelectOutputSpec def _run_interface(self, runtime): import bids paths = [(self.inputs.bids_dir, 'bids')] if isdefined(self.inputs.preproc_dir): paths.append((self.inputs.preproc_dir, ['bids', 'derivatives'])) layout = bids.BIDSLayout(paths) bold_files = [] mask_files = [] entities = [] for ents in self.inputs.entities: selectors = {**self.inputs.selectors, **ents} bold_file = layout.get(extensions=['.nii', '.nii.gz'], **selectors) if len(bold_file) == 0: raise FileNotFoundError( "Could not find BOLD file in {} with entities {}" "".format(self.inputs.bids_dir, selectors)) elif len(bold_file) > 1: raise ValueError( "Non-unique BOLD file in {} with entities {}.\n" "Matches:\n\t{}" "".format(self.inputs.bids_dir, selectors, "\n\t".join( '{} ({})'.format( f.filename, layout.files[f.filename].entities) for f in bold_file))) # Select exactly matching mask file (may be over-cautious) bold_ents = layout.parse_file_entities( bold_file[0].filename) bold_ents['type'] = 'brainmask' mask_file = layout.get(extensions=['.nii', '.nii.gz'], **bold_ents) bold_ents.pop('type') bold_files.append(bold_file[0].filename) mask_files.append(mask_file[0].filename if mask_file else None) entities.append(bold_ents) self._results['bold_files'] = bold_files self._results['mask_files'] = mask_files self._results['entities'] = entities return runtime def _copy_or_convert(in_file, out_file): in_ext = bids_split_filename(in_file)[2] out_ext = bids_split_filename(out_file)[2] # Copy if filename matches if in_ext == out_ext: copyfile(in_file, out_file, copy=True, use_hardlink=True) return # gzip/gunzip if it's easy if in_ext == out_ext + '.gz' or in_ext + '.gz' == out_ext: read_open = GzipFile if in_ext.endswith('.gz') else open write_open = GzipFile if out_ext.endswith('.gz') else open with read_open(in_file, mode='rb') as in_fobj: with write_open(out_file, mode='wb') as out_fobj: shutil.copyfileobj(in_fobj, out_fobj) return # Let nibabel take a shot try: nb.save(nb.load(in_file), out_file) except Exception: pass else: return raise RuntimeError("Cannot convert {} to {}".format(in_ext, out_ext)) class BIDSDataSinkInputSpec(BaseInterfaceInputSpec): base_directory = Directory( mandatory=True, desc='Path to BIDS (or derivatives) root directory') in_file = InputMultiPath(File(exists=True), mandatory=True) entities = InputMultiPath(traits.Dict, usedefault=True, desc='Per-file entities to include in filename') fixed_entities = traits.Dict(usedefault=True, desc='Entities to include in all filenames') path_patterns = InputMultiPath( traits.Str, desc='BIDS path patterns describing format of file names') class BIDSDataSinkOutputSpec(TraitedSpec): out_file = OutputMultiPath(File, desc='output file') class BIDSDataSink(IOBase): input_spec = BIDSDataSinkInputSpec output_spec = BIDSDataSinkOutputSpec _always_run = True def _list_outputs(self): import bids base_dir = self.inputs.base_directory os.makedirs(base_dir, exist_ok=True) layout = 
bids.BIDSLayout(base_dir) path_patterns = self.inputs.path_patterns if not isdefined(path_patterns): path_patterns = None out_files = [] for entities, in_file in zip(self.inputs.entities, self.inputs.in_file): ents = {**self.inputs.fixed_entities} ents.update(entities) ents = {k: snake_to_camel(str(v)) for k, v in ents.items()} out_fname = os.path.join( base_dir, layout.build_path(ents, path_patterns)) makedirs(os.path.dirname(out_fname), exist_ok=True) _copy_or_convert(in_file, out_fname) out_files.append(out_fname) return {'out_file': out_files}
BIDSSelectInputSpec
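A doctest-style sketch of the multi-part extension handling in bids_split_filename above:

assert bids_split_filename('/data/sub-01_task-rest_bold.nii.gz') == \
    ('/data', 'sub-01_task-rest_bold', '.nii.gz')
assert bids_split_filename('surf.R.func.gii') == ('', 'surf', '.R.func.gii')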
CarbonEquiv_Talmy.py
# importing all modules import numpy as np import matplotlib as mpl import matplotlib.pyplot as plt import matplotlib.colors as colors from matplotlib import cm import matplotlib.tri as tri from matplotlib.colors import LogNorm import matplotlib.patches as mpatches from matplotlib.ticker import LogFormatter from collections import Counter from functools import wraps import csv import sys import itertools from itertools import islice, cycle, chain import scipy as sp from scipy.interpolate import griddata from scipy import interpolate from scipy.integrate import odeint from scipy.stats import pareto from scipy.stats import loguniform import seaborn as sns import pandas as pd import statistics as stats import lhsmdu from math import nan from SALib.sample import saltelli, latin, ff from SALib.analyze import sobol import random # define the function which includes the differential equations # this was adapted from the leak/lyse experiment so I just left that in and set it to a final value later def
(s,t, leak, lyse, temp): # first define the relative contact rate (RCR) and brine concentrating factor (BCF) by temp if temp < -1: RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125 #Fit from Wells and Deming, 2006 BCF = -0.0106 * temp **2 - 0.519 * temp + 0.2977 sal = 32 * BCF else: RCR = 1 sal = 32 # these are our scaling factors for the temperature-dependent parameter distributions mux = 1 # for growth rate betx = 1 # for burst size phix = 1e-5 # for adsorption rate gamx = 1 # for lytic fraction # Temp-dependent parameter distribution for burst size beta = betx*(0.0064 * temp**3 - 0.3047 * temp ** 2 + 0.7701 * temp + 93.605) # also parameterized as a curve with a standard deviation (std) for other experiments # but here was simply a set curve for reproducibility """ beta_std = 0.0095 * temp **3 - 0.5184 * temp**2 + 2.2456 * temp + 126.59 if beta_std < 0: beta_std = 0. beta = np.random.normal(beta_mu, beta_std)""" # Temp-dependent parameter distribution for growth rate # (we had two different distributions, but I went with the exponential one) # mu = mux*(2e-5*temp**3 + 0.0008 * temp **2 + 0.0091 * temp + 0.0386) # mu = 3e-6*temp**4 + 0.0001*temp**3+0.0014*temp**2 + 0.0092 * temp +0.0333 mu = 0.0441*np.exp(0.4991*temp) """mu_std = 0.1*2e-5*temp**3 + 0.0009 * temp **2 + 0.0144 * temp + 0.0818 if mu_std<0: mu_std = 0.001 mu = np.random.normal(mu_mu, mu_std)""" # Temp-dependent parameter distribution for adsorption rate # I also tried it as a function of salinity (immediately below), but chose temp for consistency #phi = phix * -1e-11*sal**2 +4e-9*sal - 9e-8 phi = phix * (6e-13 * temp **5 - 2e-11 * temp ** 4 + 1e-10 * temp ** 3 + 3e-9 * temp ** 2 - 3e-8 * temp + 5e-8) """phi_std = -2e-11*sal**2 + 4e-9*sal - 9e-8 if phi_std < 0: phi_std = 0 phi = np.random.normal(phi_mu, phi_std)""" # set conditions for when curve goes below zero if mu <= 0: mu = 0.000 if beta < 0: beta = 1 if phi < 0: phi = 1e-15 # now we want to scale adsorption rate by RCR to incorporate the sea ice phi = phi * RCR # SET PARAMETERS alpha = 1.2e-7*3**((temp-23)/10)#4.2e-7 at +8, or 1.2e-7 at lower temps, at -5 --> mu = 0.25/day = 0.01/hr = 1e-8 # alpha is a coefficient that we'd like to change with temperature? Or change eta? 
#nutrient transfer coefficient to bacteria (ug/cell * hr) Q = 0.022 #half saturation constant (ug/mL) d = 1e-8 #constant of bacterial death (1/hr) m = 1e-6 #constant of viral decay (1/hr) g = leak #POM transfer coefficient from bacteria (ug/cell*hr) n = lyse #POM transfer coefficient from viral lysis ug/[burst]cell #gamma is a lysogeny value gamma = 1 #-1/temp #*mu # set up solution matrix N = s[0] B = s[1] V = s[2] P = s[3] #systems of equations below dNdt = - alpha * (N / (N + Q)) * B + g * (alpha * (N/(N+Q))*B) + (n * 1e-7 * (gamma) * phi * V * B) if N < 0: N = 0 dBdt = (mu) * (N/(Q + N)) * B - gamma * phi * V * B - d*B if B < 1: B = 1 dVdt = gamma*beta * B * phi*V - phi * V * B - m*V if V < 1: V = 1 #dPdt = (g * (0.0083*1e-7))*B + (n * 1e-7 * phi * V * B*RCR) + 1e-10*m*V + 1.0e-7*d*B - (P/(P+Q))*alpha * B dPdt = g * alpha * (N/ (N+Q))*B + n * 1e-7 * (gamma)*phi*B*V # according to Jover, 2014 - virus has 0.02 to 0.05 fg carbon/virion => translate into ug Carbon = 5e-11 VCarbonEQ = 5e-11 BCarbonEQ = 1e-7 #from Bionumbers # building the carbon equivalent for viruses, lysate as per Talmy et al 2019 rv = 90 #virus radius (nm) Qv = (41 * (rv - 2.5)**3 + 130*(7.5*(rv)**2 - 18.74 * rv + 15.63)) * (10e6/(6.022 * 10**23)) # virus carbon eq phiEQ = (phi)/(Qv) Qh = 1e-7 etav = beta * (Qv/Qh) TotalVCarbon = (phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ)) VirusCarbon = etav * (phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ)) LysateCarbon = (1-etav)*(phiEQ * (gamma) * (V*VCarbonEQ) * (B*BCarbonEQ)) LeakCarbon = g * (alpha * (N/(N+Q))*B) #print (mu, beta, phi, gamma) return [dNdt, dBdt, dVdt, dPdt, TotalVCarbon, VirusCarbon, LysateCarbon, LeakCarbon] # define time, temperature scale time = 5000 temp_list = [-12.5,-10, -8, -6, -4, -2] t = np.linspace(1,time,1000) # set up empty matrices DOMX = [] DOMA = [] DOMB = [] DOMC = [] DOM1 = [] DOM10 = [] DOM100 = [] RCRlist = [] Mulist = [] endvals1 = [] endvals2 = [] endvals3 = [] endvals4 = [] Burstlist = [] Adsorplist = [] xend = [] count = 0 plt.rcParams["font.family"] = "sans-serif" fig1 = plt.figure(figsize=(20,15)) fig1.tight_layout() plt.rcParams.update({'font.size': 15}) for xx in temp_list: temp = xx count +=1 mu = 0.0441*np.exp(0.4991*temp) gamma = 1 #print ("gamma is:", gamma, "and mu is:", mu) if temp < -1: RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125 #Fit from Wells and Deming, 2006 BCF = -0.0106 * temp **2 - 0.519 * temp + 0.2977 sal = 32 * BCF else: RCR = 1 BCF = 1 sal = 32 s0=[0.12*BCF,1e4*BCF, 1e5*BCF,0,0,0,0,0] s = odeint(f2,s0,t, args = (0.4,0.99, temp)) xend.append(sum(s[:,3])) y1 = s[:,4]/(0.12) y2 = s[:,5]/(0.12) y3 = s[:,6]/(0.12) y4 = s[:,7]/(0.12) plt.subplot(3, 3, count) colors1 = ['cadetblue', '#FF6F61'] #, 'darkblue'] plt.stackplot(t,y2,y3, colors = colors1,labels=['To Virus','To Lysate']) plt.legend(loc='lower right') plt.xlabel('Temperature: {} (˚C)'.format(temp)) plt.yscale('log') plt.ylabel('% Initial Nutrient') # take last value of each returned number for the temp-dependent plot endvals1.append(y1[-1]) endvals2.append(y2[-1]) endvals3.append(y3[-1]) endvals4.append(y4[-1]) # make lists of calculated temp-dependent parameters if we want to plot against them later RCRlist.append(RCR) Mulist.append(mu) beta = 1*(0.0064 * temp**3 - 0.3047 * temp ** 2 + 0.7701 * temp + 93.605) Burstlist.append(beta) phi = RCR* 1 * (6e-13 * temp **5 - 2e-11 * temp ** 4 + 1e-10 * temp ** 3 + 3e-9 * temp ** 2 - 3e-8 * temp + 5e-8) Adsorplist.append(phi) plt.subplots_adjust(hspace = 1) fig1.suptitle("Cumulative organic carbon 
recycled into Virions or Lysate ",fontsize=15) # Plot as a function of temperature plt.rcParams["font.family"] = "sans-serif" plt.rcParams.update({'font.size': 20}) fig2 = plt.figure(figsize=(10,5)) fig2.tight_layout() endvals1_b = [i/max(endvals1) for i in endvals1] endvals2_b = [i/max(endvals2) for i in endvals2] endvals3_b = [i/max(endvals3) for i in endvals3] endvals4_b = [i/max(endvals4) for i in endvals4] #ax1 = plt.stackplot(temp_list, endvals2_b, endvals3, colors = colors1) #, labels=['To Virus','To Lysate', 'Cell exudate']) #ax1 = plt.plot(temp_list, Burstlist) plt.plot(temp_list,endvals2_b, c = 'cadetblue', marker = 'o', markeredgecolor='white', markersize=15, label='to Virions') plt.plot(temp_list, endvals3_b, c = '#FA7268', marker = 'o', markeredgecolor='white', markersize=15, label='to Lysate') plt.xlabel('Temperature (˚C)') plt.ylabel('Carbon Flow (Relative to Maximum)') plt.legend(loc='lower right') fig2.suptitle("Cumulative organic carbon recycled into \nVirions or Lysate as a function of temperature\n",fontsize=15) # In[88]: #fig1.savefig('CE_Grid_withRCR_runaway.jpeg', bbox_inches="tight", dpi=300,transparent=True) #fig2.savefig('CE_Temp_noRCR_line.jpeg', bbox_inches="tight", dpi=300,transparent=True)
f2
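A sketch of the sea-ice scalings at a single temperature, using the same polynomial fits as f2 above (RCR fit from Wells and Deming, 2006):

temp = -6
RCR = 0.0716*temp**4 + 2.9311*temp**3 + 34.108*temp**2 + 45.826*temp + 3.5125
BCF = -0.0106*temp**2 - 0.519*temp + 0.2977
print('T={} C: RCR={:.1f}, brine salinity={:.1f}'.format(temp, RCR, 32*BCF))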
timeline-chart.spec.ts
/** * @license * Copyright 2019 Dynatrace LLC * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ // tslint:disable no-lifecycle-call no-use-before-declare no-magic-numbers // tslint:disable no-any max-file-line-count no-unbound-method use-component-selector import { Component } from '@angular/core'; import { ComponentFixture, TestBed, async } from '@angular/core/testing'; import { By } from '@angular/platform-browser'; import { DtTimelineChartModule } from '@dynatrace/barista-components/timeline-chart'; import { createComponent } from '@dynatrace/barista-components/testing'; describe('DtTimelineChart', () => { let fixture: ComponentFixture<SimpleTestApp>; beforeEach(async(() => { TestBed.configureTestingModule({ imports: [DtTimelineChartModule], declarations: [SimpleTestApp], }); TestBed.compileComponents(); fixture = createComponent(SimpleTestApp); fixture.detectChanges(); })); describe('marker', () => { it('should render timing markers', () => { const timelineChart: HTMLElement = fixture.debugElement.query( By.css('dt-timeline-chart'), ).nativeElement; const markers = timelineChart.querySelectorAll( '.dt-timeline-chart-timing-marker', ); expect(markers).toHaveLength(2); }); it('should render new timing marker when a new one is added', () => { fixture.componentInstance.showThirdTimingMarker = true; fixture.detectChanges(); const timelineChart: HTMLElement = fixture.debugElement.query( By.css('dt-timeline-chart'), ).nativeElement; const markers = timelineChart.querySelectorAll( '.dt-timeline-chart-timing-marker', ); expect(markers).toHaveLength(3); }); it('should render key timing markers', () => { const timelineChart: HTMLElement = fixture.debugElement.query( By.css('dt-timeline-chart'), ).nativeElement; const markers = timelineChart.querySelectorAll( '.dt-timeline-chart-key-timing-marker', ); expect(markers).toHaveLength(1); }); }); describe('legend', () => { it('should render a legend item for each timing marker', () => { const timelineChart: HTMLElement = fixture.debugElement.query( By.css('dt-timeline-chart'), ).nativeElement; const legendItems = timelineChart.querySelectorAll('dt-legend-item'); expect(legendItems).toHaveLength(2); }); it('should update the legend items when new timing markers are added', () => { fixture.componentInstance.showThirdTimingMarker = true; fixture.detectChanges(); const timelineChart: HTMLElement = fixture.debugElement.query( By.css('dt-timeline-chart'), ).nativeElement; const legendItems = timelineChart.querySelectorAll('dt-legend-item'); expect(legendItems).toHaveLength(3); }); }); }); @Component({ selector: 'dt-test-app', template: ` <dt-timeline-chart value="0.37" unit="s"> <dt-timeline-chart-timing-marker value="0.02" identifier="R"> Request start 0.02s </dt-timeline-chart-timing-marker> <dt-timeline-chart-timing-marker value="0.04" identifier="S"> Speed index 0.04s </dt-timeline-chart-timing-marker> <dt-timeline-chart-timing-marker value="0.17" identifier="I" *ngIf="showThirdTimingMarker" > DOM interactive 0.17s </dt-timeline-chart-timing-marker> 
<dt-timeline-chart-key-timing-marker value="0.03" identifier="V"> Visually complete </dt-timeline-chart-key-timing-marker> </dt-timeline-chart> `, }) class
{ showThirdTimingMarker = false; }
SimpleTestApp
slice.rs
use core::iter::FusedIterator; use core::{fmt, mem}; pub struct SplitInclusiveMut<'a, T, P> where P: FnMut(&T) -> bool { v: &'a mut [T], pred: P, finished: bool, } impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitInclusiveMut<'a, T, P> { pub(super) fn new(slice: &'a mut [T], pred: P) -> Self { Self { v: slice, pred, finished: false } } } impl<T: fmt::Debug, P> fmt::Debug for SplitInclusiveMut<'_, T, P> where P: FnMut(&T) -> bool { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { f.debug_struct("SplitInclusiveMut") .field("v", &self.v) .field("finished", &self.finished) .finish() } } impl<'a, T, P> Iterator for SplitInclusiveMut<'a, T, P> where P: FnMut(&T) -> bool { type Item = &'a mut [T]; fn next(&mut self) -> Option<&'a mut [T]> { if self.finished { return None; } let idx_opt = { let pred = &mut self.pred; self.v.iter().position(|x| (*pred)(x)) }; let idx = idx_opt.map(|idx| idx + 1).unwrap_or(self.v.len()); if idx == self.v.len() { self.finished = true; } let tmp = mem::replace(&mut self.v, &mut []); let (head, tail) = tmp.split_at_mut(idx); self.v = tail; Some(head) } fn size_hint(&self) -> (usize, Option<usize>) { if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) } } } impl<'a, T, P> DoubleEndedIterator for SplitInclusiveMut<'a, T, P> where P: FnMut(&T) -> bool { fn next_back(&mut self) -> Option<&'a mut [T]> { if self.finished { return None; } let idx_opt = if self.v.is_empty() { None } else { let pred = &mut self.pred; let remainder = &self.v[..(self.v.len() - 1)]; remainder.iter().rposition(|x| (*pred)(x)) }; let idx = idx_opt.map(|idx| idx + 1).unwrap_or(0); if idx == 0 { self.finished = true; } let tmp = mem::replace(&mut self.v, &mut []); let (head, tail) = tmp.split_at_mut(idx); self.v = head; Some(tail) } } impl<T, P> FusedIterator for SplitInclusiveMut<'_, T, P> where P: FnMut(&T) -> bool {} pub struct SplitInclusive<'a, T, P> where P: FnMut(&T) -> bool { v: &'a [T], pred: P, finished: bool, } impl<'a, T: 'a, P: FnMut(&T) -> bool> SplitInclusive<'a, T, P> { pub(super) fn new(slice: &'a [T], pred: P) -> Self { Self { v: slice, pred, finished: false } } } impl<T: fmt::Debug, P> fmt::Debug for SplitInclusive<'_, T, P> where P: FnMut(&T) -> bool { fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result
} impl<T, P> Clone for SplitInclusive<'_, T, P> where P: Clone + FnMut(&T) -> bool { fn clone(&self) -> Self { SplitInclusive { v: self.v, pred: self.pred.clone(), finished: self.finished } } } impl<'a, T, P> Iterator for SplitInclusive<'a, T, P> where P: FnMut(&T) -> bool { type Item = &'a [T]; fn next(&mut self) -> Option<&'a [T]> { if self.finished { return None; } let idx = self.v.iter().position(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(self.v.len()); if idx == self.v.len() { self.finished = true; } let ret = Some(&self.v[..idx]); self.v = &self.v[idx..]; ret } fn size_hint(&self) -> (usize, Option<usize>) { if self.finished { (0, Some(0)) } else { (1, Some(self.v.len() + 1)) } } } impl<'a, T, P> DoubleEndedIterator for SplitInclusive<'a, T, P> where P: FnMut(&T) -> bool { fn next_back(&mut self) -> Option<&'a [T]> { if self.finished { return None; } let remainder = if self.v.is_empty() { &[] } else { &self.v[..(self.v.len() - 1)] }; let idx = remainder.iter().rposition(|x| (self.pred)(x)).map(|idx| idx + 1).unwrap_or(0); if idx == 0 { self.finished = true; } let ret = Some(&self.v[idx..]); self.v = &self.v[..idx]; ret } } impl<T, P> FusedIterator for SplitInclusive<'_, T, P> where P: FnMut(&T) -> bool {}
{ f.debug_struct("SplitInclusive") .field("v", &self.v) .field("finished", &self.finished) .finish() }
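The pair of iterators above mirrors the standard library's slice::split_inclusive; a usage sketch with the stable std method (each chunk keeps its matching terminator):

fn main() {
    let slice = [10, 40, 33, 20];
    let chunks: Vec<&[i32]> = slice.split_inclusive(|n| n % 3 == 0).collect();
    assert_eq!(chunks, [&[10, 40, 33][..], &[20][..]]);
}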
dispatch.rs
use crate::buffer::{get_mod_time, Buffer, BufferId}; use crate::lsp::LspCatalog; use crate::plugin::{PluginCatalog, PluginDescription}; use crate::terminal::{TermId, Terminal}; use alacritty_terminal::event_loop::Msg; use alacritty_terminal::term::SizeInfo; use anyhow::{anyhow, Context, Result}; use crossbeam_channel::{unbounded, Receiver, Sender}; use directories::BaseDirs; use git2::{DiffOptions, Repository}; use grep_matcher::Matcher; use grep_regex::RegexMatcherBuilder; use grep_searcher::sinks::UTF8; use grep_searcher::SearcherBuilder; use lapce_rpc::{self, Call, RequestId, RpcObject}; use lsp_types::{CompletionItem, Position, TextDocumentContentChangeEvent}; use notify::Watcher; use parking_lot::Mutex; use serde::{Deserialize, Serialize}; use serde_json::json; use serde_json::Value; use std::cmp::Ordering; use std::collections::HashMap; use std::path::{Path, PathBuf}; use std::sync::Arc; use std::thread; use std::{cmp, fs}; use std::{collections::HashSet, io::BufRead}; use xi_rope::RopeDelta; #[derive(Clone)] pub struct Dispatcher { pub sender: Arc<Sender<Value>>, pub git_sender: Sender<(BufferId, u64)>, pub workspace: Arc<Mutex<Option<PathBuf>>>, pub buffers: Arc<Mutex<HashMap<BufferId, Buffer>>>, #[allow(deprecated)] pub terminals: Arc<Mutex<HashMap<TermId, mio::channel::Sender<Msg>>>>, open_files: Arc<Mutex<HashMap<String, BufferId>>>, plugins: Arc<Mutex<PluginCatalog>>, pub lsp: Arc<Mutex<LspCatalog>>, pub watcher: Arc<Mutex<Option<notify::RecommendedWatcher>>>, last_diff: Arc<Mutex<DiffInfo>>, } impl notify::EventHandler for Dispatcher { fn handle_event(&mut self, event: notify::Result<notify::Event>) { if let Ok(event) = event { for path in event.paths.iter() { if let Some(path) = path.to_str() { if let Some(buffer_id) = self.open_files.lock().get(path) { match event.kind { notify::EventKind::Create(_) | notify::EventKind::Modify(_) => { if let Some(buffer) = self.buffers.lock().get_mut(buffer_id) { if get_mod_time(&buffer.path) == buffer.mod_time { return; } if !buffer.dirty { buffer.reload(); self.lsp.lock().update( buffer, &TextDocumentContentChangeEvent { range: None, range_length: None, text: buffer.get_document(), }, buffer.rev, ); let _ = self.sender.send(json!({ "method": "reload_buffer", "params": { "buffer_id": buffer_id, "rev": buffer.rev, "new_content": buffer.get_document(), }, })); } } } _ => (), } } } } match event.kind { notify::EventKind::Create(_) | notify::EventKind::Modify(_) | notify::EventKind::Remove(_) => { if let Some(workspace) = self.workspace.lock().clone() { if let Some(diff) = git_diff_new(&workspace) { if diff != *self.last_diff.lock() { self.send_notification( "diff_info", json!({ "diff": diff, }), ); *self.last_diff.lock() = diff; } } } } _ => (), } } } } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] #[serde(tag = "method", content = "params")] pub enum Notification { Initialize { workspace: PathBuf, }, Shutdown {}, Update { buffer_id: BufferId, delta: RopeDelta, rev: u64, }, NewTerminal { term_id: TermId, cwd: Option<PathBuf>, shell: String, }, InstallPlugin { plugin: PluginDescription, }, GitCommit { message: String, diffs: Vec<FileDiff>, }, TerminalWrite { term_id: TermId, content: String, }, TerminalResize { term_id: TermId, width: usize, height: usize, }, TerminalClose { term_id: TermId, }, } #[derive(Debug, Clone, Serialize, Deserialize)] #[serde(rename_all = "snake_case")] #[serde(tag = "method", content = "params")] pub enum Request { NewBuffer { buffer_id: BufferId, path: PathBuf, }, BufferHead { 
buffer_id: BufferId, path: PathBuf, }, GetCompletion { request_id: usize, buffer_id: BufferId, position: Position, }, GlobalSearch { pattern: String, }, CompletionResolve { buffer_id: BufferId, completion_item: Box<CompletionItem>, }, GetSignature { buffer_id: BufferId, position: Position, }, GetReferences { buffer_id: BufferId, position: Position, }, GetDefinition { request_id: usize, buffer_id: BufferId, position: Position, }, GetCodeActions { buffer_id: BufferId, position: Position, }, GetDocumentSymbols { buffer_id: BufferId, }, GetDocumentFormatting { buffer_id: BufferId, }, GetFiles { path: String, }, ReadDir { path: PathBuf, }, Save { rev: u64, buffer_id: BufferId, }, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct NewBufferResponse { pub content: String, } #[derive(Debug, Clone, Serialize, Deserialize)] pub struct BufferHeadResponse { pub id: String, pub content: String, } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Default)] pub struct DiffInfo { pub head: String, pub branches: Vec<String>, pub diffs: Vec<FileDiff>, } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] pub enum FileDiff { Modified(PathBuf), Added(PathBuf), Deleted(PathBuf), Renamed(PathBuf, PathBuf), } impl FileDiff { pub fn path(&self) -> &PathBuf { match &self { FileDiff::Modified(p) | FileDiff::Added(p) | FileDiff::Deleted(p) | FileDiff::Renamed(_, p) => p, } } } #[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] pub struct FileNodeItem { pub path_buf: PathBuf, pub is_dir: bool, pub read: bool, pub open: bool, pub children: HashMap<PathBuf, FileNodeItem>, pub children_open_count: usize, } impl std::cmp::PartialOrd for FileNodeItem { fn partial_cmp(&self, other: &Self) -> Option<cmp::Ordering> { let self_dir = self.is_dir; let other_dir = other.is_dir; if self_dir && !other_dir { return Some(cmp::Ordering::Less); } if !self_dir && other_dir { return Some(cmp::Ordering::Greater); } let self_file_name = self.path_buf.file_name()?.to_str()?.to_lowercase(); let other_file_name = other.path_buf.file_name()?.to_str()?.to_lowercase(); if self_file_name.starts_with('.') && !other_file_name.starts_with('.') { return Some(cmp::Ordering::Less); } if !self_file_name.starts_with('.') && other_file_name.starts_with('.') { return Some(cmp::Ordering::Greater); } self_file_name.partial_cmp(&other_file_name) } } impl FileNodeItem { pub fn sorted_children(&self) -> Vec<&FileNodeItem> { let mut children = self .children .iter() .map(|(_, item)| item) .collect::<Vec<&FileNodeItem>>(); children.sort_by(|a, b| match (a.is_dir, b.is_dir) { (true, true) => a .path_buf .to_str() .unwrap() .cmp(b.path_buf.to_str().unwrap()), (true, false) => Ordering::Less, (false, true) => Ordering::Greater, (false, false) => a .path_buf .to_str() .unwrap() .cmp(b.path_buf.to_str().unwrap()), }); children } pub fn sorted_children_mut(&mut self) -> Vec<&mut FileNodeItem> { let mut children = self .children .iter_mut() .map(|(_, item)| item) .collect::<Vec<&mut FileNodeItem>>(); children.sort_by(|a, b| match (a.is_dir, b.is_dir) { (true, true) => a .path_buf .to_str() .unwrap() .cmp(b.path_buf.to_str().unwrap()), (true, false) => Ordering::Less, (false, true) => Ordering::Greater, (false, false) => a .path_buf .to_str() .unwrap() .cmp(b.path_buf.to_str().unwrap()), }); children } } impl Dispatcher { pub fn new(sender: Sender<Value>) -> Dispatcher { let plugins = PluginCatalog::new(); let (git_sender, git_receiver) = unbounded(); let dispatcher = Dispatcher { sender: Arc::new(sender), git_sender, workspace: 
Arc::new(Mutex::new(None)), buffers: Arc::new(Mutex::new(HashMap::new())), open_files: Arc::new(Mutex::new(HashMap::new())), terminals: Arc::new(Mutex::new(HashMap::new())), plugins: Arc::new(Mutex::new(plugins)), lsp: Arc::new(Mutex::new(LspCatalog::new())), watcher: Arc::new(Mutex::new(None)), last_diff: Arc::new(Mutex::new(DiffInfo::default())), }; *dispatcher.watcher.lock() = Some(notify::recommended_watcher(dispatcher.clone()).unwrap()); dispatcher.lsp.lock().dispatcher = Some(dispatcher.clone()); let local_dispatcher = dispatcher.clone(); thread::spawn(move || { local_dispatcher.plugins.lock().reload(); let plugins = { local_dispatcher.plugins.lock().items.clone() }; local_dispatcher.send_notification( "installed_plugins", json!({ "plugins": plugins, }), ); local_dispatcher .plugins .lock() .start_all(local_dispatcher.clone()); }); let local_dispatcher = dispatcher.clone(); thread::spawn(move || { if let Some(path) = BaseDirs::new().map(|d| PathBuf::from(d.home_dir())) { local_dispatcher.send_notification( "home_dir", json!({ "path": path, }), ); } }); dispatcher.start_update_process(git_receiver); dispatcher.send_notification("proxy_connected", json!({})); dispatcher } pub fn mainloop(&self, receiver: Receiver<Value>) -> Result<()> { for msg in receiver { let rpc: RpcObject = msg.into(); if rpc.is_response() { } else { match rpc.into_rpc::<Notification, Request>() { Ok(Call::Request(id, request)) => { self.handle_request(id, request); } Ok(Call::Notification(notification)) => { if let Notification::Shutdown {} = &notification { for (_, sender) in self.terminals.lock().iter() { #[allow(deprecated)] let _ = sender.send(Msg::Shutdown); } self.open_files.lock().clear(); self.buffers.lock().clear(); self.plugins.lock().stop(); self.lsp.lock().stop(); self.watcher.lock().take(); return Ok(()); } self.handle_notification(notification); } Err(_e) => {} } } } Ok(()) } pub fn start_update_process(&self, receiver: Receiver<(BufferId, u64)>) { let buffers = self.buffers.clone(); let lsp = self.lsp.clone(); thread::spawn(move || loop { match receiver.recv() { Ok((buffer_id, rev)) => { let buffers = buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); let (_path, _content) = if buffer.rev != rev { continue; } else { ( buffer.path.clone(), buffer.slice_to_cow(..buffer.len()).to_string(), ) }; lsp.lock().get_semantic_tokens(buffer); } Err(_) => { return; } } }); } pub fn next<R: BufRead>( &self, reader: &mut R, s: &mut String, ) -> Result<RpcObject> { s.clear(); let _ = reader.read_line(s)?; if s.is_empty() { Err(anyhow!("empty line")) } else { self.parse(s) } } fn parse(&self, s: &str) -> Result<RpcObject> { let val = serde_json::from_str::<Value>(s)?; if !val.is_object() { Err(anyhow!("not json object")) } else { Ok(val.into()) } } pub fn respond(&self, id: RequestId, result: Result<Value>) { let mut resp = json!({ "id": id }); match result { Ok(v) => resp["result"] = v, Err(e) => { resp["error"] = json!({ "code": 0, "message": format!("{}",e), }) } } let _ = self.sender.send(resp); } pub fn send_notification(&self, method: &str, params: Value) { let _ = self.sender.send(json!({ "method": method, "params": params, })); } fn
(&self, rpc: Notification) { match rpc { Notification::Initialize { workspace } => { *self.workspace.lock() = Some(workspace.clone()); let _ = self .watcher .lock() .as_mut() .unwrap() .watch(&workspace, notify::RecursiveMode::Recursive); if let Some(diff) = git_diff_new(&workspace) { self.send_notification( "diff_info", json!({ "diff": diff, }), ); *self.last_diff.lock() = diff; } } Notification::Shutdown {} => {} Notification::Update { buffer_id, delta, rev, } => { let mut buffers = self.buffers.lock(); let buffer = buffers.get_mut(&buffer_id).unwrap(); if let Some(content_change) = buffer.update(&delta, rev) { self.lsp.lock().update(buffer, &content_change, buffer.rev); } } Notification::InstallPlugin { plugin } => { let catalog = self.plugins.clone(); let dispatcher = self.clone(); std::thread::spawn(move || { if let Err(_e) = catalog.lock().install_plugin(dispatcher.clone(), plugin) { } let plugins = { dispatcher.plugins.lock().items.clone() }; dispatcher.send_notification( "installed_plugins", json!({ "plugins": plugins, }), ); }); } Notification::NewTerminal { term_id, cwd, shell, } => { let mut terminal = Terminal::new(term_id, cwd, shell, 50, 10); let tx = terminal.tx.clone(); self.terminals.lock().insert(term_id, tx); let dispatcher = self.clone(); std::thread::spawn(move || { terminal.run(dispatcher); }); } Notification::TerminalClose { term_id } => { let mut terminals = self.terminals.lock(); if let Some(tx) = terminals.remove(&term_id) { #[allow(deprecated)] let _ = tx.send(Msg::Shutdown); } } Notification::TerminalWrite { term_id, content } => { let terminals = self.terminals.lock(); let tx = terminals.get(&term_id).unwrap(); #[allow(deprecated)] let _ = tx.send(Msg::Input(content.into_bytes().into())); } Notification::TerminalResize { term_id, width, height, } => { let terminals = self.terminals.lock(); if let Some(tx) = terminals.get(&term_id) { let size = SizeInfo::new( width as f32, height as f32, 1.0, 1.0, 0.0, 0.0, true, ); #[allow(deprecated)] let _ = tx.send(Msg::Resize(size)); } } Notification::GitCommit { message, diffs } => { if let Some(workspace) = self.workspace.lock().clone() { if let Err(_e) = git_commit(&workspace, &message, diffs) {} } } } } fn handle_request(&self, id: RequestId, rpc: Request) { match rpc { Request::NewBuffer { buffer_id, path } => { let _ = self .watcher .lock() .as_mut() .unwrap() .watch(&path, notify::RecursiveMode::Recursive); self.open_files .lock() .insert(path.to_str().unwrap().to_string(), buffer_id); let buffer = Buffer::new(buffer_id, path, self.git_sender.clone()); let content = buffer.rope.to_string(); self.buffers.lock().insert(buffer_id, buffer); let _ = self.git_sender.send((buffer_id, 0)); let resp = NewBufferResponse { content }; let _ = self.sender.send(json!({ "id": id, "result": resp, })); } #[allow(unused_variables)] Request::BufferHead { buffer_id, path } => { if let Some(workspace) = self.workspace.lock().clone() { let result = file_get_head(&workspace, &path); if let Ok((_blob_id, content)) = result { let resp = BufferHeadResponse { id: "head".to_string(), content, }; let _ = self.sender.send(json!({ "id": id, "result": resp, })); } } } Request::GetCompletion { buffer_id, position, request_id, } => { let buffers = self.buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); self.lsp .lock() .get_completion(id, request_id, buffer, position); } Request::CompletionResolve { buffer_id, completion_item, } => { let buffers = self.buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); self.lsp .lock() 
.completion_resolve(id, buffer, &completion_item); } Request::GetSignature { buffer_id, position, } => { let buffers = self.buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); self.lsp.lock().get_signature(id, buffer, position); } Request::GetReferences { buffer_id, position, } => { let buffers = self.buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); self.lsp.lock().get_references(id, buffer, position); } Request::GetDefinition { buffer_id, position, request_id, } => { let buffers = self.buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); self.lsp .lock() .get_definition(id, request_id, buffer, position); } Request::GetCodeActions { buffer_id, position, } => { let buffers = self.buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); self.lsp.lock().get_code_actions(id, buffer, position); } Request::GetDocumentSymbols { buffer_id } => { let buffers = self.buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); self.lsp.lock().get_document_symbols(id, buffer); } Request::GetDocumentFormatting { buffer_id } => { let buffers = self.buffers.lock(); let buffer = buffers.get(&buffer_id).unwrap(); self.lsp.lock().get_document_formatting(id, buffer); } Request::ReadDir { path } => { let local_dispatcher = self.clone(); thread::spawn(move || { let result = fs::read_dir(path) .map(|entries| { let items = entries .into_iter() .filter_map(|entry| { entry .map(|e| FileNodeItem { path_buf: e.path(), is_dir: e.path().is_dir(), open: false, read: false, children: HashMap::new(), children_open_count: 0, }) .ok() }) .collect::<Vec<FileNodeItem>>(); serde_json::to_value(items).unwrap() }) .map_err(|e| anyhow!(e)); local_dispatcher.respond(id, result); }); } #[allow(unused_variables)] Request::GetFiles { path } => { if let Some(workspace) = self.workspace.lock().clone() { let local_dispatcher = self.clone(); thread::spawn(move || { let mut items = Vec::new(); for path in ignore::Walk::new(workspace).flatten() { if let Some(file_type) = path.file_type() { if file_type.is_file() { items.push(path.into_path()); } } } local_dispatcher .respond(id, Ok(serde_json::to_value(items).unwrap())); }); } } Request::Save { rev, buffer_id } => { let mut buffers = self.buffers.lock(); let buffer = buffers.get_mut(&buffer_id).unwrap(); let resp = buffer.save(rev).map(|_r| json!({})); self.lsp.lock().save_buffer(buffer); self.respond(id, resp); } Request::GlobalSearch { pattern } => { if let Some(workspace) = self.workspace.lock().clone() { let local_dispatcher = self.clone(); thread::spawn(move || { let mut matches = HashMap::new(); let pattern = regex::escape(&pattern); if let Ok(matcher) = RegexMatcherBuilder::new() .case_insensitive(true) .build_literals(&[&pattern]) { let mut searcher = SearcherBuilder::new().build(); for path in ignore::Walk::new(workspace).flatten() { if let Some(file_type) = path.file_type() { if file_type.is_file() { let path = path.into_path(); let mut line_matches = Vec::new(); let _ = searcher.search_path( &matcher, path.clone(), UTF8(|lnum, line| { let mymatch = matcher .find(line.as_bytes())? 
                                        .unwrap();
                                    line_matches.push((
                                        lnum,
                                        (mymatch.start(), mymatch.end()),
                                        line.to_string(),
                                    ));
                                    Ok(true)
                                }),
                            );
                            if !line_matches.is_empty() {
                                matches.insert(path.clone(), line_matches);
                            }
                        }
                    }
                }
            }
            local_dispatcher
                .respond(id, Ok(serde_json::to_value(matches).unwrap()));
        });
    }
}
}
}
}
}

#[derive(Clone, Debug)]
pub struct DiffHunk {
    pub old_start: u32,
    pub old_lines: u32,
    pub new_start: u32,
    pub new_lines: u32,
    pub header: String,
}

fn git_commit(
    workspace_path: &Path,
    message: &str,
    diffs: Vec<FileDiff>,
) -> Result<()> {
    let repo = Repository::open(
        workspace_path
            .to_str()
            .ok_or_else(|| anyhow!("workspace path can't be converted to str"))?,
    )?;
    let mut index = repo.index()?;
    for diff in diffs {
        match diff {
            FileDiff::Modified(p) | FileDiff::Added(p) => {
                index.add_path(p.strip_prefix(workspace_path)?)?;
            }
            FileDiff::Renamed(a, d) => {
                index.add_path(a.strip_prefix(workspace_path)?)?;
                index.remove_path(d.strip_prefix(workspace_path)?)?;
            }
            FileDiff::Deleted(p) => {
                index.remove_path(p.strip_prefix(workspace_path)?)?;
            }
        }
    }
    index.write()?;

    let tree = index.write_tree()?;
    let tree = repo.find_tree(tree)?;
    let signature = repo.signature()?;
    let parent = repo.head()?.peel_to_commit()?;
    repo.commit(
        Some("HEAD"),
        &signature,
        &signature,
        message,
        &tree,
        &[&parent],
    )?;
    Ok(())
}

fn git_delta_format(
    workspace_path: &Path,
    delta: &git2::DiffDelta,
) -> Option<(git2::Delta, git2::Oid, PathBuf)> {
    match delta.status() {
        git2::Delta::Added | git2::Delta::Untracked => Some((
            git2::Delta::Added,
            delta.new_file().id(),
            delta.new_file().path().map(|p| workspace_path.join(p))?,
        )),
        git2::Delta::Deleted => Some((
            git2::Delta::Deleted,
            delta.old_file().id(),
            delta.old_file().path().map(|p| workspace_path.join(p))?,
        )),
        git2::Delta::Modified => Some((
            git2::Delta::Modified,
            delta.new_file().id(),
            delta.new_file().path().map(|p| workspace_path.join(p))?,
        )),
        _ => None,
    }
}

fn git_diff_new(workspace_path: &Path) -> Option<DiffInfo> {
    let repo = Repository::open(workspace_path.to_str()?).ok()?;
    let head = repo.head().ok()?;
    let name = head.shorthand()?.to_string();

    let mut branches = Vec::new();
    for branch in repo.branches(None).ok()? {
        branches.push(branch.ok()?.0.name().ok()??.to_string());
    }

    let mut deltas = Vec::new();
    let mut diff_options = DiffOptions::new();
    let diff = repo
        .diff_index_to_workdir(None, Some(diff_options.include_untracked(true)))
        .ok()?;
    for delta in diff.deltas() {
        if let Some(delta) = git_delta_format(workspace_path, &delta) {
            deltas.push(delta);
        }
    }
    let cached_diff = repo
        .diff_tree_to_index(
            repo.find_tree(repo.revparse_single("HEAD^{tree}").ok()?.id())
                .ok()
                .as_ref(),
            None,
            None,
        )
        .ok()?;
    for delta in cached_diff.deltas() {
        if let Some(delta) = git_delta_format(workspace_path, &delta) {
            deltas.push(delta);
        }
    }

    let mut renames = Vec::new();
    let mut renamed_deltas = HashSet::new();
    for (i, delta) in deltas.iter().enumerate() {
        if delta.0 == git2::Delta::Added {
            for (j, d) in deltas.iter().enumerate() {
                if d.0 == git2::Delta::Deleted && d.1 == delta.1 {
                    renames.push((i, j));
                    renamed_deltas.insert(i);
                    renamed_deltas.insert(j);
                    break;
                }
            }
        }
    }

    let mut file_diffs = Vec::new();
    for (i, j) in renames.iter() {
        file_diffs.push(FileDiff::Renamed(
            deltas[*i].2.clone(),
            deltas[*j].2.clone(),
        ));
    }
    for (i, delta) in deltas.iter().enumerate() {
        if renamed_deltas.contains(&i) {
            continue;
        }
        let diff = match delta.0 {
            git2::Delta::Added => FileDiff::Added(delta.2.clone()),
            git2::Delta::Deleted => FileDiff::Deleted(delta.2.clone()),
            git2::Delta::Modified => FileDiff::Modified(delta.2.clone()),
            _ => continue,
        };
        file_diffs.push(diff);
    }
    file_diffs.sort_by_key(|d| match d {
        FileDiff::Modified(p)
        | FileDiff::Added(p)
        | FileDiff::Renamed(p, _)
        | FileDiff::Deleted(p) => p.clone(),
    });
    Some(DiffInfo {
        head: name,
        branches,
        diffs: file_diffs,
    })
}

fn file_get_head(workspace_path: &Path, path: &Path) -> Result<(String, String)> {
    let repo = Repository::open(
        workspace_path
            .to_str()
            .ok_or_else(|| anyhow!("workspace path can't be converted to str"))?,
    )?;
    let head = repo.head()?;
    let tree = head.peel_to_tree()?;
    let tree_entry = tree.get_path(path.strip_prefix(workspace_path)?)?;
    let blob = repo.find_blob(tree_entry.id())?;
    let id = blob.id().to_string();
    let content = std::str::from_utf8(blob.content())
        .with_context(|| "content bytes to string")?
        .to_string();
    Ok((id, content))
}

#[allow(dead_code)]
fn file_git_diff(
    workspace_path: &Path,
    path: &Path,
    content: &str,
) -> Option<(Vec<DiffHunk>, HashMap<usize, char>)> {
    let repo = Repository::open(workspace_path.to_str()?).ok()?;
    let head = repo.head().ok()?;
    let tree = head.peel_to_tree().ok()?;
    let tree_entry = tree
        .get_path(path.strip_prefix(workspace_path).ok()?)
        .ok()?;
    let blob = repo.find_blob(tree_entry.id()).ok()?;
    let patch = git2::Patch::from_blob_and_buffer(
        &blob,
        None,
        content.as_bytes(),
        None,
        None,
    )
    .ok()?;
    let mut line_changes = HashMap::new();
    Some((
        (0..patch.num_hunks())
            .filter_map(|i| {
                let hunk = patch.hunk(i).ok()?;
                let hunk = DiffHunk {
                    old_start: hunk.0.old_start(),
                    old_lines: hunk.0.old_lines(),
                    new_start: hunk.0.new_start(),
                    new_lines: hunk.0.new_lines(),
                    header: String::from_utf8(hunk.0.header().to_vec()).ok()?,
                };
                let mut line_diff = 0;
                for line in 0..hunk.old_lines + hunk.new_lines {
                    if let Ok(diff_line) = patch.line_in_hunk(i, line as usize) {
                        match diff_line.origin() {
                            ' ' => {
                                let new_line = diff_line.new_lineno().unwrap();
                                let old_line = diff_line.old_lineno().unwrap();
                                line_diff = new_line as i32 - old_line as i32;
                            }
                            '-' => {
                                let old_line = diff_line.old_lineno().unwrap() - 1;
                                let new_line =
                                    (old_line as i32 + line_diff) as usize;
                                line_changes.insert(new_line, '-');
                                line_diff -= 1;
                            }
                            '+' => {
                                let new_line =
                                    diff_line.new_lineno().unwrap() as usize - 1;
                                if let Some(c) = line_changes.get(&new_line) {
                                    if c == &'-' {
                                        line_changes.insert(new_line, 'm');
                                    }
                                } else {
                                    line_changes.insert(new_line, '+');
                                }
                                line_diff += 1;
                            }
                            _ => continue,
                        }
                    }
                }
                Some(hunk)
            })
            .collect(),
        line_changes,
    ))
}
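The `line_changes` map built by `file_git_diff` above marks each line of the *new* buffer with '-', '+', or 'm' (a line that was removed and re-added, i.e. modified), shifting old-side line numbers by the running `line_diff` offset. Here is an illustrative Python re-implementation of that bookkeeping over (origin, old_lineno, new_lineno) tuples, mimicking git2's per-line diff data; it is a sketch of the same logic, not the proxy's actual code.

# Map '-'/'+' hunk lines onto new-buffer line numbers (0-based), merging a
# deletion followed by an addition of the same line into 'm'. Input tuples are
# (origin, old_lineno, new_lineno), 1-based, with None when a side is absent.
def line_changes(diff_lines):
    changes = {}
    line_diff = 0  # running offset between new and old numbering
    for origin, old_lineno, new_lineno in diff_lines:
        if origin == ' ':
            line_diff = new_lineno - old_lineno
        elif origin == '-':
            new_line = (old_lineno - 1) + line_diff
            changes[new_line] = '-'
            line_diff -= 1
        elif origin == '+':
            new_line = new_lineno - 1
            if changes.get(new_line) == '-':
                changes[new_line] = 'm'  # removed then re-added: modified
            else:
                changes.setdefault(new_line, '+')
            line_diff += 1
    return changes

# A one-line edit shows up as 'm' rather than as separate '-' and '+':
print(line_changes([(' ', 1, 1), ('-', 2, None), ('+', None, 2)]))  # {1: 'm'}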
handle_notification
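Each row in this dump is a fill-in-the-middle split: concatenating the prefix cell, the middle cell, and the suffix cell reconstructs the original file (here, the prefix ends with `fn`, the middle is `handle_notification`, and the suffix resumes at `(&self, rpc: Notification)`). A sketch of how such a row is typically assembled into a training string follows; the sentinel token names and the exact whitespace around the middle are assumptions, since they vary by model and by how the split was taken.

# Sketch of consuming a row like the one above for fill-in-the-middle
# training. The sentinel names below are placeholders, not this corpus's.
FIM_PREFIX, FIM_SUFFIX, FIM_MIDDLE = "<fim_prefix>", "<fim_suffix>", "<fim_middle>"

def to_fim_example(prefix: str, middle: str, suffix: str) -> str:
    # Prefix-suffix-middle ordering: the model sees both context halves,
    # then learns to emit the missing span.
    return f"{FIM_PREFIX}{prefix}{FIM_SUFFIX}{suffix}{FIM_MIDDLE}{middle}"

# Sanity check: prefix + middle + suffix must reconstruct the source.
prefix, middle, suffix = "... fn", " handle_notification", "(&self, rpc: Notification) ..."
assert (prefix + middle + suffix).count("fn handle_notification(") == 1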
serial_entry_reward_model.py
from typing import Union, Optional, List, Any, Tuple
import os
import torch
import logging
from functools import partial
from tensorboardX import SummaryWriter

from ding.envs import get_vec_env_setting, create_env_manager
from ding.worker import BaseLearner, InteractionSerialEvaluator, BaseSerialCommander, create_buffer, \
    create_serial_collector
from ding.config import read_config, compile_config
from ding.policy import create_policy, PolicyFactory
from ding.reward_model import create_reward_model
from ding.utils import set_pkg_seed


def serial_pipeline_reward_model(
        input_cfg: Union[str, Tuple[dict, dict]],
        seed: int = 0,
        env_setting: Optional[List[Any]] = None,
        model: Optional[torch.nn.Module] = None,
        max_iterations: Optional[int] = int(1e10),
) -> 'Policy':  # noqa
    """
    Overview:
        Serial pipeline entry with reward model.
    Arguments:
        - input_cfg (:obj:`Union[str, Tuple[dict, dict]]`): Config in dict type. \
            ``str`` type means config file path. \
            ``Tuple[dict, dict]`` type means [user_config, create_cfg].
        - seed (:obj:`int`): Random seed.
        - env_setting (:obj:`Optional[List[Any]]`): A list with 3 elements: \
            ``BaseEnv`` subclass, collector env config, and evaluator env config.
        - model (:obj:`Optional[torch.nn.Module]`): Instance of torch.nn.Module.
        - max_iterations (:obj:`Optional[int]`): Learner's max iteration. Pipeline will stop \
            when reaching this iteration.
    Returns:
        - policy (:obj:`Policy`): Converged policy.
    """
    if isinstance(input_cfg, str):
    else:
        cfg, create_cfg = input_cfg
    create_cfg.policy.type = create_cfg.policy.type + '_command'
    env_fn = None if env_setting is None else env_setting[0]
    cfg = compile_config(cfg, seed=seed, env=env_fn, auto=True, create_cfg=create_cfg, save_cfg=True)
    # Create main components: env, policy
    if env_setting is None:
        env_fn, collector_env_cfg, evaluator_env_cfg = get_vec_env_setting(cfg.env)
    else:
        env_fn, collector_env_cfg, evaluator_env_cfg = env_setting
    collector_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in collector_env_cfg])
    evaluator_env = create_env_manager(cfg.env.manager, [partial(env_fn, cfg=c) for c in evaluator_env_cfg])
    collector_env.seed(cfg.seed)
    evaluator_env.seed(cfg.seed, dynamic_seed=False)
    set_pkg_seed(cfg.seed, use_cuda=cfg.policy.cuda)
    policy = create_policy(cfg.policy, model=model, enable_field=['learn', 'collect', 'eval', 'command'])

    # Create worker components: learner, collector, evaluator, replay buffer, commander.
    tb_logger = SummaryWriter(os.path.join('./{}/log/'.format(cfg.exp_name), 'serial'))
    learner = BaseLearner(cfg.policy.learn.learner, policy.learn_mode, tb_logger, exp_name=cfg.exp_name)
    collector = create_serial_collector(
        cfg.policy.collect.collector,
        env=collector_env,
        policy=policy.collect_mode,
        tb_logger=tb_logger,
        exp_name=cfg.exp_name
    )
    evaluator = InteractionSerialEvaluator(
        cfg.policy.eval.evaluator, evaluator_env, policy.eval_mode, tb_logger, exp_name=cfg.exp_name
    )
    replay_buffer = create_buffer(cfg.policy.other.replay_buffer, tb_logger=tb_logger, exp_name=cfg.exp_name)
    commander = BaseSerialCommander(
        cfg.policy.other.commander, learner, collector, evaluator, replay_buffer, policy.command_mode
    )
    reward_model = create_reward_model(cfg.reward_model, policy.collect_mode.get_attribute('device'), tb_logger)

    # ==========
    # Main loop
    # ==========
    # Learner's before_run hook.
    learner.call_hook('before_run')

    # Accumulate plenty of data at the beginning of training.
    if cfg.policy.get('random_collect_size', 0) > 0:
        action_space = collector_env.env_info().act_space
        random_policy = PolicyFactory.get_random_policy(policy.collect_mode, action_space=action_space)
        collector.reset_policy(random_policy)
        collect_kwargs = commander.step()
        new_data = collector.collect(n_sample=cfg.policy.random_collect_size, policy_kwargs=collect_kwargs)
        replay_buffer.push(new_data, cur_collector_envstep=0)
        collector.reset_policy(policy.collect_mode)
    for _ in range(max_iterations):
        collect_kwargs = commander.step()
        # Evaluate policy performance
        if evaluator.should_eval(learner.train_iter):
            stop, reward = evaluator.eval(learner.save_checkpoint, learner.train_iter, collector.envstep)
            if stop:
                break
        new_data_count, target_new_data_count = 0, cfg.reward_model.get('target_new_data_count', 1)
        while new_data_count < target_new_data_count:
            new_data = collector.collect(train_iter=learner.train_iter, policy_kwargs=collect_kwargs)
            new_data_count += len(new_data)
            # collect data for reward_model training
            reward_model.collect_data(new_data)
            replay_buffer.push(new_data, cur_collector_envstep=collector.envstep)
        # update reward_model
        reward_model.train()
        reward_model.clear_data()
        # Learn policy from collected data
        for i in range(cfg.policy.learn.update_per_collect):
            # Learner will train ``update_per_collect`` times in one iteration.
            train_data = replay_buffer.sample(learner.policy.get_attribute('batch_size'), learner.train_iter)
            if train_data is None:
                # It is possible that replay buffer's data count is too small to train ``update_per_collect`` times
                logging.warning(
                    "Replay buffer's data can only train for {} steps. ".format(i) +
                    "You can modify data collect config, e.g. increasing n_sample, n_episode."
                )
                break
            # update train_data reward
            reward_model.estimate(train_data)
            learner.train(train_data, collector.envstep)
            if learner.policy.get_attribute('priority'):
                replay_buffer.update(learner.priority_info)
        if cfg.policy.on_policy:
            # On-policy algorithm must clear the replay buffer.
            replay_buffer.clear()

    # Learner's after_run hook.
    learner.call_hook('after_run')
    return policy
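The loop above interleaves two learners: fresh trajectories first feed `reward_model.collect_data` and `reward_model.train`, and then `reward_model.estimate` relabels each sampled batch's rewards before `learner.train` consumes it. A condensed stub sketch of that ordering follows; the class and field names are placeholders, not DI-engine's real interfaces.

# Stub sketch of the collect -> fit-reward-model -> relabel -> policy-update
# ordering used above. All names here are illustrative placeholders.
class StubRewardModel:
    def __init__(self):
        self.data = []
    def collect_data(self, batch):
        self.data.extend(batch)
    def train(self):
        pass  # fit the reward model on self.data
    def clear_data(self):
        self.data = []
    def estimate(self, batch):
        for t in batch:
            t["reward"] = 0.0  # placeholder for the learned reward

reward_model = StubRewardModel()
new_data = [{"obs": None, "reward": 1.0}]  # pretend trajectory step
reward_model.collect_data(new_data)   # 1. gather data for the reward model
reward_model.train()                  # 2. update the reward model
reward_model.clear_data()
reward_model.estimate(new_data)       # 3. relabel rewards in-place
assert new_data[0]["reward"] == 0.0   # the policy trains on the learned reward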
cfg, create_cfg = read_config(input_cfg)
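For context, the docstring above names two accepted forms of `input_cfg`: a config file path (handled by this row's middle via `read_config`) or an already-loaded `(user_config, create_cfg)` pair. A hypothetical invocation, with an illustrative path:

# Hypothetical entry-point usage; the config path is illustrative only.
if __name__ == "__main__":
    policy = serial_pipeline_reward_model("path/to/my_config.py", seed=0)
    # Equivalently, pass the already-loaded pair instead of a file path:
    # policy = serial_pipeline_reward_model((user_config, create_cfg), seed=0)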
index.esm.d.ts
/** * Top-level type definitions. These are processed by Rollup and rollup-plugin-dts * to make a combined .d.ts file under dist; that way, all of the type definitions * appear directly within the "chart.js" module; that matches the layout of the * distributed chart.esm.js bundle and means that users of Chart.js can easily use * module augmentation to extend Chart.js's types and plugins within their own * code, like so: * * @example * declare module "chart.js" { * // Add types here * } */ import { DeepPartial, DistributiveArray, UnionToIntersection } from './utils'; import { TimeUnit } from './adapters'; import { AnimationEvent } from './animation'; import { AnyObject, EmptyObject } from './basic'; import { Color } from './color'; import { Element } from './element'; import { ChartArea, Point } from './geometric'; import { LayoutItem, LayoutPosition } from './layout'; export { DateAdapter, TimeUnit, _adapters } from './adapters'; export { Animation, Animations, Animator, AnimationEvent } from './animation'; export { Color } from './color'; export { Element } from './element'; export { ChartArea, Point } from './geometric'; export { LayoutItem, LayoutPosition } from './layout'; export interface ScriptableContext<TType extends ChartType> { active: boolean; chart: UnionToIntersection<Chart<TType>>; dataIndex: number; dataset: UnionToIntersection<ChartDataset<TType>>; datasetIndex: number; parsed: UnionToIntersection<ParsedDataType<TType>>; raw: unknown; } export type Scriptable<T, TContext> = T | ((ctx: TContext) => T); export type ScriptableOptions<T, TContext> = { [P in keyof T]: Scriptable<T[P], TContext> }; export type ScriptableAndArray<T, TContext> = readonly T[] | Scriptable<T, TContext>; export type ScriptableAndArrayOptions<T, TContext> = { [P in keyof T]: ScriptableAndArray<T[P], TContext> }; export interface ParsingOptions { /** * How to parse the dataset. The parsing can be disabled by specifying parsing: false at chart options or dataset. If parsing is disabled, data must be sorted and in the formats the associated chart type and scales use internally. */ parsing: { [key: string]: string; } | false; /** * Chart.js is fastest if you provide data with indices that are unique, sorted, and consistent across datasets and provide the normalized: true option to let Chart.js know that you have done so. */ normalized: boolean; } export interface ControllerDatasetOptions extends ParsingOptions { /** * The base axis of the chart. 'x' for vertical charts and 'y' for horizontal charts. * @default 'x' */ indexAxis: 'x' | 'y'; /** * How to clip relative to chartArea. Positive value allows overflow, negative value clips that many pixels inside chartArea. 0 = clip at chartArea. Clipping can also be configured per side: clip: {left: 5, top: false, right: -2, bottom: 0} */ clip: number | ChartArea; /** * The label for the dataset which appears in the legend and tooltips. */ label: string; /** * The drawing order of dataset. Also affects order for stacking, tooltip and legend. */ order: number; /** * The ID of the group to which this dataset belongs to (when stacked, each group will be a separate stack). */ stack: string; /** * Configures the visibility state of the dataset. Set it to true, to hide the dataset from the chart. 
* @default false */ hidden: boolean; } export interface BarControllerDatasetOptions extends ControllerDatasetOptions, ScriptableAndArrayOptions<BarOptions, ScriptableContext<'bar'>>, ScriptableAndArrayOptions<CommonHoverOptions, ScriptableContext<'bar'>>, AnimationOptions<'bar'> { /** * The ID of the x axis to plot this dataset on. */ xAxisID: string; /** * The ID of the y axis to plot this dataset on. */ yAxisID: string; /** * Percent (0-1) of the available width each bar should be within the category width. 1.0 will take the whole category width and put the bars right next to each other. * @default 0.9 */ barPercentage: number; /** * Percent (0-1) of the available width each category should be within the sample width. * @default 0.8 */ categoryPercentage: number; /** * Manually set width of each bar in pixels. If set to 'flex', it computes "optimal" sample widths that globally arrange bars side by side. If not set (default), bars are equally sized based on the smallest interval. */ barThickness: number | 'flex'; /** * Set this to ensure that bars are not sized thicker than this. */ maxBarThickness: number; /** * Set this to ensure that bars have a minimum length in pixels. */ minBarLength: number; /** * Point style for the legend * @default 'circle; */ pointStyle: PointStyle; } export interface BarControllerChartOptions { /** * Should null or undefined values be omitted from drawing */ skipNull?: boolean; } export type BarController = DatasetController export const BarController: ChartComponent & { prototype: BarController; new (chart: Chart, datasetIndex: number): BarController; }; export interface BubbleControllerDatasetOptions extends ControllerDatasetOptions, ScriptableAndArrayOptions<PointOptions, ScriptableContext<'bubble'>>, ScriptableAndArrayOptions<PointHoverOptions, ScriptableContext<'bubble'>> {} export interface BubbleDataPoint { /** * X Value */ x: number; /** * Y Value */ y: number; /** * Bubble radius in pixels (not scaled). */ r: number; } export type BubbleController = DatasetController export const BubbleController: ChartComponent & { prototype: BubbleController; new (chart: Chart, datasetIndex: number): BubbleController; }; export interface LineControllerDatasetOptions extends ControllerDatasetOptions, ScriptableAndArrayOptions<PointPrefixedOptions, ScriptableContext<'line'>>, ScriptableAndArrayOptions<PointPrefixedHoverOptions, ScriptableContext<'line'>>, ScriptableOptions<LineOptions, ScriptableContext<'line'>>, ScriptableOptions<LineHoverOptions, ScriptableContext<'line'>>, AnimationOptions<'line'> { /** * The ID of the x axis to plot this dataset on. */ xAxisID: string; /** * The ID of the y axis to plot this dataset on. */ yAxisID: string; /** * If true, lines will be drawn between points with no or null data. If false, points with NaN data will create a break in the line. Can also be a number specifying the maximum gap length to span. The unit of the value depends on the scale used. * @default false */ spanGaps: boolean | number; showLine: boolean; } export interface LineControllerChartOptions { /** * If true, lines will be drawn between points with no or null data. If false, points with NaN data will create a break in the line. Can also be a number specifying the maximum gap length to span. The unit of the value depends on the scale used. * @default false */ spanGaps: boolean | number; /** * If false, the lines between points are not drawn. 
* @default true */ showLine: boolean; } export type LineController = DatasetController export const LineController: ChartComponent & { prototype: LineController; new (chart: Chart, datasetIndex: number): LineController; }; export type ScatterControllerDatasetOptions = LineControllerDatasetOptions; export interface ScatterDataPoint { x: number; y: number; } export type ScatterControllerChartOptions = LineControllerChartOptions; export type ScatterController = LineController export const ScatterController: ChartComponent & { prototype: ScatterController; new (chart: Chart, datasetIndex: number): ScatterController; }; export interface DoughnutControllerDatasetOptions extends ControllerDatasetOptions, ScriptableAndArrayOptions<ArcOptions, ScriptableContext<'doughnut'>>, ScriptableAndArrayOptions<ArcHoverOptions, ScriptableContext<'doughnut'>>, AnimationOptions<'doughnut'> { /** * Sweep to allow arcs to cover. * @default 360 */ circumference: number; /** * Starting angle to draw this dataset from. * @default 0 */ rotation: number; /** * The relative thickness of the dataset. Providing a value for weight will cause the pie or doughnut dataset to be drawn with a thickness relative to the sum of all the dataset weight values. * @default 1 */ weight: number; } export interface DoughnutAnimationOptions { /** * If true, the chart will animate in with a rotation animation. This property is in the options.animation object. * @default true */ animateRotate: boolean; /** * If true, will animate scaling the chart from the center outwards. * @default false */ animateScale: boolean; } export interface DoughnutControllerChartOptions { /** * Sweep to allow arcs to cover. * @default 360 */ circumference: number; /** * The portion of the chart that is cut out of the middle. ('50%' - for doughnut, 0 - for pie) * String ending with '%' means percentage, number means pixels. * @default 50 */ cutout: Scriptable<number | string, ScriptableContext<'doughnut'>>; /** * The outer radius of the chart. String ending with '%' means percentage of maximum radius, number means pixels. * @default '100%' */ radius: Scriptable<number | string, ScriptableContext<'doughnut'>>; /** * Starting angle to draw arcs from. * @default 0 */ rotation: number; animation: DoughnutAnimationOptions; } export type DoughnutDataPoint = number; export interface DoughnutController extends DatasetController { readonly innerRadius: number; readonly outerRadius: number; readonly offsetX: number; readonly offsetY: number; calculateTotal(): number; calculateCircumference(value: number): number; } export const DoughnutController: ChartComponent & { prototype: DoughnutController; new (chart: Chart, datasetIndex: number): DoughnutController; }; export type PieControllerDatasetOptions = DoughnutControllerDatasetOptions; export type PieControllerChartOptions = DoughnutControllerChartOptions; export type PieAnimationOptions = DoughnutAnimationOptions; export type PieDataPoint = DoughnutDataPoint; export type PieController = DoughnutController export const PieController: ChartComponent & { prototype: PieController; new (chart: Chart, datasetIndex: number): PieController; }; export interface PolarAreaControllerDatasetOptions extends DoughnutControllerDatasetOptions { /** * Arc angle to cover. - for polar only * @default circumference / (arc count) */ angle: number; } export type PolarAreaAnimationOptions = DoughnutAnimationOptions; export interface PolarAreaControllerChartOptions { /** * Starting angle to draw arcs for the first item in a dataset. 
In degrees, 0 is at top. * @default 0 */ startAngle: number; animation: PolarAreaAnimationOptions; } export interface PolarAreaController extends DoughnutController { countVisibleElements(): number; } export const PolarAreaController: ChartComponent & { prototype: PolarAreaController; new (chart: Chart, datasetIndex: number): PolarAreaController; }; export interface RadarControllerDatasetOptions extends ControllerDatasetOptions, ScriptableOptions<PointPrefixedOptions, ScriptableContext<'radar'>>, ScriptableOptions<PointPrefixedHoverOptions, ScriptableContext<'radar'>>, ScriptableOptions<LineOptions, ScriptableContext<'radar'>>, ScriptableOptions<LineHoverOptions, ScriptableContext<'radar'>>, AnimationOptions<'radar'> { /** * The ID of the x axis to plot this dataset on. */ xAxisID: string; /** * The ID of the y axis to plot this dataset on. */ yAxisID: string; /** * If true, lines will be drawn between points with no or null data. If false, points with NaN data will create a break in the line. Can also be a number specifying the maximum gap length to span. The unit of the value depends on the scale used. */ spanGaps: boolean | number; /** * If false, the line is not drawn for this dataset. */ showLine: boolean; } export type RadarControllerChartOptions = LineControllerChartOptions; export type RadarController = DatasetController export const RadarController: ChartComponent & { prototype: RadarController; new (chart: Chart, datasetIndex: number): RadarController; }; export interface ChartMeta<TElement extends Element = Element, TDatasetElement extends Element = Element> { type: string; controller: DatasetController; order: number; label: string; index: number; visible: boolean; stack: number; indexAxis: 'x' | 'y'; data: TElement[]; dataset?: TDatasetElement; hidden: boolean; xAxisID?: string; yAxisID?: string; rAxisID?: string; iAxisID: string; vAxisID: string; xScale?: Scale; yScale?: Scale; rScale?: Scale; iScale?: Scale; vScale?: Scale; _sorted: boolean; _stacked: boolean | 'single'; _parsed: unknown[]; } export interface ActiveDataPoint { datasetIndex: number; index: number; } export interface ActiveElement extends ActiveDataPoint { element: Element; } export declare class Chart< TType extends ChartType = ChartType, TData = DefaultDataPoint<TType>, TLabel = unknown > { readonly platform: BasePlatform; readonly id: string; readonly canvas: HTMLCanvasElement; readonly ctx: CanvasRenderingContext2D; readonly config: ChartConfiguration<TType, TData, TLabel> readonly width: number; readonly height: number; readonly aspectRatio: number; readonly boxes: LayoutItem[]; readonly currentDevicePixelRatio: number; readonly chartArea: ChartArea; readonly scales: { [key: string]: Scale }; readonly attached: boolean; data: ChartData<TType, TData, TLabel>; options: ChartOptions<TType>; constructor(item: ChartItem, config: ChartConfiguration<TType, TData, TLabel>); clear(): this; stop(): this; resize(width?: number, height?: number): void; ensureScalesHaveIDs(): void; buildOrUpdateScales(): void; buildOrUpdateControllers(): void; reset(): void; update(mode?: UpdateMode): void; render(): void; draw(): void; getElementsAtEventForMode(e: Event, mode: string, options: InteractionOptions, useFinalPosition: boolean): InteractionItem[]; getSortedVisibleDatasetMetas(): ChartMeta[]; getDatasetMeta(datasetIndex: number): ChartMeta; getVisibleDatasetCount(): number; isDatasetVisible(datasetIndex: number): boolean; setDatasetVisibility(datasetIndex: number, visible: boolean): void; toggleDataVisibility(index: 
number): void; getDataVisibility(index: number): boolean; hide(datasetIndex: number): void; show(datasetIndex: number): void; getActiveElements(): ActiveElement[]; setActiveElements(active: ActiveDataPoint[]): void; destroy(): void; toBase64Image(type?: string, quality?: unknown): string; bindEvents(): void; unbindEvents(): void; updateHoverStyle(items: Element, mode: 'dataset', enabled: boolean): void; notifyPlugins(hook: string, args?: AnyObject): boolean | void; static readonly defaults: Defaults; static readonly overrides: Overrides; static readonly version: string; static readonly instances: { [key: string]: Chart }; static readonly registry: Registry; static getChart(key: string | CanvasRenderingContext2D | HTMLCanvasElement): Chart | undefined; static register(...items: ChartComponentLike[]): void; static unregister(...items: ChartComponentLike[]): void; } export const registerables: readonly ChartComponentLike[]; export declare type ChartItem = | string | CanvasRenderingContext2D | OffscreenCanvasRenderingContext2D | HTMLCanvasElement | OffscreenCanvas | { canvas: HTMLCanvasElement | OffscreenCanvas } | ArrayLike<CanvasRenderingContext2D | HTMLCanvasElement | OffscreenCanvas>; export declare enum UpdateModeEnum { resize = 'resize', reset = 'reset', none = 'none', hide = 'hide', show = 'show', normal = 'normal', active = 'active' } export type UpdateMode = keyof typeof UpdateModeEnum; export class DatasetController< TType extends ChartType = ChartType, TElement extends Element = Element, TDatasetElement extends Element = Element, TParsedData = ParsedDataType<TType>, > { constructor(chart: Chart, datasetIndex: number); readonly chart: Chart; readonly index: number; readonly _cachedMeta: ChartMeta<TElement, TDatasetElement>; enableOptionSharing: boolean; linkScales(): void; getAllParsedValues(scale: Scale): number[]; protected getLabelAndValue(index: number): { label: string; value: string }; updateElements(elements: TElement[], start: number, count: number, mode: UpdateMode): void; update(mode: UpdateMode): void; updateIndex(datasetIndex: number): void; protected getMaxOverflow(): boolean | number; draw(): void; reset(): void; getDataset(): ChartDataset; getMeta(): ChartMeta<TElement, TDatasetElement>; getScaleForId(scaleID: string): Scale | undefined; configure(): void; initialize(): void; addElements(): void; buildOrUpdateElements(resetNewElements?: boolean): void; getStyle(index: number, active: boolean): AnyObject; protected resolveDatasetElementOptions(mode: UpdateMode): AnyObject; protected resolveDataElementOptions(index: number, mode: UpdateMode): AnyObject; /** * Utility for checking if the options are shared and should be animated separately. * @protected */ protected getSharedOptions(options: AnyObject): undefined | AnyObject; /** * Utility for determining if `options` should be included in the updated properties * @protected */ protected includeOptions(mode: UpdateMode, sharedOptions: AnyObject): boolean; /** * Utility for updating an element with new properties, using animations when appropriate. * @protected */ protected updateElement(element: TElement | TDatasetElement, index: number | undefined, properties: AnyObject, mode: UpdateMode): void; /** * Utility to animate the shared options, that are potentially affecting multiple elements. 
* @protected */ protected updateSharedOptions(sharedOptions: AnyObject, mode: UpdateMode, newOptions: AnyObject): void; removeHoverStyle(element: TElement, datasetIndex: number, index: number): void; setHoverStyle(element: TElement, datasetIndex: number, index: number): void; parse(start: number, count: number): void; protected parsePrimitiveData(meta: ChartMeta<TElement, TDatasetElement>, data: AnyObject[], start: number, count: number): AnyObject[]; protected parseArrayData(meta: ChartMeta<TElement, TDatasetElement>, data: AnyObject[], start: number, count: number): AnyObject[]; protected parseObjectData(meta: ChartMeta<TElement, TDatasetElement>, data: AnyObject[], start: number, count: number): AnyObject[]; protected getParsed(index: number): TParsedData; protected applyStack(scale: Scale, parsed: unknown[]): number; protected updateRangeFromParsed( range: { min: number; max: number }, scale: Scale, parsed: unknown[], stack: boolean | string ): void; protected getMinMax(scale: Scale, canStack?: boolean): { min: number; max: number }; } export interface DatasetControllerChartComponent extends ChartComponent { defaults: { datasetElementType?: string | null | false; dataElementType?: string | null | false; }; } export interface Defaults extends CoreChartOptions<ChartType>, ElementChartOptions, PluginChartOptions<ChartType> { scale: ScaleOptionsByType; scales: { [key in ScaleType]: ScaleOptionsByType<key>; }; set(values: AnyObject): AnyObject; set(scope: string, values: AnyObject): AnyObject; get(scope: string): AnyObject; /** * Routes the named defaults to fallback to another scope/name. * This routing is useful when those target values, like defaults.color, are changed runtime. * If the values would be copied, the runtime change would not take effect. By routing, the * fallback is evaluated at each access, so its always up to date. * * Example: * * defaults.route('elements.arc', 'backgroundColor', '', 'color') * - reads the backgroundColor from defaults.color when undefined locally * * @param scope Scope this route applies to. * @param name Property name that should be routed to different namespace when not defined here. * @param targetScope The namespace where those properties should be routed to. * Empty string ('') is the root of defaults. * @param targetName The target name in the target scope the property should be routed to. */ route(scope: string, name: string, targetScope: string, targetName: string): void; } export type Overrides = { [key in ChartType]: DeepPartial< CoreChartOptions<key> & ElementChartOptions & PluginChartOptions<key> & DatasetChartOptions<ChartType> & ScaleChartOptions<key> & ChartTypeRegistry[key]['chartOptions'] >; } export const defaults: Defaults; export interface InteractionOptions { axis?: string; intersect?: boolean; } export interface InteractionItem { element: Element; datasetIndex: number; index: number; } export type InteractionModeFunction = ( chart: Chart, e: ChartEvent, options: InteractionOptions, useFinalPosition?: boolean ) => InteractionItem[]; export interface InteractionModeMap { /** * Returns items at the same index. If the options.intersect parameter is true, we only return items if we intersect something * If the options.intersect mode is false, we find the nearest item and return the items at the same index as that item */ index: InteractionModeFunction; /** * Returns items in the same dataset. 
If the options.intersect parameter is true, we only return items if we intersect something * If the options.intersect is false, we find the nearest item and return the items in that dataset */ dataset: InteractionModeFunction; /** * Point mode returns all elements that hit test based on the event position * of the event */ point: InteractionModeFunction; /** * nearest mode returns the element closest to the point */ nearest: InteractionModeFunction; /** * x mode returns the elements that hit-test at the current x coordinate */ x: InteractionModeFunction; /** * y mode returns the elements that hit-test at the current y coordinate */ y: InteractionModeFunction; } export type InteractionMode = keyof InteractionModeMap; export const Interaction: { modes: InteractionModeMap; }; export const layouts: { /** * Register a box to a chart. * A box is simply a reference to an object that requires layout. eg. Scales, Legend, Title. * @param {Chart} chart - the chart to use * @param {LayoutItem} item - the item to add to be laid out */ addBox(chart: Chart, item: LayoutItem): void; /** * Remove a layoutItem from a chart * @param {Chart} chart - the chart to remove the box from * @param {LayoutItem} layoutItem - the item to remove from the layout */ removeBox(chart: Chart, layoutItem: LayoutItem): void; /** * Sets (or updates) options on the given `item`. * @param {Chart} chart - the chart in which the item lives (or will be added to) * @param {LayoutItem} item - the item to configure with the given options * @param options - the new item options. */ configure( chart: Chart, item: LayoutItem, options: { fullSize?: number; position?: LayoutPosition; weight?: number } ): void; /** * Fits boxes of the given chart into the given size by having each box measure itself * then running a fitting algorithm * @param {Chart} chart - the chart * @param {number} width - the width to fit into * @param {number} height - the height to fit into */ update(chart: Chart, width: number, height: number): void; }; export interface Plugin<TType extends ChartType = ChartType, O = AnyObject> extends ExtendedPlugin<TType> { id: string; /** * @desc Called when plugin is installed for this chart instance. This hook is also invoked for disabled plugins (options === false). * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {object} options - The plugin options. * @since 3.0.0 */ install?(chart: Chart, args: EmptyObject, options: O): void; /** * @desc Called when a plugin is starting. This happens when chart is created or plugin is enabled. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {object} options - The plugin options. * @since 3.0.0 */ start?(chart: Chart, args: EmptyObject, options: O): void; /** * @desc Called when a plugin stopping. This happens when chart is destroyed or plugin is disabled. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {object} options - The plugin options. * @since 3.0.0 */ stop?(chart: Chart, args: EmptyObject, options: O): void; /** * @desc Called before initializing `chart`. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {object} options - The plugin options. */ beforeInit?(chart: Chart, args: EmptyObject, options: O): void; /** * @desc Called after `chart` has been initialized and before the first update. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. 
* @param {object} options - The plugin options. */ afterInit?(chart: Chart, args: EmptyObject, options: O): void; /** * @desc Called before updating `chart`. If any plugin returns `false`, the update * is cancelled (and thus subsequent render(s)) until another `update` is triggered. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {UpdateMode} args.mode - The update mode * @param {object} options - The plugin options. * @returns {boolean} `false` to cancel the chart update. */ beforeUpdate?(chart: Chart, args: { mode: UpdateMode, cancelable: true }, options: O): boolean | void; /** * @desc Called after `chart` has been updated and before rendering. Note that this * hook will not be called if the chart update has been previously cancelled. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {UpdateMode} args.mode - The update mode * @param {object} options - The plugin options. */ afterUpdate?(chart: Chart, args: { mode: UpdateMode }, options: O): void; /** * @desc Called during the update process, before any chart elements have been created. * This can be used for data decimation by changing the data array inside a dataset. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {object} options - The plugin options. */ beforeElementsUpdate?(chart: Chart, args: EmptyObject, options: O): void; /** * @desc Called during chart reset * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {object} options - The plugin options. * @since version 3.0.0 */ reset?(chart: Chart, args: EmptyObject, options: O): void; /** * @desc Called before updating the `chart` datasets. If any plugin returns `false`, * the datasets update is cancelled until another `update` is triggered. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {UpdateMode} args.mode - The update mode. * @param {object} options - The plugin options. * @returns {boolean} false to cancel the datasets update. * @since version 2.1.5 */ beforeDatasetsUpdate?(chart: Chart, args: { mode: UpdateMode }, options: O): boolean | void; /** * @desc Called after the `chart` datasets have been updated. Note that this hook * will not be called if the datasets update has been previously cancelled. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {UpdateMode} args.mode - The update mode. * @param {object} options - The plugin options. * @since version 2.1.5 */ afterDatasetsUpdate?(chart: Chart, args: { mode: UpdateMode, cancelable: true }, options: O): void; /** * @desc Called before updating the `chart` dataset at the given `args.index`. If any plugin * returns `false`, the datasets update is cancelled until another `update` is triggered. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {number} args.index - The dataset index. * @param {object} args.meta - The dataset metadata. * @param {UpdateMode} args.mode - The update mode. * @param {object} options - The plugin options. * @returns {boolean} `false` to cancel the chart datasets drawing. */ beforeDatasetUpdate?(chart: Chart, args: { index: number; meta: ChartMeta, mode: UpdateMode, cancelable: true }, options: O): boolean | void; /** * @desc Called after the `chart` datasets at the given `args.index` has been updated. 
Note
   * that this hook will not be called if the datasets update has been previously cancelled.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {number} args.index - The dataset index.
   * @param {object} args.meta - The dataset metadata.
   * @param {UpdateMode} args.mode - The update mode.
   * @param {object} options - The plugin options.
   */
  afterDatasetUpdate?(chart: Chart, args: { index: number; meta: ChartMeta, mode: UpdateMode, cancelable: false }, options: O): void;
  /**
   * @desc Called before laying out `chart`. If any plugin returns `false`,
   * the layout update is cancelled until another `update` is triggered.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   * @returns {boolean} `false` to cancel the chart layout.
   */
  beforeLayout?(chart: Chart, args: { cancelable: true }, options: O): boolean | void;
  /**
   * @desc Called before scale data limits are calculated. This hook is called separately for each scale in the chart.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {Scale} args.scale - The scale.
   * @param {object} options - The plugin options.
   */
  beforeDataLimits?(chart: Chart, args: { scale: Scale }, options: O): void;
  /**
   * @desc Called after scale data limits are calculated. This hook is called separately for each scale in the chart.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {Scale} args.scale - The scale.
   * @param {object} options - The plugin options.
   */
  afterDataLimits?(chart: Chart, args: { scale: Scale }, options: O): void;
  /**
   * @desc Called before scale builds its ticks. This hook is called separately for each scale in the chart.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {Scale} args.scale - The scale.
   * @param {object} options - The plugin options.
   */
  beforeBuildTicks?(chart: Chart, args: { scale: Scale }, options: O): void;
  /**
   * @desc Called after scale has built its ticks. This hook is called separately for each scale in the chart.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {Scale} args.scale - The scale.
   * @param {object} options - The plugin options.
   */
  afterBuildTicks?(chart: Chart, args: { scale: Scale }, options: O): void;
  /**
   * @desc Called after the `chart` has been laid out. Note that this hook will not
   * be called if the layout update has been previously cancelled.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   */
  afterLayout?(chart: Chart, args: EmptyObject, options: O): void;
  /**
   * @desc Called before rendering `chart`. If any plugin returns `false`,
   * the rendering is cancelled until another `render` is triggered.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   * @returns {boolean} `false` to cancel the chart rendering.
   */
  beforeRender?(chart: Chart, args: { cancelable: true }, options: O): boolean | void;
  /**
   * @desc Called after the `chart` has been fully rendered (and animation completed). Note
   * that this hook will not be called if the rendering has been previously cancelled.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   */
  afterRender?(chart: Chart, args: EmptyObject, options: O): void;
  /**
   * @desc Called before drawing `chart` at every animation frame. If any plugin returns `false`,
   * the frame drawing is cancelled until another `render` is triggered.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   * @returns {boolean} `false` to cancel the chart drawing.
   */
  beforeDraw?(chart: Chart, args: { cancelable: true }, options: O): boolean | void;
  /**
   * @desc Called after the `chart` has been drawn. Note that this hook will not be called
   * if the drawing has been previously cancelled.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   */
  afterDraw?(chart: Chart, args: EmptyObject, options: O): void;
  /**
   * @desc Called before drawing the `chart` datasets. If any plugin returns `false`,
   * the datasets drawing is cancelled until another `render` is triggered.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   * @returns {boolean} `false` to cancel the chart datasets drawing.
   */
  beforeDatasetsDraw?(chart: Chart, args: { cancelable: true }, options: O): boolean | void;
  /**
   * @desc Called after the `chart` datasets have been drawn. Note that this hook
   * will not be called if the datasets drawing has been previously cancelled.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   */
  afterDatasetsDraw?(chart: Chart, args: EmptyObject, options: O, cancelable: false): void;
  /**
   * @desc Called before drawing the `chart` dataset at the given `args.index` (datasets
   * are drawn in the reverse order). If any plugin returns `false`, the datasets drawing
   * is cancelled until another `render` is triggered.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {number} args.index - The dataset index.
   * @param {object} args.meta - The dataset metadata.
   * @param {object} options - The plugin options.
   * @returns {boolean} `false` to cancel the chart datasets drawing.
   */
  beforeDatasetDraw?(chart: Chart, args: { index: number; meta: ChartMeta }, options: O): boolean | void;
  /**
   * @desc Called after the `chart` datasets at the given `args.index` have been drawn
   * (datasets are drawn in the reverse order). Note that this hook will not be called
   * if the datasets drawing has been previously cancelled.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {number} args.index - The dataset index.
   * @param {object} args.meta - The dataset metadata.
   * @param {object} options - The plugin options.
   */
  afterDatasetDraw?(chart: Chart, args: { index: number; meta: ChartMeta }, options: O): void;
  /**
   * @desc Called before processing the specified `event`. If any plugin returns `false`,
   * the event will be discarded.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {ChartEvent} args.event - The event object.
   * @param {boolean} args.replay - True if this event is replayed from `Chart.update`
   * @param {object} options - The plugin options.
   */
  beforeEvent?(chart: Chart, args: { event: ChartEvent, replay: boolean, cancelable: true }, options: O): boolean | void;
  /**
   * @desc Called after the `event` has been consumed. Note that this hook
   * will not be called if the `event` has been previously discarded.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {ChartEvent} args.event - The event object.
   * @param {boolean} args.replay - True if this event is replayed from `Chart.update`
   * @param {boolean} [args.changed] - Set to true if the plugin needs a render. Should only be changed to true, because this args object is passed through all plugins.
   * @param {object} options - The plugin options.
   */
  afterEvent?(chart: Chart, args: { event: ChartEvent, replay: boolean, changed?: boolean, cancelable: false }, options: O): void;
  /**
   * @desc Called after the chart has been resized.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} args.size - The new canvas display size (i.e. canvas.style width & height).
   * @param {object} options - The plugin options.
   */
  resize?(chart: Chart, args: { size: { width: number, height: number } }, options: O): void;
  /**
   * Called after the chart has been destroyed.
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   */
  destroy?(chart: Chart, args: EmptyObject, options: O): void;
  /**
   * Called after chart is destroyed on all plugins that were installed for that chart. This hook is also invoked for disabled plugins (options === false).
   * @param {Chart} chart - The chart instance.
   * @param {object} args - The call arguments.
   * @param {object} options - The plugin options.
   * @since 3.0.0
   */
  uninstall?(chart: Chart, args: EmptyObject, options: O): void;
}

export declare type ChartComponentLike = ChartComponent | ChartComponent[] | { [key: string]: ChartComponent };

/**
 * Please use the module's default export, which provides a singleton instance.
 * Note: the class is exported for typedoc.
 */
export interface Registry {
  readonly controllers: TypedRegistry<DatasetController>;
  readonly elements: TypedRegistry<Element>;
  readonly plugins: TypedRegistry<Plugin>;
  readonly scales: TypedRegistry<Scale>;

  add(...args: ChartComponentLike[]): void;
  remove(...args: ChartComponentLike[]): void;

  addControllers(...args: ChartComponentLike[]): void;
  addElements(...args: ChartComponentLike[]): void;
  addPlugins(...args: ChartComponentLike[]): void;
  addScales(...args: ChartComponentLike[]): void;

  getController(id: string): DatasetController | undefined;
  getElement(id: string): Element | undefined;
  getPlugin(id: string): Plugin | undefined;
  getScale(id: string): Scale | undefined;
}

export const registry: Registry;

export interface Tick {
  value: number;
  label?: string | string[];
  major?: boolean;
}

export interface CoreScaleOptions {
  /**
   * Controls the axis global visibility (visible when true, hidden when false). When display: 'auto', the axis is visible only if at least one associated dataset is visible.
   * @default true
   */
  display: boolean | 'auto';
  /**
   * Reverse the scale.
   * @default false
   */
  reverse: boolean;
  /**
   * The weight used to sort the axis. Higher weights are further away from the chart area.
   * @default true
   */
  weight: number;
  /**
   * Callback called before the update process starts.
   */
  beforeUpdate(axis: Scale): void;
  /**
   * Callback that runs before dimensions are set.
   */
  beforeSetDimensions(axis: Scale): void;
  /**
   * Callback that runs after dimensions are set.
   */
  afterSetDimensions(axis: Scale): void;
  /**
   * Callback that runs before data limits are determined.
*/ beforeDataLimits(axis: Scale): void; /** * Callback that runs after data limits are determined. */ afterDataLimits(axis: Scale): void; /** * Callback that runs before ticks are created. */ beforeBuildTicks(axis: Scale): void; /** * Callback that runs after ticks are created. Useful for filtering ticks. */ afterBuildTicks(axis: Scale): void; /** * Callback that runs before ticks are converted into strings. */ beforeTickToLabelConversion(axis: Scale): void; /** * Callback that runs after ticks are converted into strings. */ afterTickToLabelConversion(axis: Scale): void; /** * Callback that runs before tick rotation is determined. */ beforeCalculateTickRotation(axis: Scale): void; /** * Callback that runs after tick rotation is determined. */ afterCalculateTickRotation(axis: Scale): void; /** * Callback that runs before the scale fits to the canvas. */ beforeFit(axis: Scale): void; /** * Callback that runs after the scale fits to the canvas. */ afterFit(axis: Scale): void; /** * Callback that runs at the end of the update process. */ afterUpdate(axis: Scale): void; } export interface Scale<O extends CoreScaleOptions = CoreScaleOptions> extends Element<{}, O>, LayoutItem { readonly id: string; readonly type: string; readonly ctx: CanvasRenderingContext2D; readonly chart: Chart; maxWidth: number; maxHeight: number; paddingTop: number; paddingBottom: number; paddingLeft: number; paddingRight: number; axis: string; labelRotation: number; min: number; max: number; ticks: Tick[]; getMatchingVisibleMetas(type?: string): ChartMeta[]; drawTitle(chartArea: ChartArea): void; drawLabels(chartArea: ChartArea): void; drawGrid(chartArea: ChartArea): void; /** * @param {number} pixel * @return {number} */ getDecimalForPixel(pixel: number): number; /** * Utility for getting the pixel location of a percentage of scale * The coordinate (0, 0) is at the upper-left corner of the canvas * @param {number} decimal * @return {number} */ getPixelForDecimal(decimal: number): number; /** * Returns the location of the tick at the given index * The coordinate (0, 0) is at the upper-left corner of the canvas * @param {number} index * @return {number} */ getPixelForTick(index: number): number; /** * Used to get the label to display in the tooltip for the given value * @param {*} value * @return {string} */ getLabelForValue(value: number): string; /** * Returns the location of the given data point. Value can either be an index or a numerical value * The coordinate (0, 0) is at the upper-left corner of the canvas * @param {*} value * @param {number} [index] * @return {number} */ getPixelForValue(value: number, index: number): number; /** * Used to get the data value from a given pixel. 
This is the inverse of getPixelForValue * The coordinate (0, 0) is at the upper-left corner of the canvas * @param {number} pixel * @return {*} */ getValueForPixel(pixel: number): number | undefined; getBaseValue(): number; /** * Returns the pixel for the minimum chart value * The coordinate (0, 0) is at the upper-left corner of the canvas * @return {number} */ getBasePixel(): number; init(options: O): void; parse(raw: unknown, index: number): unknown; getUserBounds(): { min: number; max: number; minDefined: boolean; maxDefined: boolean }; getMinMax(canStack: boolean): { min: number; max: number }; getTicks(): Tick[]; getLabels(): string[]; beforeUpdate(): void; configure(): void; afterUpdate(): void; beforeSetDimensions(): void; setDimensions(): void; afterSetDimensions(): void; beforeDataLimits(): void; determineDataLimits(): void; afterDataLimits(): void; beforeBuildTicks(): void; buildTicks(): Tick[]; afterBuildTicks(): void; beforeTickToLabelConversion(): void; generateTickLabels(ticks: Tick[]): void; afterTickToLabelConversion(): void; beforeCalculateLabelRotation(): void; calculateLabelRotation(): void; afterCalculateLabelRotation(): void; beforeFit(): void; fit(): void; afterFit(): void; isFullSize(): boolean; } export const Scale: { prototype: Scale; new <O extends CoreScaleOptions = CoreScaleOptions>(cfg: AnyObject): Scale<O>; }; export interface ScriptableScaleContext { chart: Chart; scale: Scale; index: number; tick: Tick; } export const Ticks: { formatters: { /** * Formatter for value labels * @param value the value to display * @return {string|string[]} the label to display */ values(value: unknown): string | string[]; /** * Formatter for numeric ticks * @param tickValue the value to be formatted * @param index the position of the tickValue parameter in the ticks array * @param ticks the list of ticks being converted * @return string representation of the tickValue parameter */ numeric(tickValue: number, index: number, ticks: { value: number }[]): string; /** * Formatter for logarithmic ticks * @param tickValue the value to be formatted * @param index the position of the tickValue parameter in the ticks array * @param ticks the list of ticks being converted * @return string representation of the tickValue parameter */ logarithmic(tickValue: number, index: number, ticks: { value: number }[]): string; }; }; export interface TypedRegistry<T> { /** * @param {ChartComponent} item * @returns {string} The scope where items defaults were registered to. */ register(item: ChartComponent): string; get(id: string): T | undefined; unregister(item: ChartComponent): void; } export interface ChartEvent { type: | 'contextmenu' | 'mouseenter' | 'mousedown' | 'mousemove' | 'mouseup' | 'mouseout' | 'click' | 'dblclick' | 'keydown' | 'keypress' | 'keyup' | 'resize'; native: Event | null; x: number | null; y: number | null; } export interface ChartComponent { id: string; defaults?: AnyObject; defaultRoutes?: { [property: string]: string }; beforeRegister?(): void; afterRegister?(): void; beforeUnregister?(): void; afterUnregister?(): void; } export interface CoreInteractionOptions { /** * Sets which elements appear in the tooltip. See Interaction Modes for details. * @default 'nearest' */ mode: InteractionMode; /** * if true, the hover mode only applies when the mouse position intersects an item on the chart. * @default true */ intersect: boolean; /** * Can be set to 'x', 'y', or 'xy' to define which directions are used in calculating distances. 
Defaults to 'x' for 'index' mode and 'xy' in dataset and 'nearest' modes. */ axis: 'x' | 'y' | 'xy'; } export interface HoverInteractionOptions extends CoreInteractionOptions { /** * Called when any of the events fire. Passed the event, an array of active elements (bars, points, etc), and the chart. */ onHover(event: ChartEvent, elements: ActiveElement[], chart: Chart): void; } export interface CoreChartOptions<TType extends ChartType> extends ParsingOptions, AnimationOptions<TType> { datasets: { [key in ChartType]: ChartTypeRegistry[key]['datasetOptions'] } /** * The base axis of the chart. 'x' for vertical charts and 'y' for horizontal charts. * @default 'x' */ indexAxis: 'x' | 'y'; /** * base color * @see Defaults.color */ color: Color; /** * base background color * @see Defaults.backgroundColor */ backgroundColor: Color; /** * base border color * @see Defaults.borderColor */ borderColor: Color; /** * base font * @see Defaults.font */ font: FontSpec; /** * Resizes the chart canvas when its container does (important note...). * @default true */ responsive: boolean; /** * Maintain the original canvas aspect ratio (width / height) when resizing. * @default true */ maintainAspectRatio: boolean; /** * Canvas aspect ratio (i.e. width / height, a value of 1 representing a square canvas). Note that this option is ignored if the height is explicitly defined either as attribute or via the style. * @default 2 */ aspectRatio: number; /** * Locale used for number formatting (using `Intl.NumberFormat`). * @default user's browser setting */ locale: string; /** * Called when a resize occurs. Gets passed two arguments: the chart instance and the new size. */ onResize(chart: Chart, size: { width: number; height: number }): void; /** * Override the window's default devicePixelRatio. * @default window.devicePixelRatio */ devicePixelRatio: number; interaction: CoreInteractionOptions; hover: HoverInteractionOptions; /** * The events option defines the browser events that the chart should listen to for tooltips and hovering. * @default ['mousemove', 'mouseout', 'click', 'touchstart', 'touchmove'] */ events: ('mousemove' | 'mouseout' | 'click' | 'touchstart' | 'touchmove')[]; /** * Called when any of the events fire. Passed the event, an array of active elements (bars, points, etc), and the chart. */ onHover(event: ChartEvent, elements: ActiveElement[], chart: Chart): void; /** * Called if the event is of type 'mouseup' or 'click'. Passed the event, an array of active elements, and the chart. */ onClick(event: ChartEvent, elements: ActiveElement[], chart: Chart): void; layout: { padding: Scriptable<number | ChartArea, ScriptableContext<TType>>; }; } export type EasingFunction = | 'linear' | 'easeInQuad' | 'easeOutQuad' | 'easeInOutQuad' | 'easeInCubic' | 'easeOutCubic' | 'easeInOutCubic' | 'easeInQuart' | 'easeOutQuart' | 'easeInOutQuart' | 'easeInQuint' | 'easeOutQuint' | 'easeInOutQuint' | 'easeInSine' | 'easeOutSine' | 'easeInOutSine' | 'easeInExpo' | 'easeOutExpo' | 'easeInOutExpo' | 'easeInCirc' | 'easeOutCirc' | 'easeInOutCirc' | 'easeInElastic' | 'easeOutElastic' | 'easeInOutElastic' | 'easeInBack' | 'easeOutBack' | 'easeInOutBack' | 'easeInBounce' | 'easeOutBounce' | 'easeInOutBounce'; export type AnimationSpec<TType extends ChartType> = { /** * The number of milliseconds an animation takes. 
* @default 1000 */ duration?: Scriptable<number, ScriptableContext<TType>>; /** * Easing function to use * @default 'easeOutQuart' */ easing?: Scriptable<EasingFunction, ScriptableContext<TType>>; /** * Delay before starting the animations. * @default 0 */ delay?: Scriptable<number, ScriptableContext<TType>>; /** * If set to true, the animations loop endlessly. * @default false */ loop?: Scriptable<boolean, ScriptableContext<TType>>; } export type AnimationsSpec<TType extends ChartType> = { [name: string]: false | AnimationSpec<TType> & { properties: string[]; /** * Type of property, determines the interpolator used. Possible values: 'number', 'color' and 'boolean'. Only really needed for 'color', because typeof does not get that right. */ type: 'color' | 'number' | 'boolean'; fn: <T>(from: T, to: T, factor: number) => T; /** * Start value for the animation. Current value is used when undefined */ from: Scriptable<Color | number | boolean, ScriptableContext<TType>>; /** * */ to: Scriptable<Color | number | boolean, ScriptableContext<TType>>; } } export type TransitionSpec<TType extends ChartType> = { animation: AnimationSpec<TType>; animations: AnimationsSpec<TType>; } export type TransitionsSpec<TType extends ChartType> = { [mode: string]: TransitionSpec<TType> } export type AnimationOptions<TType extends ChartType> = { animation: false | AnimationSpec<TType> & { /** * Callback called on each step of an animation. */ onProgress?: (this: Chart, event: AnimationEvent) => void; /** * Callback called when all animations are completed. */ onComplete?: (this: Chart, event: AnimationEvent) => void; }; animations: AnimationsSpec<TType>; transitions: TransitionsSpec<TType>; }; export interface FontSpec { /** * Default font family for all text, follows CSS font-family options. * @default "'Helvetica Neue', 'Helvetica', 'Arial', sans-serif" */ family: string; /** * Default font size (in px) for text. Does not apply to radialLinear scale point labels. * @default 12 */ size: number; /** * Default font style. Does not apply to tooltip title or footer. Does not apply to chart title. Follows CSS font-style options (i.e. normal, italic, oblique, initial, inherit) * @default 'normal' */ style: 'normal' | 'italic' | 'oblique' | 'initial' | 'inherit'; /** * Default font weight (boldness). (see MDN). */ weight: string | null; /** * Height of an individual line of text (see MDN). * @default 1.2 */ lineHeight: number | string; } export type TextAlign = 'left' | 'center' | 'right'; export interface VisualElement { draw(ctx: CanvasRenderingContext2D): void; inRange(mouseX: number, mouseY: number, useFinalPosition?: boolean): boolean; inXRange(mouseX: number, useFinalPosition?: boolean): boolean; inYRange(mouseY: number, useFinalPosition?: boolean): boolean; getCenterPoint(useFinalPosition?: boolean): { x: number; y: number }; getRange?(axis: 'x' | 'y'): number; } export interface CommonElementOptions { borderWidth: number; borderColor: Color; backgroundColor: Color; } export interface CommonHoverOptions { hoverBorderWidth: number; hoverBorderColor: Color; hoverBackgroundColor: Color; } export interface Segment { start: number; end: number; loop: boolean; } export interface ArcProps { x: number; y: number; startAngle: number; endAngle: number; innerRadius: number; outerRadius: number; circumference: number; } export interface ArcOptions extends CommonElementOptions { /** * Arc stroke alignment. */ borderAlign: 'center' | 'inner'; /** * Arc offset (in pixels). 
*/ offset: number; } export interface ArcHoverOptions extends CommonHoverOptions { hoverOffset: number; } export interface ArcElement<T extends ArcProps = ArcProps, O extends ArcOptions = ArcOptions> extends Element<T, O>, VisualElement {} export const ArcElement: ChartComponent & { prototype: ArcElement; new (cfg: AnyObject): ArcElement; }; export interface LineProps {} export interface LineOptions extends CommonElementOptions { /** * Line cap style. See MDN. * @default 'butt' */ borderCapStyle: CanvasLineCap; /** * Line dash. See MDN. * @default [] */ borderDash: number[]; /** * Line dash offset. See MDN. * @default 0.0 */ borderDashOffset: number; /** * Line join style. See MDN. * @default 'miter' */ borderJoinStyle: CanvasLineJoin; /** * true to keep Bézier control inside the chart, false for no restriction. * @default true */ capBezierPoints: boolean; /** * Interpolation mode to apply. * @default 'default' */ cubicInterpolationMode: 'default' | 'monotone'; /** * Bézier curve tension (0 for no Bézier curves). * @default 0 */ tension: number; /** * true to show the line as a stepped line (tension will be ignored). * @default false */ stepped: 'before' | 'after' | 'middle' | boolean; } export interface LineHoverOptions extends CommonHoverOptions { hoverBorderCapStyle: CanvasLineCap; hoverBorderDash: number[]; hoverBorderDashOffset: number; hoverBorderJoinStyle: CanvasLineJoin; } export interface LineElement<T extends LineProps = LineProps, O extends LineOptions = LineOptions> extends Element<T, O>, VisualElement { updateControlPoints(chartArea: ChartArea): void; points: Point[]; readonly segments: Segment[]; first(): Point | false; last(): Point | false; interpolate(point: Point, property: 'x' | 'y'): undefined | Point | Point[]; pathSegment(ctx: CanvasRenderingContext2D, segment: Segment, params: AnyObject): undefined | boolean; path(ctx: CanvasRenderingContext2D): boolean; } export const LineElement: ChartComponent & { prototype: LineElement; new (cfg: AnyObject): LineElement; }; export interface PointProps { x: number; y: number; } export type PointStyle = | 'circle' | 'cross' | 'crossRot' | 'dash' | 'line' | 'rect' | 'rectRounded' | 'rectRot' | 'star' | 'triangle' | HTMLImageElement | HTMLCanvasElement; export interface PointOptions extends CommonElementOptions { /** * Point radius * @default 3 */ radius: number; /** * Extra radius added to point radius for hit detection. * @default 1 */ hitRadius: number; /** * Point style * @default 'circle; */ pointStyle: PointStyle; /** * Point rotation (in degrees). * @default 0 */ rotation: number; } export interface PointHoverOptions extends CommonHoverOptions { /** * Point radius when hovered. * @default 4 */ hoverRadius: number; } export interface PointPrefixedOptions { /** * The fill color for points. */ pointBackgroundColor: Color; /** * The border color for points. */ pointBorderColor: Color; /** * The width of the point border in pixels. */ pointBorderWidth: number; /** * The pixel size of the non-displayed point that reacts to mouse events. */ pointHitRadius: number; /** * The radius of the point shape. If set to 0, the point is not rendered. */ pointRadius: number; /** * The rotation of the point in degrees. */ pointRotation: number; /** * Style of the point. */ pointStyle: PointStyle; } export interface PointPrefixedHoverOptions { /** * Point background color when hovered. */ pointHoverBackgroundColor: Color; /** * Point border color when hovered. */ pointHoverBorderColor: Color; /** * Border width of point when hovered. 
*/ pointHoverBorderWidth: number; /** * The radius of the point when hovered. */ pointHoverRadius: number; } export interface PointElement<T extends PointProps = PointProps, O extends PointOptions = PointOptions> extends Element<T, O>, VisualElement { readonly skip: boolean; } export const PointElement: ChartComponent & { prototype: PointElement; new (cfg: AnyObject): PointElement; }; export interface BarProps { x: number; y: number; base: number; horizontal: boolean; width: number; height: number; } export interface BarOptions extends CommonElementOptions { /** * The base value for the bar in data units along the value axis. */ base: number; /** * Skipped (excluded) border: 'start', 'end', 'left', 'right', 'bottom', 'top' or false (none). * @default 'start' */ borderSkipped: 'start' | 'end' | 'left' | 'right' | 'bottom' | 'top' | false; /** * Border radius * @default 0 */ borderRadius: number | BorderRadius; } export interface BorderRadius { topLeft: number; topRight: number; bottomLeft: number; bottomRight: number; } export interface BarHoverOptions extends CommonHoverOptions { hoverBorderRadius: number | BorderRadius; } export interface BarElement< T extends BarProps = BarProps, O extends BarOptions = BarOptions > extends Element<T, O>, VisualElement {} export const BarElement: ChartComponent & { prototype: BarElement; new (cfg: AnyObject): BarElement; }; export interface ElementOptionsByType { arc: ArcOptions & ArcHoverOptions; bar: BarOptions & BarHoverOptions; line: LineOptions & LineHoverOptions; point: PointOptions & PointHoverOptions; } export interface ElementChartOptions { elements: Partial<ElementOptionsByType>; } export class BasePlatform {
/** * Called at chart construction time, returns a context2d instance implementing * the [W3C Canvas 2D Context API standard]{@link https://www.w3.org/TR/2dcontext/}. * @param {HTMLCanvasElement} canvas - The canvas from which to acquire context (platform specific) * @param options - The chart options */ acquireContext( canvas: HTMLCanvasElement, options?: CanvasRenderingContext2DSettings ): CanvasRenderingContext2D | null; /** * Called at chart destruction time, releases any resources associated to the context * previously returned by the acquireContext() method. * @param {CanvasRenderingContext2D} context - The context2d instance * @returns {boolean} true if the method succeeded, else false */ releaseContext(context: CanvasRenderingContext2D): boolean; /** * Registers the specified listener on the given chart. * @param {Chart} chart - Chart from which to listen for event * @param {string} type - The ({@link ChartEvent}) type to listen for * @param listener - Receives a notification (an object that implements * the {@link ChartEvent} interface) when an event of the specified type occurs. */ addEventListener(chart: Chart, type: string, listener: (e: ChartEvent) => void): void; /** * Removes the specified listener previously registered with addEventListener. * @param {Chart} chart - Chart from which to remove the listener * @param {string} type - The ({@link ChartEvent}) type to remove * @param listener - The listener function to remove from the event target. */ removeEventListener(chart: Chart, type: string, listener: (e: ChartEvent) => void): void; /** * @returns {number} the current devicePixelRatio of the device this platform is connected to. */ getDevicePixelRatio(): number; /** * @param {HTMLCanvasElement} canvas - The canvas for which to calculate the maximum size * @param {number} [width] - Parent element's content width * @param {number} [height] - Parent element's content height * @param {number} [aspectRatio] - The aspect ratio to maintain * @returns { width: number, height: number } the maximum size available. */ getMaximumSize(canvas: HTMLCanvasElement, width?: number, height?: number, aspectRatio?: number): { width: number, height: number }; /** * @param {HTMLCanvasElement} canvas * @returns {boolean} true if the canvas is attached to the platform, false if not. */ isAttached(canvas: HTMLCanvasElement): boolean; } export class BasicPlatform extends BasePlatform {} export class DomPlatform extends BasePlatform {} export declare enum DecimationAlgorithm { lttb = 'lttb', minmax = 'min-max', } interface BaseDecimationOptions { enabled: boolean; } interface LttbDecimationOptions extends BaseDecimationOptions { algorithm: DecimationAlgorithm.lttb; samples?: number; } interface MinMaxDecimationOptions extends BaseDecimationOptions { algorithm: DecimationAlgorithm.minmax; } export type DecimationOptions = LttbDecimationOptions | MinMaxDecimationOptions; export const Filler: Plugin; export interface FillerOptions { propagate: boolean; } export type FillTarget = number | string | { value: number } | 'start' | 'end' | 'origin' | 'stack' | false; export interface ComplexFillTarget { /** * The accepted values are the same as the filling mode values, so you may use absolute and relative dataset indexes and/or boundaries. */ target: FillTarget; /** * If no color is set, the default color will be the background color of the chart. */ above: Color; /** * Same as the above. 
*/ below: Color; } export interface FillerControllerDatasetOptions { /** * Both line and radar charts support a fill option on the dataset object which can be used to create area between two datasets or a dataset and a boundary, i.e. the scale origin, start or end */ fill: FillTarget | ComplexFillTarget; } export const Legend: Plugin; export interface LegendItem { /** * Label that will be displayed */ text: string; /** * Index of the associated dataset */ datasetIndex: number; /** * Fill style of the legend box */ fillStyle?: Color; /** * If true, this item represents a hidden dataset. Label will be rendered with a strike-through effect */ hidden?: boolean; /** * For box border. * @see https://developer.mozilla.org/en/docs/Web/API/CanvasRenderingContext2D/lineCap */ lineCap?: CanvasLineCap; /** * For box border. * @see https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/setLineDash */ lineDash?: number[]; /** * For box border. * @see https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/lineDashOffset */ lineDashOffset?: number; /** * For box border. * @see https://developer.mozilla.org/en-US/docs/Web/API/CanvasRenderingContext2D/lineJoin */ lineJoin?: CanvasLineJoin; /** * Width of box border */ lineWidth?: number; /** * Stroke style of the legend box */ strokeStyle?: Color; /** * Point style of the legend box (only used if usePointStyle is true) */ pointStyle?: PointStyle; /** * Rotation of the point in degrees (only used if usePointStyle is true) */ rotation?: number; } export interface LegendElement extends Element, LayoutItem {} export interface LegendOptions { /** * Is the legend shown? * @default true */ display: boolean; /** * Position of the legend. * @default 'top' */ position: LayoutPosition; /** * Alignment of the legend. * @default 'center' */ align: TextAlign; /** * Marks that this box should take the full width/height of the canvas (moving other boxes). This is unlikely to need to be changed in day-to-day use. * @default true */ fullSize: boolean; /** * Legend will show datasets in reverse order. * @default false */ reverse: boolean; /** * A callback that is called when a click event is registered on a label item. */ onClick(this: LegendElement, e: ChartEvent, legendItem: LegendItem, legend: LegendElement): void; /** * A callback that is called when a 'mousemove' event is registered on top of a label item */ onHover(this: LegendElement, e: ChartEvent, legendItem: LegendItem, legend: LegendElement): void; /** * A callback that is called when a 'mousemove' event is registered outside of a previously hovered label item. */ onLeave(this: LegendElement, e: ChartEvent, legendItem: LegendItem, legend: LegendElement): void; labels: { /** * Width of colored box. * @default 40 */ boxWidth: number; /** * Height of the coloured box. * @default fontSize */ boxHeight: number; /** * Color of label * @see Defaults.color */ color: Color; /** * Font of label * @see Defaults.font */ font: FontSpec; /** * Padding between rows of colored boxes. * @default 10 */ padding: number; /** * Generates legend items for each thing in the legend. Default implementation returns the text + styling for the color box. See Legend Item for details. */ generateLabels(chart: Chart): LegendItem[]; /** * Filters legend items out of the legend. 
Receives 2 parameters, a Legend Item and the chart data */ filter(item: LegendItem, data: ChartData): boolean; /** * Sorts the legend items */ sort(a: LegendItem, b: LegendItem, data: ChartData): number; /** * Override point style for the legend. Only applies if usePointStyle is true */ pointStyle: PointStyle; /** * Label style will match corresponding point style (size is based on the minimum value between boxWidth and font.size). * @default false */ usePointStyle: boolean; }; title: { /** * Is the legend title displayed. * @default false */ display: boolean; /** * Color of title * @see Defaults.color */ color: Color; /** * see Fonts */ font: FontSpec; position: 'center' | 'start' | 'end'; padding?: number | ChartArea; /** * The string title. */ text: string; }; } export const Title: Plugin; export interface TitleOptions { /** * Alignment of the title. * @default 'center' */ align: 'start' | 'center' | 'end'; /** * Is the title shown? * @default false */ display: boolean; /** * Position of title * @default 'top' */ position: 'top' | 'left' | 'bottom' | 'right'; /** * Color of text * @see Defaults.color */ color: Color; font: FontSpec; /** * Marks that this box should take the full width/height of the canvas (moving other boxes). If set to `false`, places the box above/beside the * chart area * @default true */ fullSize: boolean; /** * Adds padding above and below the title text if a single number is specified. It is also possible to change top and bottom padding separately. */ padding: number | { top: number; bottom: number }; /** * Title text to display. If specified as an array, text is rendered on multiple lines. */ text: string | string[]; } export type TooltipAlignment = 'start' | 'center' | 'end'; export interface TooltipModel<TType extends ChartType> { // The items that we are rendering in the tooltip. See Tooltip Item Interface section dataPoints: TooltipItem<TType>[]; // Positioning xAlign: TooltipAlignment; yAlign: TooltipAlignment; // X and Y properties are the top left of the tooltip x: number; y: number; width: number; height: number; // Where the tooltip points to caretX: number; caretY: number; // Body // The body lines that need to be rendered // Each object contains 3 parameters // before: string[] // lines of text before the line with the color square // lines: string[]; // lines of text to render as the main item with color square // after: string[]; // lines of text to render after the main lines body: { before: string[]; lines: string[]; after: string[] }[]; // lines of text that appear after the title but before the body beforeBody: string[]; // line of text that appear after the body and before the footer afterBody: string[]; // Title // lines of text that form the title title: string[]; // Footer // lines of text that form the footer footer: string[]; // colors to render for each item in body[]. 
This is the color of the squares in the tooltip labelColors: Color[]; labelTextColors: Color[]; labelPointStyles: { pointStyle: PointStyle; rotation: number }[]; // 0 opacity is a hidden tooltip opacity: number; // tooltip options options: TooltipOptions<TType>; } export const Tooltip: Plugin & { readonly positioners: { [key: string]: (items: readonly Element[], eventPosition: { x: number; y: number }) => { x: number; y: number }; }; getActiveElements(): ActiveElement[]; setActiveElements(active: ActiveDataPoint[], eventPosition: { x: number, y: number }): void; }; export interface TooltipCallbacks< TType extends ChartType, Model = TooltipModel<TType>, Item = TooltipItem<TType>> { beforeTitle(this: Model, tooltipItems: Item[]): string | string[]; title(this: Model, tooltipItems: Item[]): string | string[]; afterTitle(this: Model, tooltipItems: Item[]): string | string[]; beforeBody(this: Model, tooltipItems: Item[]): string | string[]; afterBody(this: Model, tooltipItems: Item[]): string | string[]; beforeLabel(this: Model, tooltipItem: Item): string | string[]; label(this: Model, tooltipItem: Item): string | string[]; afterLabel(this: Model, tooltipItem: Item): string | string[]; labelColor(this: Model, tooltipItem: Item): { borderColor: Color; backgroundColor: Color }; labelTextColor(this: Model, tooltipItem: Item): Color; labelPointStyle(this: Model, tooltipItem: Item): { pointStyle: PointStyle; rotation: number }; beforeFooter(this: Model, tooltipItems: Item[]): string | string[]; footer(this: Model, tooltipItems: Item[]): string | string[]; afterFooter(this: Model, tooltipItems: Item[]): string | string[]; } export interface ExtendedPlugin< TType extends ChartType, O = AnyObject, Model = TooltipModel<TType>> { /** * @desc Called before drawing the `tooltip`. If any plugin returns `false`, * the tooltip drawing is cancelled until another `render` is triggered. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {Tooltip} args.tooltip - The tooltip. * @param {object} options - The plugin options. * @returns {boolean} `false` to cancel the chart tooltip drawing. */ beforeTooltipDraw?(chart: Chart, args: { tooltip: Model }, options: O): boolean | void; /** * @desc Called after drawing the `tooltip`. Note that this hook will not * be called if the tooltip drawing has been previously cancelled. * @param {Chart} chart - The chart instance. * @param {object} args - The call arguments. * @param {Tooltip} args.tooltip - The tooltip. * @param {object} options - The plugin options. */ afterTooltipDraw?(chart: Chart, args: { tooltip: Model }, options: O): void; } export interface ScriptableTooltipContext<TType extends ChartType> { chart: UnionToIntersection<Chart<TType>>; tooltip: UnionToIntersection<TooltipModel<TType>>; tooltipItems: TooltipItem<TType>[]; } export interface TooltipOptions<TType extends ChartType> extends CoreInteractionOptions { /** * Are on-canvas tooltips enabled? * @default true */ enabled: Scriptable<boolean, ScriptableTooltipContext<TType>>; /** * See custom tooltip section. */ custom(this: TooltipModel<TType>, args: { chart: Chart; tooltip: TooltipModel<TType> }): void; /** * The mode for positioning the tooltip */ position: Scriptable<'average' | 'nearest', ScriptableTooltipContext<TType>> /** * Override the tooltip alignment calculations */ xAlign: Scriptable<TooltipAlignment, ScriptableTooltipContext<TType>>; yAlign: Scriptable<TooltipAlignment, ScriptableTooltipContext<TType>>; /** * Sort tooltip items. 
*/ itemSort: (a: TooltipItem<ChartType>, b: TooltipItem<ChartType>) => number; filter: (e: TooltipItem<ChartType>) => boolean; /** * Background color of the tooltip. * @default 'rgba(0, 0, 0, 0.8)' */ backgroundColor: Scriptable<Color, ScriptableTooltipContext<TType>>; /** * Color of title * @default '#fff' */ titleColor: Scriptable<Color, ScriptableTooltipContext<TType>>; /** * See Fonts * @default {style: 'bold'} */ titleFont: Scriptable<FontSpec, ScriptableTooltipContext<TType>>; /** * Spacing to add to top and bottom of each title line. * @default 2 */ titleSpacing: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Margin to add on bottom of title section. * @default 6 */ titleMarginBottom: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Horizontal alignment of the title text lines. * @default 'left' */ titleAlign: Scriptable<TextAlign, ScriptableTooltipContext<TType>>; /** * Spacing to add to top and bottom of each tooltip item. * @default 2 */ bodySpacing: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Color of body * @default '#fff' */ bodyColor: Scriptable<Color, ScriptableTooltipContext<TType>>; /** * See Fonts. * @default {} */ bodyFont: Scriptable<FontSpec, ScriptableTooltipContext<TType>>; /** * Horizontal alignment of the body text lines. * @default 'left' */ bodyAlign: Scriptable<TextAlign, ScriptableTooltipContext<TType>>; /** * Spacing to add to top and bottom of each footer line. * @default 2 */ footerSpacing: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Margin to add before drawing the footer. * @default 6 */ footerMarginTop: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Color of footer * @default '#fff' */ footerColor: Scriptable<Color, ScriptableTooltipContext<TType>>; /** * See Fonts * @default {style: 'bold'} */ footerFont: Scriptable<FontSpec, ScriptableTooltipContext<TType>>; /** * Horizontal alignment of the footer text lines. * @default 'left' */ footerAlign: Scriptable<TextAlign, ScriptableTooltipContext<TType>>; /** * Padding to add to the tooltip * @default 6 */ padding: Scriptable<number | ChartArea, ScriptableTooltipContext<TType>>; /** * Extra distance to move the end of the tooltip arrow away from the tooltip point. * @default 2 */ caretPadding: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Size, in px, of the tooltip arrow. * @default 5 */ caretSize: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Radius of tooltip corner curves. * @default 6 */ cornerRadius: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Color to draw behind the colored boxes when multiple items are in the tooltip. * @default '#fff' */ multiKeyBackground: Scriptable<Color, ScriptableTooltipContext<TType>>; /** * If true, color boxes are shown in the tooltip. * @default true */ displayColors: Scriptable<boolean, ScriptableTooltipContext<TType>>; /** * Width of the color box if displayColors is true. * @default bodyFont.size */ boxWidth: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Height of the color box if displayColors is true. * @default bodyFont.size */ boxHeight: Scriptable<number, ScriptableTooltipContext<TType>>; /** * Use the corresponding point style (from dataset options) instead of color boxes, ex: star, triangle etc. (size is based on the minimum value between boxWidth and boxHeight) * @default false */ usePointStyle: Scriptable<boolean, ScriptableTooltipContext<TType>>; /** * Color of the border. 
* @default 'rgba(0, 0, 0, 0)' */ borderColor: Scriptable<Color, ScriptableTooltipContext<TType>>; /** * Size of the border. * @default 0 */ borderWidth: Scriptable<number, ScriptableTooltipContext<TType>>; /** * true for rendering the legends from right to left. */ rtl: Scriptable<boolean, ScriptableTooltipContext<TType>>; /** * This will force the text direction 'rtl' or 'ltr on the canvas for rendering the tooltips, regardless of the css specified on the canvas * @default canvas's default */ textDirection: Scriptable<string, ScriptableTooltipContext<TType>>; animation: AnimationSpec<TType>; animations: AnimationsSpec<TType>; callbacks: TooltipCallbacks<TType>; } export interface TooltipItem<TType extends ChartType> { /** * The chart the tooltip is being shown on */ chart: Chart; /** * Label for the tooltip */ label: string; /** * Parsed data values for the given `dataIndex` and `datasetIndex` */ parsed: UnionToIntersection<ParsedDataType<TType>>; /** * Raw data values for the given `dataIndex` and `datasetIndex` */ raw: unknown; /** * Formatted value for the tooltip */ formattedValue: string; /** * The dataset the item comes from */ dataset: ChartDataset; /** * Index of the dataset the item comes from */ datasetIndex: number; /** * Index of this data item in the dataset */ dataIndex: number; /** * The chart element (point, arc, bar, etc.) for this tooltip item */ element: Element; } export interface PluginOptionsByType<TType extends ChartType> { decimation: DecimationOptions; filler: FillerOptions; legend: LegendOptions; title: TitleOptions; tooltip: TooltipOptions<TType>; } export interface PluginChartOptions<TType extends ChartType> { plugins: Partial<PluginOptionsByType<TType>>; } export interface GridLineOptions { /** * @default true */ display: boolean; borderColor: Color; borderWidth: number; /** * @default false */ circular: boolean; /** * @default 'rgba(0, 0, 0, 0.1)' */ color: Scriptable<Color, ScriptableScaleContext> | readonly Color[]; /** * @default [] */ borderDash: number[]; /** * @default 0 */ borderDashOffset: Scriptable<number, ScriptableScaleContext>; /** * @default 1 */ lineWidth: Scriptable<number, ScriptableScaleContext> | readonly number[]; /** * @default true */ drawBorder: boolean; /** * @default true */ drawOnChartArea: boolean; /** * @default true */ drawTicks: boolean; /** * @default [] */ tickBorderDash: number[]; /** * @default 0 */ tickBorderDashOffset: Scriptable<number, ScriptableScaleContext>; /** * @default 'rgba(0, 0, 0, 0.1)' */ tickColor: Scriptable<Color, ScriptableScaleContext> | readonly Color[]; /** * @default 10 */ tickLength: number; /** * @default 1 */ tickWidth: number; /** * @default false */ offsetGridLines: boolean; } export interface TickOptions { /** * Returns the string representation of the tick value as it should be displayed on the chart. See callback. */ callback: (tickValue: number | string, index: number, ticks: Tick[]) => string; /** * If true, show tick labels. * @default true */ display: boolean; /** * Color of tick * @see Defaults.color */ color: Scriptable<Color, ScriptableScaleContext>; /** * see Fonts */ font: Scriptable<FontSpec, ScriptableScaleContext>; /** * Sets the offset of the tick labels from the axis */ padding: number; /** * The color of the stroke around the text. * @default undefined */ textStrokeColor: Scriptable<Color, ScriptableScaleContext>; /** * Stroke width around the text. * @default 0 */ textStrokeWidth: Scriptable<number, ScriptableScaleContext>; /** * z-index of tick layer. 
Useful when ticks are drawn on chart area. Values <= 0 are drawn under datasets, > 0 on top. * @default 0 */ z: number; major: { /** * If true, major ticks are generated. A major tick will affect autoskipping and major will be defined on ticks in the scriptable options context. * @default false */ enabled: boolean; }; } export interface CartesianScaleOptions extends CoreScaleOptions { /** * Position of the axis. */ position: 'left' | 'top' | 'right' | 'bottom' | 'center' | { [scale: string]: number }; /** * Which type of axis this is. Possible values are: 'x', 'y'. If not set, this is inferred from the first character of the ID which should be 'x' or 'y'. */ axis: 'x' | 'y'; /** * User defined minimum value for the scale, overrides minimum value from data. */ min: number; /** * User defined maximum value for the scale, overrides maximum value from data. */ max: number; /** * If true, extra space is added to the both edges and the axis is scaled to fit into the chart area. This is set to true for a bar chart by default. * @default false */ offset: boolean; gridLines: GridLineOptions; title: { display: boolean; text: string | string[]; color: Color; font: FontSpec; padding: { top: number; bottom: number; }; }; /** * If true, data will be comprised between datasets of data * @default false */ stacked?: boolean; ticks: TickOptions & { /** * The number of ticks to examine when deciding how many labels will fit. Setting a smaller value will be faster, but may be less accurate when there is large variability in label length. * @default ticks.length */ sampleSize: number; /** * The label alignment * @default 'center' */ align: 'start' | 'center' | 'end'; /** * If true, automatically calculates how many labels can be shown and hides labels accordingly. Labels will be rotated up to maxRotation before skipping any. Turn autoSkip off to show all labels no matter what. * @default true */ autoSkip: boolean; /** * Padding between the ticks on the horizontal axis when autoSkip is enabled. * @default 0 */ autoSkipPadding: number; /** * How is the label positioned perpendicular to the axis direction. * This only applies when the rotation is 0 and the axis position is one of "top", "left", "right", or "bottom" * @default 'near' */ crossAlign: 'near' | 'center' | 'far'; /** * Distance in pixels to offset the label from the centre point of the tick (in the x direction for the x axis, and the y direction for the y axis). Note: this can cause labels at the edges to be cropped by the edge of the canvas * @default 0 */ labelOffset: number; /** * Minimum rotation for tick labels. Note: Only applicable to horizontal scales. * @default 0 */ minRotation: number; /** * Maximum rotation for tick labels when rotating to condense labels. Note: Rotation doesn't occur until necessary. Note: Only applicable to horizontal scales. * @default 50 */ maxRotation: number; /** * Flips tick labels around axis, displaying the labels inside the chart instead of outside. Note: Only applicable to vertical scales. * @default false */ mirror: boolean; /** * Padding between the tick label and the axis. When set on a vertical axis, this applies in the horizontal (X) direction. When set on a horizontal axis, this applies in the vertical (Y) direction. 
* @default 0 */ padding: number; }; } export type CategoryScaleOptions = CartesianScaleOptions & { min: string | number; max: string | number; labels: string[] | string[][]; }; export type CategoryScale<O extends CategoryScaleOptions = CategoryScaleOptions> = Scale<O> export const CategoryScale: ChartComponent & { prototype: CategoryScale; new <O extends CategoryScaleOptions = CategoryScaleOptions>(cfg: AnyObject): CategoryScale<O>; }; export type LinearScaleOptions = CartesianScaleOptions & { /** * if true, scale will include 0 if it is not already included. * @default true */ beginAtZero: boolean; /** * Adjustment used when calculating the maximum data value. * @see https://www.chartjs.org/docs/next/axes/cartesian/linear#axis-range-settings */ suggestedMin?: number; /** * Adjustment used when calculating the minimum data value. * @see https://www.chartjs.org/docs/next/axes/cartesian/linear#axis-range-settings */ suggestedMax?: number; ticks: { /** * The Intl.NumberFormat options used by the default label formatter */ format: Intl.NumberFormatOptions; /** * Maximum number of ticks and gridlines to show. * @default 11 */ maxTicksLimit: number; /** * if defined and stepSize is not specified, the step size will be rounded to this many decimal places. */ precision: number; /** * User defined fixed step size for the scale * @see https://www.chartjs.org/docs/next/axes/cartesian/linear#step-size */ stepSize: number; }; }; export type LinearScale<O extends LinearScaleOptions = LinearScaleOptions> = Scale<O> export const LinearScale: ChartComponent & { prototype: LinearScale; new <O extends LinearScaleOptions = LinearScaleOptions>(cfg: AnyObject): LinearScale<O>; }; export type LogarithmicScaleOptions = CartesianScaleOptions & { /** * Adjustment used when calculating the maximum data value. * @see https://www.chartjs.org/docs/next/axes/cartesian/linear#axis-range-settings */ suggestedMin?: number; /** * Adjustment used when calculating the minimum data value. * @see https://www.chartjs.org/docs/next/axes/cartesian/linear#axis-range-settings */ suggestedMax?: number; ticks: { /** * The Intl.NumberFormat options used by the default label formatter */ format: Intl.NumberFormatOptions; }; }; export type LogarithmicScale<O extends LogarithmicScaleOptions = LogarithmicScaleOptions> = Scale<O> export const LogarithmicScale: ChartComponent & { prototype: LogarithmicScale; new <O extends LogarithmicScaleOptions = LogarithmicScaleOptions>(cfg: AnyObject): LogarithmicScale<O>; }; export type TimeScaleOptions = CartesianScaleOptions & { /** * Scale boundary strategy (bypassed by min/max time options) * - `data`: make sure data are fully visible, ticks outside are removed * - `ticks`: make sure ticks are fully visible, data outside are truncated * @see https://www.chartjs.org/docs/next/axes/cartesian/time#scale-bounds * @since 2.7.0 * @default 'data' */ bounds: 'ticks' | 'data'; /** * options for creating a new adapter instance */ adapters: { date: unknown; }; time: { /** * Custom parser for dates. * @see https://www.chartjs.org/docs/next/axes/cartesian/time#parser */ parser: string | ((v: unknown) => number); /** * If defined, dates will be rounded to the start of this unit. See Time Units below for the allowed units. */ round: false | TimeUnit; /** * If boolean and true and the unit is set to 'week', then the first day of the week will be Monday. Otherwise, it will be Sunday. * If `number`, the index of the first day of the week (0 - Sunday, 6 - Saturday). 
* @default false */ isoWeekday: false | number; /** * Sets how different time units are displayed. * @see https://www.chartjs.org/docs/next/axes/cartesian/time#display-formats */ displayFormats: { [key: string]: string; }; /** * The format string to use for the tooltip. */ tooltipFormat: string; /** * If defined, will force the unit to be a certain type. See Time Units section below for details. * @default false */ unit: false | TimeUnit; /** * The number of units between grid lines. * @default 1 */ stepSize: number; /** * The minimum display format to be used for a time unit. * @default 'millisecond' */ minUnit: TimeUnit; }; ticks: { /** * Ticks generation input values: * - 'auto': generates "optimal" ticks based on scale size and time options. * - 'data': generates ticks from data (including labels from data {t|x|y} objects). * - 'labels': generates ticks from user given `data.labels` values ONLY. * @see https://github.com/chartjs/Chart.js/pull/4507 * @since 2.7.0 * @default 'auto' * @see https://www.chartjs.org/docs/next/axes/cartesian/time#ticks-source */ source: 'labels' | 'auto' | 'data'; }; }; export interface TimeScale<O extends TimeScaleOptions = TimeScaleOptions> extends Scale<O> { getDataTimestamps(): number[]; getLabelTimestamps(): string[]; normalize(values: number[]): number[]; } export const TimeScale: ChartComponent & { prototype: TimeScale; new <O extends TimeScaleOptions = TimeScaleOptions>(cfg: AnyObject): TimeScale<O>; }; export type TimeSeriesScale<O extends TimeScaleOptions = TimeScaleOptions> = TimeScale<O> export const TimeSeriesScale: ChartComponent & { prototype: TimeSeriesScale; new <O extends TimeScaleOptions = TimeScaleOptions>(cfg: AnyObject): TimeSeriesScale<O>; }; export type RadialLinearScaleOptions = CoreScaleOptions & { animate: boolean; angleLines: { /** * if true, angle lines are shown. * @default true */ display: boolean; /** * Color of angled lines. * @default 'rgba(0, 0, 0, 0.1)' */ color: Scriptable<Color, ScriptableScaleContext>; /** * Width of angled lines. * @default 1 */ lineWidth: Scriptable<number, ScriptableScaleContext>; /** * Length and spacing of dashes on angled lines. See MDN. * @default [] */ borderDash: Scriptable<number[], ScriptableScaleContext>; /** * Offset for line dashes. See MDN. * @default 0 */ borderDashOffset: Scriptable<number, ScriptableScaleContext>; }; /** * if true, scale will include 0 if it is not already included. * @default false */ beginAtZero: boolean; gridLines: GridLineOptions; /** * User defined minimum number for the scale, overrides minimum value from data. */ min: number; /** * User defined maximum number for the scale, overrides maximum value from data. */ max: number; pointLabels: { /** * if true, point labels are shown. * @default true */ display: boolean; /** * Color of label * @see Defaults.color */ color: Scriptable<Color, ScriptableScaleContext>; /** * @see https://www.chartjs.org/docs/next/axes/general/fonts.md */ font: Scriptable<FontSpec, ScriptableScaleContext>; /** * Callback function to transform data labels to point labels. The default implementation simply returns the current string. * @default true */ callback: (label: string) => string; }; /** * Adjustment used when calculating the maximum data value. */ suggestedMax: number; /** * Adjustment used when calculating the minimum data value. */ suggestedMin: number; ticks: TickOptions & { /** * Color of label backdrops. 
* @default 'rgba(255, 255, 255, 0.75)' */ backdropColor: Scriptable<Color, ScriptableScaleContext>; /** * Horizontal padding of label backdrop. * @default 2 */ backdropPaddingX: number; /** * Vertical padding of label backdrop. * @default 2 */ backdropPaddingY: number; /** * The Intl.NumberFormat options used by the default label formatter */ format: Intl.NumberFormatOptions; /** * Maximum number of ticks and gridlines to show. * @default 11 */ maxTicksLimit: number; /** * if defined and stepSize is not specified, the step size will be rounded to this many decimal places. */ precision: number; /** * User defined fixed step size for the scale. */ stepSize: number; /** * If true, draw a background behind the tick labels. * @default true */ showLabelBackdrop: Scriptable<boolean, ScriptableScaleContext>; }; }; export interface RadialLinearScale<O extends RadialLinearScaleOptions = RadialLinearScaleOptions> extends Scale<O> { setCenterPoint(leftMovement: number, rightMovement: number, topMovement: number, bottomMovement: number): void; getIndexAngle(index: number): number; getDistanceFromCenterForValue(value: number): number; getValueForDistanceFromCenter(distance: number): number; getPointPosition(index: number, distanceFromCenter: number): { x: number; y: number; angle: number }; getPointPositionForValue(index: number, value: number): { x: number; y: number; angle: number }; getPointLabelPosition(index: number): ChartArea; getBasePosition(index: number): { x: number; y: number; angle: number }; } export const RadialLinearScale: ChartComponent & { prototype: RadialLinearScale; new <O extends RadialLinearScaleOptions = RadialLinearScaleOptions>(cfg: AnyObject): RadialLinearScale<O>; }; export interface CartesianScaleTypeRegistry { linear: { options: LinearScaleOptions; }; logarithmic: { options: LogarithmicScaleOptions; }; category: { options: CategoryScaleOptions; }; time: { options: TimeScaleOptions; }; timeseries: { options: TimeScaleOptions; }; } export interface RadialScaleTypeRegistry { radialLinear: { options: RadialLinearScaleOptions; }; } export interface ScaleTypeRegistry extends CartesianScaleTypeRegistry, RadialScaleTypeRegistry { } export type ScaleType = keyof ScaleTypeRegistry; interface CartesianParsedData { x: number; y: number; // Only specified when stacked bars are enabled _stacks?: { // Key is the stack ID which is generally the axis ID [key: string]: { // Inner key is the datasetIndex [key: number]: number; } } } interface BarParsedData extends CartesianParsedData { // Only specified if floating bars are show _custom?: { barStart: number; barEnd: number; start: number; end: number; min: number; max: number; } } interface BubbleParsedData extends CartesianParsedData { // The bubble radius value _custom: number; } interface RadialParsedData { r: number; } export interface ChartTypeRegistry { bar: { chartOptions: BarControllerChartOptions; datasetOptions: BarControllerDatasetOptions; defaultDataPoint: number; parsedDataType: BarParsedData, scales: keyof CartesianScaleTypeRegistry; }; line: { chartOptions: LineControllerChartOptions; datasetOptions: LineControllerDatasetOptions & FillerControllerDatasetOptions; defaultDataPoint: ScatterDataPoint; parsedDataType: CartesianParsedData; scales: keyof CartesianScaleTypeRegistry; }; scatter: { chartOptions: ScatterControllerChartOptions; datasetOptions: ScatterControllerDatasetOptions; defaultDataPoint: ScatterDataPoint; parsedDataType: CartesianParsedData; scales: keyof CartesianScaleTypeRegistry; }; bubble: { chartOptions: 
EmptyObject; datasetOptions: BubbleControllerDatasetOptions; defaultDataPoint: BubbleDataPoint; parsedDataType: BubbleParsedData; scales: keyof CartesianScaleTypeRegistry; }; pie: { chartOptions: PieControllerChartOptions; datasetOptions: PieControllerDatasetOptions; defaultDataPoint: PieDataPoint; parsedDataType: number; scales: keyof CartesianScaleTypeRegistry; }; doughnut: { chartOptions: DoughnutControllerChartOptions; datasetOptions: DoughnutControllerDatasetOptions; defaultDataPoint: DoughnutDataPoint; parsedDataType: number; scales: keyof CartesianScaleTypeRegistry; }; polarArea: { chartOptions: PolarAreaControllerChartOptions; datasetOptions: PolarAreaControllerDatasetOptions; defaultDataPoint: number; parsedDataType: RadialParsedData; scales: keyof RadialScaleTypeRegistry; }; radar: { chartOptions: RadarControllerChartOptions; datasetOptions: RadarControllerDatasetOptions; defaultDataPoint: number; parsedDataType: RadialParsedData; scales: keyof RadialScaleTypeRegistry; }; } export type ChartType = keyof ChartTypeRegistry; export type ScaleOptionsByType<TScale extends ScaleType = ScaleType> = DeepPartial< { [key in ScaleType]: { type: key } & ScaleTypeRegistry[key]['options'] }[TScale] >; export type DatasetChartOptions<TType extends ChartType = ChartType> = { [key in TType]: { datasets: ChartTypeRegistry[key]['datasetOptions']; }; }; export type ScaleChartOptions<TType extends ChartType = ChartType> = { scales: { [key: string]: ScaleOptionsByType<ChartTypeRegistry[TType]['scales']>; }; }; export type ChartOptions<TType extends ChartType = ChartType> = DeepPartial< CoreChartOptions<TType> & ElementChartOptions & PluginChartOptions<TType> & DatasetChartOptions<TType> & ScaleChartOptions<TType> & ChartTypeRegistry[TType]['chartOptions'] >; export type DefaultDataPoint<TType extends ChartType> = DistributiveArray<ChartTypeRegistry[TType]['defaultDataPoint']>; export type ParsedDataType<TType extends ChartType = ChartType> = ChartTypeRegistry[TType]['parsedDataType']; export interface ChartDatasetProperties<TType extends ChartType, TData> { type?: TType; data: TData; } export type ChartDataset< TType extends ChartType = ChartType, TData = DefaultDataPoint<TType> > = DeepPartial< { [key in ChartType]: { type: key } & ChartTypeRegistry[key]['datasetOptions'] }[TType] > & ChartDatasetProperties<TType, TData>; export interface ChartData< TType extends ChartType = ChartType, TData = DefaultDataPoint<TType>, TLabel = unknown > { labels: TLabel[]; datasets: ChartDataset<TType, TData>[]; } export interface ChartConfiguration< TType extends ChartType = ChartType, TData = DefaultDataPoint<TType>, TLabel = unknown > { type: TType; data: ChartData<TType, TData, TLabel>; options?: ChartOptions<TType>; plugins?: Plugin<TType>[]; }
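For reference, a minimal sketch of how these typings compose at a call site. It assumes the standard chart.js package entry point, a 2D context obtained elsewhere, and made-up labels and data; 'index' is one of the InteractionMode values referenced in these declarations.

import { Chart, ChartConfiguration } from 'chart.js';

// Hypothetical context; a real page would use canvas.getContext('2d').
declare const ctx: CanvasRenderingContext2D;

// ChartConfiguration<TType, TData, TLabel> ties the dataset shape and the
// options to the chart type ('line' here), so misspelled option names or
// mismatched data points fail to compile.
const config: ChartConfiguration<'line', number[], string> = {
  type: 'line',
  data: {
    labels: ['Jan', 'Feb', 'Mar'],
    datasets: [{ label: 'Visits', data: [12, 19, 7], tension: 0.3 }],
  },
  options: {
    responsive: true,
    interaction: { mode: 'index', intersect: false },
    plugins: { tooltip: { enabled: true } },
  },
};

new Chart(ctx, config);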
vigenere-cipher.js
const CustomError = require("../extensions/custom-error");

class VigenereCipheringMachine {
  constructor(isDirect) {
    // `false` means the machine reverses its output; anything else
    // (including the default `undefined`) behaves as a direct machine.
    this.isDirect = isDirect;
  }

  encrypt(message, key) {
    if (!message || !key) throw new Error();
    if (message.match(/[a-zA-Z]+/g) === null) {
      return this.isDirect === false ? message.split('').reverse().join('') : message;
    }
    const keyWord = getKeyWord(message, key);
    let encrypted = '';
    let keyCount = 0;
    for (let i = 0; i < message.length; i++) {
      encrypted += /^[A-Za-z]$/.test(message[i])
        ? String.fromCharCode(getCodeEnc(message[i], keyWord[keyCount++]))
        : message[i];
    }
    return this.isDirect === false ? encrypted.split('').reverse().join('') : encrypted;
  }

  decrypt(encryptedMessage, key) {
    if (!encryptedMessage || !key) throw new Error();
    if (encryptedMessage.match(/[a-zA-Z]+/g) === null) {
      return this.isDirect === false
        ? encryptedMessage.split('').reverse().join('')
        : encryptedMessage;
    }
    const keyWord = getKeyWord(encryptedMessage, key);
    let decrypted = '';
    let keyCount = 0;
    for (let i = 0; i < encryptedMessage.length; i++) {
      decrypted += /^[A-Za-z]$/.test(encryptedMessage[i])
        ? String.fromCharCode(getCodeDec(encryptedMessage[i], keyWord[keyCount++]))
        : encryptedMessage[i];
    }
    return this.isDirect === false ? decrypted.split('').reverse().join('') : decrypted;
  }
}

// Repeats the key until it covers every letter of the message.
function getKeyWord(message, key) {
  const letterCount = message.match(/[a-zA-Z]+/g).join('').length;
  let keyWord = '';
  while (keyWord.length < letterCount) keyWord += key.toUpperCase();
  return keyWord.substring(0, letterCount);
}

// Shift the message letter forward by the key letter, wrapping within A-Z (65-90).
function getCodeEnc(charA, charB) {
  const code = charA.toUpperCase().charCodeAt(0) + (charB.toUpperCase().charCodeAt(0) - 65);
  return code > 90 ? code - 26 : code;
}

// Shift the cipher letter back by the key letter, wrapping within A-Z.
// Both characters are uppercased so lowercase cipher text also decrypts.
function getCodeDec(charA, charB) {
  const a = charA.toUpperCase().charCodeAt(0);
  const b = charB.toUpperCase().charCodeAt(0);
  return b - a <= 0 ? a - b + 65 : 91 - (b - a);
}

module.exports = VigenereCipheringMachine;
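A quick usage sketch for the machine above. The message/key pair is the classic kata example; the expected strings follow from the shifting rules in getCodeEnc/getCodeDec (letters uppercased, non-letters passed through).

const VigenereCipheringMachine = require('./vigenere-cipher');

const direct = new VigenereCipheringMachine(true);   // plain left-to-right output
const reverse = new VigenereCipheringMachine(false); // output is reversed

direct.encrypt('attack at dawn!', 'alphonse');  // 'AEIHQX SX DLLU!'
direct.decrypt('AEIHQX SX DLLU!', 'alphonse');  // 'ATTACK AT DAWN!'
reverse.encrypt('attack at dawn!', 'alphonse'); // '!ULLD XS XQHIEA'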
index.js
const { router, get } = require('microrouter');
const cors = require('micro-cors')({ allowMethods: ['GET'] });

module.exports = cors(
  router(
    get('/places', require('./routes/places')),
    get('/places/:id', require('./routes/places/show')),
    get('/*', require('./routes/404'))
  )
);
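Routes are tried top to bottom, so the '/*' wildcard only catches requests the two /places routes did not handle. A sketch of the expected behaviour follows; the handler body is an assumption, since the route modules are not part of this file.

// GET /places    -> ./routes/places
// GET /places/42 -> ./routes/places/show (microrouter exposes req.params.id === '42')
// GET /other     -> ./routes/404
// Non-GET requests match no route, and the CORS wrapper only allows GET anyway.

// A handler such as ./routes/places/show might look like this (hypothetical);
// micro serializes a returned object to JSON:
module.exports = (req, res) => ({ id: req.params.id });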
table.rs
use crate::data::value::{format_leaf, style_leaf}; use crate::format::RenderView; use crate::prelude::*; use derive_new::new; use nu_errors::ShellError; use nu_protocol::{UntaggedValue, Value}; use textwrap::fill; use prettytable::format::{Alignment, FormatBuilder, LinePosition, LineSeparator}; use prettytable::{color, Attr, Cell, Row, Table}; type Entries = Vec<Vec<(String, &'static str)>>; #[derive(Debug, new)] pub struct TableView { // List of header cell values: headers: Vec<String>, // List of rows of cells, each containing value and prettytable style-string: entries: Entries, } enum TableMode { Light, Normal, } impl TableView { pub fn from_list(values: &[Value], starting_idx: usize) -> Option<TableView> { if values.is_empty() { return None; } // Different platforms want different amounts of buffer, not sure why let termwidth = std::cmp::max(textwrap::termwidth(), 20); let mut headers = nu_protocol::merge_descriptors(values); let mut entries = values_to_entries(values, &mut headers, starting_idx); let max_per_column = max_per_column(&headers, &entries, values.len()); maybe_truncate_columns(&mut headers, &mut entries, termwidth); let headers_len = headers.len(); // Measure how big our columns need to be (accounting for separators also) let max_naive_column_width = (termwidth - 3 * (headers_len - 1)) / headers_len; let column_space = ColumnSpace::measure(&max_per_column, max_naive_column_width, headers_len); // This gives us the max column width let max_column_width = column_space.max_width(termwidth); // This width isn't quite right, as we're rounding off some of our space let column_space = column_space.fix_almost_column_width( &max_per_column, max_naive_column_width, max_column_width, headers_len, ); // This should give us the final max column width let max_column_width = column_space.max_width(termwidth); // Wrap cells as needed let table_view = wrap_cells( headers, entries, max_per_column, max_naive_column_width, max_column_width, ); Some(table_view) } } fn values_to_entries(values: &[Value], headers: &mut Vec<String>, starting_idx: usize) -> Entries { let mut entries = vec![]; if headers.is_empty() { headers.push("<value>".to_string()); } for (idx, value) in values.iter().enumerate() { let mut row: Vec<(String, &'static str)> = headers .iter() .map(|d: &String| { if d == "<value>" { match value { Value { value: UntaggedValue::Row(..), .. } => ( format_leaf(&UntaggedValue::nothing()).plain_string(100_000), style_leaf(&UntaggedValue::nothing()), ), _ => (format_leaf(value).plain_string(100_000), style_leaf(value)), } } else { match value { Value { value: UntaggedValue::Row(..), .. 
} => { let data = value.get_data(d); ( format_leaf(data.borrow()).plain_string(100_000), style_leaf(data.borrow()), ) } _ => ( format_leaf(&UntaggedValue::nothing()).plain_string(100_000), style_leaf(&UntaggedValue::nothing()), ), } } }) .collect(); // Indices are green, bold, right-aligned: row.insert(0, ((starting_idx + idx).to_string(), "Fgbr")); entries.push(row); } headers.insert(0, "#".to_owned()); entries } #[allow(clippy::ptr_arg)] fn max_per_column(headers: &[String], entries: &Entries, values_len: usize) -> Vec<usize> { let mut max_per_column = vec![]; for i in 0..headers.len() { let mut current_col_max = 0; let iter = entries.iter().take(values_len); for entry in iter { let value_length = entry[i].0.chars().count(); if value_length > current_col_max { current_col_max = value_length; } } max_per_column.push(std::cmp::max(current_col_max, headers[i].chars().count())); } max_per_column } fn maybe_truncate_columns(headers: &mut Vec<String>, entries: &mut Entries, termwidth: usize) { // Make sure we have enough space for the columns we have let max_num_of_columns = termwidth / 10; // If we have too many columns, truncate the table if max_num_of_columns < headers.len() { headers.truncate(max_num_of_columns); for entry in entries.iter_mut() { entry.truncate(max_num_of_columns); } headers.push("...".to_owned()); for entry in entries.iter_mut() { entry.push(("...".to_owned(), "c")); // ellipsis is centred } } } struct ColumnSpace { num_overages: usize, underage_sum: usize, overage_separator_sum: usize, } impl ColumnSpace { /// Measure how much space we have once we subtract off the columns who are small enough fn measure( max_per_column: &[usize], max_naive_column_width: usize, headers_len: usize, ) -> ColumnSpace { let mut num_overages = 0; let mut underage_sum = 0; let mut overage_separator_sum = 0; let iter = max_per_column.iter().enumerate().take(headers_len); for (i, &column_max) in iter { if column_max > max_naive_column_width { num_overages += 1; if i != (headers_len - 1) { overage_separator_sum += 3; } if i == 0 { overage_separator_sum += 1; } } else { underage_sum += column_max; // if column isn't last, add 3 for its separator if i != (headers_len - 1) { underage_sum += 3; } if i == 0 { underage_sum += 1; } } } ColumnSpace { num_overages, underage_sum, overage_separator_sum, } } fn fix_almost_column_width( self, max_per_column: &[usize], max_naive_column_width: usize, max_column_width: usize, headers_len: usize, ) -> ColumnSpace { let mut num_overages = 0; let mut overage_separator_sum = 0; let mut underage_sum = self.underage_sum; let iter = max_per_column.iter().enumerate().take(headers_len); for (i, &column_max) in iter { if column_max > max_naive_column_width { if column_max <= max_column_width { underage_sum += column_max; // if column isn't last, add 3 for its separator if i != (headers_len - 1) { underage_sum += 3; } if i == 0 { underage_sum += 1; } } else { // Column is still too large, so let's count it num_overages += 1; if i != (headers_len - 1) { overage_separator_sum += 3; } if i == 0 { overage_separator_sum += 1; } } }
} }
ColumnSpace { num_overages, underage_sum, overage_separator_sum, } } fn max_width(&self, termwidth: usize) -> usize { let ColumnSpace { num_overages, underage_sum, overage_separator_sum, } = self; if *num_overages > 0 { (termwidth - 1 - *underage_sum - *overage_separator_sum) / *num_overages } else { 99999 } } } fn wrap_cells( mut headers: Vec<String>, mut entries: Entries, max_per_column: Vec<usize>, max_naive_column_width: usize, max_column_width: usize, ) -> TableView { for head in 0..headers.len() { if max_per_column[head] > max_naive_column_width { headers[head] = fill(&headers[head], max_column_width); for entry in entries.iter_mut() { entry[head].0 = fill(&entry[head].0, max_column_width); } } } TableView { headers, entries } } impl RenderView for TableView { fn render_view(&self, host: &mut dyn Host) -> Result<(), ShellError> { if self.entries.is_empty() { return Ok(()); } let mut table = Table::new(); let mut config = crate::data::config::config(Tag::unknown())?; let header_align = config.get("header_align").map_or(Alignment::LEFT, |a| { a.as_string() .map_or(Alignment::LEFT, |a| match a.to_lowercase().as_str() { "center" | "c" => Alignment::CENTER, "right" | "r" => Alignment::RIGHT, _ => Alignment::LEFT, }) }); let header_color = config.get("header_color").map_or(color::GREEN, |c| { c.as_string().map_or(color::GREEN, |c| { str_to_color(c.to_lowercase()).unwrap_or(color::GREEN) }) }); let header_style = config .remove("header_style") .map_or(vec![Attr::Bold], |y| match y.value { UntaggedValue::Table(t) => to_style_vec(t), UntaggedValue::Primitive(p) => vec![p .into_string(Span::unknown()) .map_or(Attr::Bold, |s| str_to_style(s).unwrap_or(Attr::Bold))], _ => vec![Attr::Bold], }); let table_mode = if let Some(s) = config.get("table_mode") { match s.as_string() { Ok(typ) if typ == "light" => TableMode::Light, _ => TableMode::Normal, } } else { TableMode::Normal }; match table_mode { TableMode::Light => { table.set_format( FormatBuilder::new() .separator(LinePosition::Title, LineSeparator::new('─', '─', ' ', ' ')) .padding(1, 1) .build(), ); } _ => { table.set_format( FormatBuilder::new() .column_separator('│') .separator(LinePosition::Top, LineSeparator::new('─', '┬', ' ', ' ')) .separator(LinePosition::Title, LineSeparator::new('─', '┼', ' ', ' ')) .separator(LinePosition::Bottom, LineSeparator::new('─', '┴', ' ', ' ')) .padding(1, 1) .build(), ); } } let header: Vec<Cell> = self .headers .iter() .map(|h| { let mut c = Cell::new_align(h, header_align) .with_style(Attr::ForegroundColor(header_color)); for &s in &header_style { c.style(s); } c }) .collect(); table.set_titles(Row::new(header)); for row in &self.entries { table.add_row(Row::new( row.iter() .map(|(v, s)| Cell::new(v).style_spec(s)) .collect(), )); } table.print_term(&mut *host.out_terminal().ok_or_else(|| ShellError::untagged_runtime_error("Could not open terminal for output"))?) 
.map_err(|_| ShellError::untagged_runtime_error("Internal error: could not print to terminal (for unix systems check to make sure TERM is set)"))?; Ok(()) } } fn str_to_color(s: String) -> Option<color::Color> { match s.as_str() { "g" | "green" => Some(color::GREEN), "r" | "red" => Some(color::RED), "u" | "blue" => Some(color::BLUE), "b" | "black" => Some(color::BLACK), "y" | "yellow" => Some(color::YELLOW), "m" | "magenta" => Some(color::MAGENTA), "c" | "cyan" => Some(color::CYAN), "w" | "white" => Some(color::WHITE), "bg" | "bright green" => Some(color::BRIGHT_GREEN), "br" | "bright red" => Some(color::BRIGHT_RED), "bu" | "bright blue" => Some(color::BRIGHT_BLUE), "by" | "bright yellow" => Some(color::BRIGHT_YELLOW), "bm" | "bright magenta" => Some(color::BRIGHT_MAGENTA), "bc" | "bright cyan" => Some(color::BRIGHT_CYAN), "bw" | "bright white" => Some(color::BRIGHT_WHITE), _ => None, } } fn to_style_vec(a: Vec<Value>) -> Vec<Attr> { let mut v: Vec<Attr> = Vec::new(); for t in a { if let Ok(s) = t.as_string() { if let Some(r) = str_to_style(s) { v.push(r); } } } v } fn str_to_style(s: String) -> Option<Attr> { match s.as_str() { "b" | "bold" => Some(Attr::Bold), "i" | "italic" | "italics" => Some(Attr::Italic(true)), "u" | "underline" | "underlined" => Some(Attr::Underline(true)), _ => None, } }
}
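A note on the width budget in ColumnSpace::max_width above: the remaining terminal width is split evenly among the columns that still overflow, after subtracting the space already committed to the narrow columns and their separators. A minimal sketch of that arithmetic in Python (illustrative only; the constants mirror the Rust code's 3-character separators and 1-character margin):

# Sketch of ColumnSpace::max_width's budget arithmetic (illustrative).
def max_width(termwidth, num_overages, underage_sum, overage_separator_sum):
    if num_overages > 0:
        # Split what is left of the terminal evenly among overflowing columns.
        # (The Rust version uses usize, so this subtraction would panic in
        # debug builds if the narrow columns alone exceeded the terminal.)
        return (termwidth - 1 - underage_sum - overage_separator_sum) // num_overages
    return 99999  # nothing overflows: effectively no cap

# 80-char terminal, 2 overflowing columns, 30 chars of narrow columns,
# 7 chars of separators charged to the wide ones: (80-1-30-7)//2 == 21.
assert max_width(80, 2, 30, 7) == 21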
maps.helpers.ts
import { ECharacteristic, EDifficulty, TMapDetail, TMapDifficulty, TMapVersion } from '../api/api.models'; import { TDifficultyIndex, TLevelStatsData, TLevelStatsInfo } from '../player/player-data.model'; export class
{ public static getScoreClass(score: number): string { if (score >= 90) return 'score-90'; else if (score >= 80) return 'score-80'; return 'score-0'; } public static getLatestVersion(mapDetail: TMapDetail): TMapVersion { return mapDetail.versions.sort((a: TMapVersion, b: TMapVersion) => a.createdAt < b.createdAt ? -1 : a.createdAt === b.createdAt ? 0 : 1 )[mapDetail.versions.length - 1]; } public static getDifficultyLabel(difficulty?: EDifficulty): string { switch (difficulty) { case EDifficulty.Easy: case EDifficulty.Normal: case EDifficulty.Hard: case EDifficulty.Expert: { return difficulty; } case EDifficulty.ExpertPlus: { return 'Expert+'; } default: { return 'N/A'; } } } public static getCharacteristicIcon(characteristic?: ECharacteristic): string | undefined { if (!characteristic) return undefined; return `assets/icons/${characteristic.toLowerCase()}.svg`; } public static getIndexFromDifficulty(diff?: EDifficulty): TDifficultyIndex | undefined { switch (diff) { case EDifficulty.Easy: { return 0; } case EDifficulty.Normal: { return 1; } case EDifficulty.Hard: { return 2; } case EDifficulty.Expert: { return 3; } case EDifficulty.ExpertPlus: { return 4; } default: { return undefined; } } } public static getDifficulyFromIndex(index?: TDifficultyIndex): EDifficulty | undefined { switch (index) { case 0: { return EDifficulty.Easy; } case 1: { return EDifficulty.Normal; } case 2: { return EDifficulty.Hard; } case 3: { return EDifficulty.Expert; } case 4: { return EDifficulty.ExpertPlus; } default: { return undefined; } } } public static getDifficultyGroupedByChar( mapVersion: TMapVersion ): Map<ECharacteristic, TMapDifficulty[]> { const groupedDifs = new Map<ECharacteristic, TMapDifficulty[]>(); for (const dif of mapVersion.diffs) { if (groupedDifs.has(dif.characteristic)) { groupedDifs.get(dif.characteristic)?.push(dif); } else { groupedDifs.set(dif.characteristic, [dif]); } } return groupedDifs; } public static getPlayerLevelStatsGroupedByChar( playerLevelStats: TLevelStatsInfo ): Map<ECharacteristic, TLevelStatsData[]> { const groupedStats = new Map<ECharacteristic, TLevelStatsData[]>(); for (const stat of playerLevelStats.levelStats) { if (groupedStats.has(stat.beatmapCharacteristicName)) { groupedStats.get(stat.beatmapCharacteristicName)?.push(stat); } else { groupedStats.set(stat.beatmapCharacteristicName, [stat]); } } return groupedStats; } public static getDifficultyScoreSaberIndex(diff: EDifficulty): number { switch (diff) { case EDifficulty.Easy: { return 1; } case EDifficulty.Normal: { return 3; } case EDifficulty.Hard: { return 5; } case EDifficulty.Expert: { return 7; } case EDifficulty.ExpertPlus: { return 9; } } } public static getDifficultyFromScoreSaberIndex(diff: number): EDifficulty | undefined { switch (diff) { case 1: { return EDifficulty.Easy; } case 3: { return EDifficulty.Normal; } case 5: { return EDifficulty.Hard; } case 7: { return EDifficulty.Expert; } case 9: { return EDifficulty.ExpertPlus; } default: { return undefined; } } } public static getCharacteristicScoreSaberIndex(char: ECharacteristic): number { switch (char) { case ECharacteristic.Standard: { return 0; } case ECharacteristic.OneSaber: { return 1; } case ECharacteristic.NoArrows: { return 2; } case ECharacteristic['90Degree']: { return 3; } case ECharacteristic['360Degree']: { return 4; } case ECharacteristic.Lightshow: { return 5; } case ECharacteristic.Lawless: { return 6; } } } public static computeDiffId(diff?: TMapDifficulty): string | undefined { if (!diff) return undefined; return 
diff.difficulty + diff.characteristic; } public static calculateMaxScore(notes: number): number { if (notes >= 13) return (notes - 13) * 8 * 115 + 4715; else if (notes > 0) { if (notes === 1) return 115; else if (notes <= 5) return (notes - 1) * 2 * 115 + 115; else return (notes - 5) * 4 * 115 + 920 + 115; } return 0; } public static calculateScorePercent(maxScore: number, playerScore: number): number { return (100 / maxScore) * playerScore; } }
MapsHelpers
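The calculateMaxScore helper in MapsHelpers encodes Beat Saber's combo multiplier ramp: note 1 scores 115, notes 2 through 5 score 230 each, notes 6 through 13 score 460 each, and every note from the 14th onward scores 920, which is why 13 notes total exactly 4715. A Python re-expression of the same tiers, for illustration only:

def calculate_max_score(notes):
    # Multiplier ramp: 1x (note 1), 2x (notes 2-5), 4x (notes 6-13),
    # 8x (note 14 onward), with a base note score of 115.
    if notes <= 0:
        return 0
    if notes >= 13:
        return (notes - 13) * 8 * 115 + 4715
    if notes == 1:
        return 115
    if notes <= 5:
        return (notes - 1) * 2 * 115 + 115
    return (notes - 5) * 4 * 115 + 920 + 115

assert calculate_max_score(1) == 115
assert calculate_max_score(5) == 115 + 4 * 230   # 1035
assert calculate_max_score(14) == 4715 + 920     # 5635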
peer_disconnection_effects.rs
// Copyright (c) SimpleStaking, Viable Systems and Tezedge Contributors // SPDX-License-Identifier: MIT use crate::peer::connection::outgoing::PeerConnectionOutgoingRandomInitAction; use crate::peer::PeerStatus; use crate::peers::remove::PeersRemoveAction; use crate::service::actors_service::ActorsMessageTo; use crate::service::{ActorsService, MioService, Service}; use crate::{Action, ActionWithMeta, Store}; use super::PeerDisconnectedAction; pub fn peer_disconnection_effects<S>(store: &mut Store<S>, action: &ActionWithMeta) where S: Service,
{ match &action.action { Action::PeerDisconnect(action) => { let address = action.address; let peer = match store.state.get().peers.get(&address) { Some(v) => v, None => return, }; match &peer.status { PeerStatus::Disconnecting(disconnection_state) => { let peer_token = disconnection_state.token; store.service().mio().peer_disconnect(peer_token); store.dispatch(PeerDisconnectedAction { address }); } PeerStatus::Disconnected => { store.dispatch(PeerDisconnectedAction { address }); } _ => return, }; } Action::PeerDisconnected(action) => { if let Some(peer) = store.state.get().peers.get(&action.address) { if matches!(&peer.status, PeerStatus::Disconnected) { let address = action.address; store .service .actors() .send(ActorsMessageTo::PeerDisconnected(address)); store.dispatch(PeersRemoveAction { address }); store.dispatch(PeerConnectionOutgoingRandomInitAction {}); } } } _ => {} } }
create_indexes.js
'use strict'; const ReadPreference = require('../read_preference'); const { Aspect, defineAspects, OperationBase } = require('./operation'); const { executeCommand } = require('./db_ops'); const { MongoError } = require('../error'); class
extends OperationBase { constructor(collection, indexSpecs, options) { super(options); this.collection = collection; this.indexSpecs = indexSpecs; } execute(callback) { const coll = this.collection; const indexSpecs = this.indexSpecs; let options = this.options; const capabilities = coll.s.topology.capabilities(); // Ensure we generate the correct name if the parameter is not set for (let i = 0; i < indexSpecs.length; i++) { if (indexSpecs[i].name == null) { const keys = []; // Did the user pass in a collation, check if our write server supports it if (indexSpecs[i].collation && capabilities && !capabilities.commandsTakeCollation) { return callback(new MongoError('server/primary/mongos does not support collation')); } for (let name in indexSpecs[i].key) { keys.push(`${name}_${indexSpecs[i].key[name]}`); } // Set the name indexSpecs[i].name = keys.join('_'); } } options = Object.assign({}, options, { readPreference: ReadPreference.PRIMARY }); // Execute the index executeCommand( coll.s.db, { createIndexes: coll.collectionName, indexes: indexSpecs }, options, callback ); } } defineAspects(CreateIndexesOperation, Aspect.WRITE_OPERATION); module.exports = CreateIndexesOperation;
CreateIndexesOperation
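When no name is given, CreateIndexesOperation derives one by joining each key field with its direction, so { a: 1, b: -1 } becomes "a_1_b_-1". A quick sketch of that derivation in Python (illustrative; key order matters, so the spec is modelled as ordered pairs):

def default_index_name(key_spec):
    # "<field>_<direction>" pairs joined with underscores, in key order,
    # mirroring the keys.push(`${name}_${indexSpecs[i].key[name]}`) loop.
    return "_".join("{}_{}".format(field, direction) for field, direction in key_spec)

assert default_index_name([("a", 1), ("b", -1)]) == "a_1_b_-1"
assert default_index_name([("loc", "2dsphere")]) == "loc_2dsphere"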
index.js
(() => { "use strict"; const isArray = require('./isArray/isArray'); const isBissexto = require('./isBissexto/isBissexto'); const isBoolean = require('./isBoolean/isBoolean'); const isCep = require('./isCep/isCep'); const isCnpj = require('./isCnpj/isCnpj'); const isDate = require('./isDate/isDate'); const isEmail = require('./isEmail/isEmail'); const isEmpty = require('./isEmpty/isEmpty'); const isEnum = require('./isEnum/isEnum'); const isInArray = require('./isInArray/isInArray'); const isInObject = require('./isInObject/isInObject'); const isInProfileFields = require('./isInProfileFields/isInProfileFields'); const isISBN13 = require('./isISBN-13/isISBN-13'); const isName = require('./isName/isName'); const isObject = require('./isObject/isObject'); const isString = require('./isString/isString'); const toCapitalize = require('./toCapitalize/toCapitalize'); const toLowerCase = require('./toLowerCase/toLowerCase'); const toUpperCase = require('./toUpperCase/toUpperCase'); module.exports = { isArray , isBissexto , isBoolean , isCep , isCnpj
, isEmail , isEmpty , isEnum , isInArray , isInObject , isInProfileFields , isISBN13 , isName , isObject , isString , toCapitalize , toLowerCase , toUpperCase } })();
, isDate
mod.rs
use std::env; use std::path::PathBuf; use crate::error::{Error, PersistError}; pub static PID_FILE: &str = "daemon.pid"; pub static SOCK_FILE: &str = "daemon.sock"; pub static PIDS_DIR: &str = "pids"; pub static LOGS_DIR: &str = "logs"; pub fn home_dir() -> Result<PathBuf, Error> { fn recursive_search() -> Result<PathBuf, Error> { let current_dir = env::current_dir()?; let found = current_dir .ancestors() .map(|path| path.join(".persist")) .find(|path| path.is_dir()) .ok_or(PersistError::DaemonNotFound)?; Ok(found) } env::var("PERSIST_HOME") .map(PathBuf::from)
}
.or_else(|_| recursive_search())
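home_dir above prefers the PERSIST_HOME environment variable and only falls back to walking up from the current directory until some ancestor contains a .persist directory. The same ancestor search sketched in Python (illustrative; FileNotFoundError stands in for PersistError::DaemonNotFound):

import os
from pathlib import Path

def home_dir():
    # An explicit override wins, mirroring env::var("PERSIST_HOME").
    override = os.environ.get("PERSIST_HOME")
    if override:
        return Path(override)
    # Rust's ancestors() yields the path itself first, then each parent.
    for ancestor in [Path.cwd(), *Path.cwd().parents]:
        candidate = ancestor / ".persist"
        if candidate.is_dir():
            return candidate
    raise FileNotFoundError("no .persist directory found in any ancestor")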
main.rs
// enum IPTypes { // V4, V6, // } // struct IpAddr { // kind: IPTypes, // address: String, // } // #[derive(Debug)] // enum IpAddrKind { // V4(u8, u8, u8, u8), // V6(String), // } // enum Msg { // Quit, // Move {x: u32, y: u32, z: u32 }, // Write(String), // ChangeColor(u32, u32, u32), // } // impl Msg { // fn call(&self) { // println!("call") // } // } fn main() { // let home = IpAddr { // kind: IPTypes::V4, // address: String::from("127.0.0.1"), // }; // pip(home); // let loopback = IpAddr { // kind: IPTypes::V6, // address: String::from("::1"), // }; // pip(loopback); // let home = IpAddrKind::V4(0,0,0,0); // let loopback = IpAddrKind::V6(String::from("::1")); // pip2(home); // let q = Msg::Quit; // let m = Msg::Move {x: 1, y:2, z:3}; // let w = Msg::Write(String::from("::1")); // let c = Msg::ChangeColor(1,2,3); // c.call() let x: i8 = 1; let y: Option<i8> = Some(12); println!("{}", x); println!("{:?}", y); // let z = x + y; } // fn pip2(address : IpAddrKind) { // println!("{:?}", address); // }
// fn pip(address : IpAddr) { // println!("{}", address.address) // }
extends.js
function
() { module.exports = _extends = Object.assign || function (target) { for (var i = 1; i < arguments.length; i++) { var source = arguments[i]; for (var key in source) { if (Object.prototype.hasOwnProperty.call(source, key)) { target[key] = source[key]; } } } return target; }; return _extends.apply(this, arguments); } module.exports = _extends;
_extends
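_extends is the standard Babel helper: it uses native Object.assign when available and otherwise copies own enumerable properties left to right, so later sources overwrite earlier ones. The merge semantics in a few lines of Python, for illustration:

def extends(target, *sources):
    # Later sources win, matching the left-to-right copy loop.
    for source in sources:
        for key, value in source.items():
            target[key] = value
    return target

assert extends({"a": 1}, {"a": 2, "b": 3}, {"b": 4}) == {"a": 2, "b": 4}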
main.rs
mod error; mod server; pub use error::*; pub use server::FileDropper; use byte_unit::Byte; use getopts::Options; use log::error; fn print_usage(program: &str, opts: Options) { let brief = format!("Usage: {} OUTPUT [options]", program); print!("{}", opts.usage(&brief)); } fn
() {
    let args: Vec<String> = std::env::args().collect();
    let program = args[0].clone();

    let mut opts = Options::new();
    opts.optopt(
        "l",
        "listen",
        "Listen on the specified sockaddr",
        "ADDR:PORT",
    );
    opts.optopt(
        "s",
        "max_size",
        "Maximum allowed request body size",
        "BYTES",
    );
    opts.optopt("b", "before_text", "Text shown before the upload", "TEXT");
    opts.optopt("e", "error_text", "Text shown if an error occurs", "TEXT");
    opts.optopt(
        "t",
        "success_text",
        "Text shown after a successful upload",
        "TEXT",
    );
    opts.optflag("h", "help", "Display this help text and exit");
    let matches = match opts.parse(&args[1..]) {
        Ok(m) => m,
        Err(f) => {
            panic!("{}", f)
        }
    };
    if matches.opt_present("h") {
        print_usage(&program, opts);
        return;
    }
    let listen_addr = matches
        .opt_str("l")
        .as_deref()
        .unwrap_or("127.0.0.1:3000")
        .parse()
        .unwrap();
    let max_size = Byte::from_str(matches.opt_str("s").as_deref().unwrap_or("100M"))
        .unwrap()
        .get_bytes();
    let before_text = matches.opt_str("b").unwrap_or_default();
    let error_text = matches
        .opt_str("e")
        .unwrap_or_else(|| "Error: ${error}".to_string());
    let success_text = matches
        .opt_str("t")
        .unwrap_or_else(|| "Upload successful!".to_string());
    let output = if !matches.free.is_empty() {
        matches.free[0].clone()
    } else {
        print_usage(&program, opts);
        return;
    };
    env_logger::init();
    let html = include_str!("index.html")
        .replace("@beforeText@", &before_text)
        .replace("@errorText@", &error_text)
        .replace("@successText@", &success_text);
    FileDropper::new(listen_addr, output, max_size, html)
        .serve()
        .unwrap_or_else(|e| {
            error!("Error: {}, exiting", e);
            std::process::exit(1);
        });
}
main
agent.py
#!flask/bin/python
import getopt
import json
import os
import shutil
import socket
import string
import random
import subprocess
import sys

from .common.utils import try_set_file_permissions

from flask import Flask, jsonify, request, abort, Response

app = Flask(__name__)
CLUSTER_API="cluster/api/v1.0"
snapdata_path = os.environ.get('SNAP_DATA')
snap_path = os.environ.get('SNAP')
cluster_tokens_file = "{}/credentials/cluster-tokens.txt".format(snapdata_path)
callback_tokens_file = "{}/credentials/callback-tokens.txt".format(snapdata_path)
callback_token_file = "{}/credentials/callback-token.txt".format(snapdata_path)
certs_request_tokens_file = "{}/credentials/certs-request-tokens.txt".format(snapdata_path)
default_port = 25000
default_listen_interface = "0.0.0.0"


def get_service_name(service):
    """
    Returns the service name from its configuration file name.

    :param service: the name of the service configuration file
    :returns: the service name
    """
    if service in ["kube-proxy", "kube-apiserver", "kube-scheduler", "kube-controller-manager"]:
        return service[len("kube-"):]
    else:
        return service


def update_service_argument(service, key, val):
    """
    Adds an argument to the arguments file of the service.

    :param service: the service
    :param key: the argument to add
    :param val: the value for the argument
    """
    args_file = "{}/args/{}".format(snapdata_path, service)
    args_file_tmp = "{}/args/{}.tmp".format(snapdata_path, service)
    found = False
    with open(args_file_tmp, "w+") as bfp:
        with open(args_file, "r+") as fp:
            for _, line in enumerate(fp):
                if line.startswith(key):
                    if val is not None:
                        bfp.write("{}={}\n".format(key, val))
                        found = True
                else:
                    bfp.write("{}\n".format(line.rstrip()))
        if not found and val is not None:
            bfp.write("{}={}\n".format(key, val))
    try_set_file_permissions(args_file_tmp)
    shutil.move(args_file_tmp, args_file)


def store_callback_token(node, callback_token):
    """
    Store a callback token

    :param node: the node
    :param callback_token: the token
    """
    tmp_file = "{}.tmp".format(callback_tokens_file)
    if not os.path.isfile(callback_tokens_file):
        open(callback_tokens_file, 'a+')
        os.chmod(callback_tokens_file, 0o600)
    with open(tmp_file, "w") as backup_fp:
        os.chmod(tmp_file, 0o600)
        found = False
        with open(callback_tokens_file, 'r+') as callback_fp:
            for _, line in enumerate(callback_fp):
                if line.startswith(node):
                    backup_fp.write("{} {}\n".format(node, callback_token))
                    found = True
                else:
                    backup_fp.write(line)
        if not found:
            backup_fp.write("{} {}\n".format(node, callback_token))
    try_set_file_permissions(tmp_file)
    shutil.move(tmp_file, callback_tokens_file)


def sign_client_cert(cert_request, token):
    """
    Sign a certificate request

    :param cert_request: the request
    :param token: a token acting as a request uuid
    :returns: the certificate
    """
    req_file = "{}/certs/request.{}.csr".format(snapdata_path, token)
    sign_cmd = "openssl x509 -req -in {csr} -CA {SNAP_DATA}/certs/ca.crt -CAkey" \
               " {SNAP_DATA}/certs/ca.key -CAcreateserial -out {SNAP_DATA}/certs/server.{token}.crt" \
               " -days 100000".format(csr=req_file, SNAP_DATA=snapdata_path, token=token)

    with open(req_file, 'w') as fp:
        fp.write(cert_request)
    subprocess.check_call(sign_cmd.split())
    with open("{SNAP_DATA}/certs/server.{token}.crt".format(SNAP_DATA=snapdata_path, token=token)) as fp:
        cert = fp.read()
    return cert


def add_token_to_certs_request(token):
    """
    Add a token to the file holding the nodes we expect a certificate request from

    :param token: the token
    """
    with open(certs_request_tokens_file, "a+") as fp:
        fp.write("{}\n".format(token))

def remove_token_from_file(token, file):
    """
    Remove a token from the valid tokens set

    :param token: the token to be removed
    :param file: the file to be removed from
    """
    backup_file = "{}.backup".format(file)
    # That is a critical section. We need to protect it.
    # We are safe for now because flask serves one request at a time.
    with open(backup_file, 'w') as back_fp:
        with open(file, 'r') as fp:
            for _, line in enumerate(fp):
                if line.startswith(token):
                    continue
                back_fp.write("{}".format(line))

    shutil.copyfile(backup_file, file)


def get_token(name):
    """
    Get token from known_tokens file

    :param name: the name of the node
    :returns: the token or None(if name doesn't exist)
    """
    file = "{}/credentials/known_tokens.csv".format(snapdata_path)
    with open(file) as fp:
        # Scan every line: the token we want is not necessarily the first one.
        for line in fp:
            if name in line:
                parts = line.split(',')
                return parts[0].rstrip()
    return None


def add_kubelet_token(hostname):
    """
    Add a token for a node in the known tokens

    :param hostname: the name of the node
    :returns: the token added
    """
    file = "{}/credentials/known_tokens.csv".format(snapdata_path)
    old_token = get_token("system:node:{}".format(hostname))
    if old_token:
        return old_token.rstrip()

    alpha = string.ascii_letters + string.digits
    token = ''.join(random.SystemRandom().choice(alpha) for _ in range(32))
    uid = ''.join(random.SystemRandom().choice(string.digits) for _ in range(8))
    with open(file, 'a') as fp:
        # TODO double check this format. Why is userid unique?
        line = "{},system:node:{},kubelet,kubelet-{},\"system:nodes\"".format(token, hostname, uid)
        fp.write(line + os.linesep)
    return token.rstrip()


def getCA():
    """
    Return the CA

    :returns: the CA file contents
    """
    ca_file = "{}/certs/ca.crt".format(snapdata_path)
    with open(ca_file) as fp:
        ca = fp.read()
    return ca


def get_arg(key, file):
    """
    Get an argument from an arguments file

    :param key: the argument we look for
    :param file: the arguments file to search in
    :returns: the value of the argument or None(if the key doesn't exist)
    """
    filename = "{}/args/{}".format(snapdata_path, file)
    with open(filename) as fp:
        for _, line in enumerate(fp):
            if line.startswith(key):
                args = line.split(' ')
                args = args[-1].split('=')
                return args[-1].rstrip()
    return None


def is_valid(token, token_type=cluster_tokens_file):
    """
    Check whether a token is valid

    :param token: token to be checked
    :param token_type: the type of token (bootstrap or signature)
    :returns: True for a valid token, False otherwise
    """
    with open(token_type) as fp:
        for _, line in enumerate(fp):
            if line.startswith(token):
                return True
    return False


def 
(node=None): """ Return the contents of the kubelet arguments file :param node: node to add a host override (defaults to None) :returns: the kubelet args file """ filename = "{}/args/kubelet".format(snapdata_path) with open(filename) as fp: args = fp.read() if node: args = "{}--hostname-override {}".format(args, node) return args def get_node_ep(hostname, remote_addr): """ Return the endpoint to be used for the node based by trying to resolve the hostname provided :param hostname: the provided hostname :param remote_addr: the address the request came from :returns: the node's location """ try: socket.gethostbyname(hostname) return hostname except socket.gaierror: return remote_addr return remote_addr @app.route('/{}/join'.format(CLUSTER_API), methods=['POST']) def join_node(): """ Web call to join a node to the cluster """ if request.headers['Content-Type'] == 'application/json': token = request.json['token'] hostname = request.json['hostname'] port = request.json['port'] callback_token = request.json['callback'] else: token = request.form['token'] hostname = request.form['hostname'] port = request.form['port'] callback_token = request.form['callback'] if not is_valid(token): error_msg={"error": "Invalid token"} return Response(json.dumps(error_msg), mimetype='application/json', status=500) add_token_to_certs_request(token) remove_token_from_file(token, cluster_tokens_file) node_addr = get_node_ep(hostname, request.remote_addr) node_ep = "{}:{}".format(node_addr, port) store_callback_token(node_ep, callback_token) ca = getCA() etcd_ep = get_arg('--listen-client-urls', 'etcd') api_port = get_arg('--secure-port', 'kube-apiserver') proxy_token = get_token('kube-proxy') kubelet_token = add_kubelet_token(hostname) subprocess.check_call("systemctl restart snap.microk8s.daemon-apiserver.service".split()) if node_addr != hostname: kubelet_args = read_kubelet_args_file(node_addr) else: kubelet_args = read_kubelet_args_file() return jsonify(ca=ca, etcd=etcd_ep, kubeproxy=proxy_token, apiport=api_port, kubelet=kubelet_token, kubelet_args=kubelet_args, hostname_override=node_addr) @app.route('/{}/sign-cert'.format(CLUSTER_API), methods=['POST']) def sign_cert(): """ Web call to sign a certificate """ if request.headers['Content-Type'] == 'application/json': token = request.json['token'] cert_request = request.json['request'] else: token = request.form['token'] cert_request = request.form['request'] if not is_valid(token, certs_request_tokens_file): error_msg={"error": "Invalid token"} return Response(json.dumps(error_msg), mimetype='application/json', status=500) remove_token_from_file(token, certs_request_tokens_file) signed_cert = sign_client_cert(cert_request, token) return jsonify(certificate=signed_cert) @app.route('/{}/configure'.format(CLUSTER_API), methods=['POST']) def configure(): """ Web call to configure the node """ if request.headers['Content-Type'] == 'application/json': callback_token = request.json['callback'] configuration = request.json else: callback_token = request.form['callback'] configuration = json.loads(request.form['configuration']) if not is_valid(callback_token, callback_token_file): error_msg={"error": "Invalid token"} return Response(json.dumps(error_msg), mimetype='application/json', status=500) # We expect something like this: ''' { "callback": "xyztoken" "service": [ { "name": "kubelet", "arguments_remove": [ "myoldarg" ], "arguments_update": [ {"myarg": "myvalue"}, {"myarg2": "myvalue2"}, {"myarg3": "myvalue3"} ], "restart": False }, { "name": "kube-proxy", 
"restart": True } ], "addon": [ { "name": "gpu", "enable": True }, { "name": "gpu", "disable": True } ] } ''' if "service" in configuration: for service in configuration["service"]: print("{}".format(service["name"])) if "arguments_update" in service: print("Updating arguments") for argument in service["arguments_update"]: for key, val in argument.items(): print("{} is {}".format(key, val)) update_service_argument(service["name"], key, val) if "arguments_remove" in service: print("Removing arguments") for argument in service["arguments_remove"]: print("{}".format(argument)) update_service_argument(service["name"], argument, None) if "restart" in service and service["restart"]: service_name = get_service_name(service["name"]) print("restarting {}".format(service["name"])) subprocess.check_call("systemctl restart snap.microk8s.daemon-{}.service".format(service_name).split()) if "addon" in configuration: for addon in configuration["addon"]: print("{}".format(addon["name"])) if "enable" in addon and addon["enable"]: print("Enabling {}".format(addon["name"])) subprocess.check_call("{}/microk8s-enable.wrapper {}".format(snap_path, addon["name"]).split()) if "disable" in addon and addon["disable"]: print("Disabling {}".format(addon["name"])) subprocess.check_call("{}/microk8s-disable.wrapper {}".format(snap_path, addon["name"]).split()) resp_date = {"result": "ok"} resp = Response(json.dumps(resp_date), status=200, mimetype='application/json') return resp def usage(): print("Agent responsible for setting up a cluster. Arguments:") print("-l, --listen: interfaces to listen to (defaults to {})".format(default_listen_interface)) print("-p, --port: port to listen to (default {})".format(default_port)) if __name__ == '__main__': server_cert = "{SNAP_DATA}/certs/server.crt".format(SNAP_DATA=snapdata_path) server_key = "{SNAP_DATA}/certs/server.key".format(SNAP_DATA=snapdata_path) try: opts, args = getopt.gnu_getopt(sys.argv[1:], "hl:p:", ["help", "listen=", "port="]) except getopt.GetoptError as err: print(err) # will print something like "option -a not recognized" usage() sys.exit(2) port = default_port listen = default_listen_interface for o, a in opts: if o in ("-l", "--listen"): listen = a if o in ("-p", "--port"): port = a elif o in ("-h", "--help"): usage() sys.exit(1) else: assert False, "unhandled option" app.run(host=listen, port=port, ssl_context=(server_cert, server_key))
read_kubelet_args_file
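update_service_argument in the agent above rewrites a service's arguments file line by line: an existing key line is replaced, a missing key is appended, and passing val=None drops the line entirely. The same rewrite condensed onto an in-memory list (a hypothetical helper, not part of the agent):

def update_argument_lines(lines, key, val):
    # Replace an existing "key=..." line (or drop it when val is None);
    # append "key=val" when the key was never present.
    out, found = [], False
    for line in lines:
        if line.startswith(key):
            found = True
            if val is not None:
                out.append("{}={}".format(key, val))
        else:
            out.append(line)
    if not found and val is not None:
        out.append("{}={}".format(key, val))
    return out

assert update_argument_lines(["--port=10255"], "--port", 10250) == ["--port=10250"]
assert update_argument_lines(["--port=10255"], "--port", None) == []
assert update_argument_lines([], "--v", 2) == ["--v=2"]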
login-register-frontpage.component.ts
import { Component, OnInit,Inject,ViewContainerRef ,OnDestroy} from '@angular/core'; import {FormGroup, FormControl, Validators,FormBuilder} from '@angular/forms'; import {Router} from '@angular/router'; import {loginDetails} from './loginDetails'; import {registerDetails} from './registerDetails'; import {LoginService} from '../../services/login.service'; import {RegisterService} from '../../services/register.service'; import {vendorDetails} from './vendorDetails'; import { MessageService } from './../../services/message.service'; import {GooglesigninService} from '../../services/googlesignin.service' import {HomeUrl} from '../../configs/homeRedirect.config'; declare const gapi: any; @Component({ selector: 'app-login-register-frontpage', templateUrl: './login-register-frontpage.component.html', styleUrls: ['./login-register-frontpage.component.css'], providers:[ RegisterService,MessageService , GooglesigninService] }) export class
implements OnInit , OnDestroy { public auth2: any; registerUsername:String; registerPassword:String; registerAddress:String; registerCity:String; registerState:String; registerZip:number; loginDetails:loginDetails; loginForm:FormGroup; addressProxy:FormGroup; registerForm:FormGroup; fb: FormBuilder; vendorDetails:vendorDetails; form:FormGroup; tempPassword:String; isAlredyExist:boolean=false; status: boolean = false; signINResponse: any; private userLocation: string = "Delhi"; windowRef:any=window; constructor( @Inject(FormBuilder) fb: FormBuilder, private loginService:LoginService, private registerService:RegisterService, private router:Router, private messageService:MessageService, private _vcr:ViewContainerRef, private googlesigninservice:GooglesigninService ) { this.fb=fb; this.registerForm=this.fb.group({ username: ['',[Validators.required, Validators.email]], password: ['',[Validators.required, Validators.minLength(8), Validators.maxLength(20), Validators.pattern("^(?=.*?[A-Z])(?=.*?[a-z])(?=.*?[0-9])(?=.*?[#?!@$%^&*-]).{8,}$")]], rePassword: ['',[Validators.required]] },{validator: this.checkIfMatchingPasswords}); this.onChanges(); } ngOnDestroy(){ console.log("hello"); } public googleInit() { gapi.load('auth2', () => { this.auth2 = gapi.auth2.init({ client_id: '365945332378-avlqvr9k9k1m1gkko3uocqaa5s5cj4pm.apps.googleusercontent.com', cookiepolicy: 'single_host_origin', scope: 'profile email' }); this.attachSignin(document.getElementById('googleBtn')); }); } public attachSignin(element) { this.auth2.attachClickHandler(element, {}, (googleUser) => { let id_token = googleUser.getAuthResponse().id_token; console.log(id_token); this.googlesigninservice.getgooglesign(id_token).subscribe(res=>{ res=res.toString(); localStorage.setItem("application-token",res); let userLocation = localStorage.getItem("loc"); let redirectUrl=HomeUrl.homeUrl+userLocation; window.location.href = redirectUrl; },error=>{ console.log("error in services") } ) }, (error) => { alert(JSON.stringify(error, undefined, 2)); }); } ngAfterViewInit(){ this.googleInit(); } ngOnInit() { this.loginForm=new FormGroup({ username : new FormControl('', [Validators.required, Validators.email]), password : new FormControl('', [Validators.required]), }); this.userLocation = localStorage.getItem("loc"); } onChanges(): void { this.registerForm.get('username').valueChanges.subscribe(val => { this.isAlredyExist=false; }); } validateUsername(){ let body= { "email": this.registerForm.get('username').value }; this.registerService.register(body).subscribe((res) => { }, (res:Response) => { if(res.status==409) { this.isAlredyExist=true; } }); } login(){ let username=this.loginForm.get('username').value; let password = this.loginForm.get('password').value; var xorKey = 129; var result = ""; for (let i = 0; i < password.length; i++) { result += String.fromCharCode(xorKey ^ password.charCodeAt(i)); } this.loginService.loginWithEmailId(username,result).subscribe((res) =>{ this.router.navigate(['/homepage']); this.router.navigate(['/homepage',this.userLocation]); }, (res:Response) =>{ if(res.status==401){ alert("Unauthorized User"); } else if(res.status==500){ alert("Internal server error"); } else if(res.status==201){ alert("Successfully logged in"); } else if(res.status==404){ alert("Service Not Found"); } else if(res.status==403){ alert("403 Forbidden"); } else{ alert("Connection error"); } }); } //password match validator checkIfMatchingPasswords(group: FormGroup) { let passwordField= group.controls.password, confirmPasswordField = 
group.controls.rePassword;
    if(passwordField.value !== confirmPasswordField.value )
    {
      return confirmPasswordField.setErrors({notEquivalent: true})
    }else {
      return confirmPasswordField.setErrors(null);
    }
  }

  registerUser(){
    let tempPassword="";
    tempPassword=this.registerForm.get('password').value;
    var xorKey = 129;
    var resultPassword = "";
    for (let i = 0; i < tempPassword.length; i++) {
      resultPassword += String.fromCharCode(xorKey ^ tempPassword.charCodeAt(i));
    }
    let body={
      "email": this.registerForm.get('username').value,
      "password": resultPassword,
      "role": "Customer"
    };
    this.registerService.register(body).subscribe((res) =>{
      this.messageService.showSuccessToast(this._vcr,"Verification link sent to your Email Id");
      this.registerForm.reset();
    },
    (res:Response) =>{
      if(res.status==401 || res.status==409){
        this.messageService.showErrorToast(this._vcr,"Username already exists");
      }
      else if(res.status==500){
        alert("Internal server error");
      }
      else if(res.status==201){
        this.messageService.showSuccessToast(this._vcr,"Successfully Registered");
      }
      else if(res.status==404){
        alert("Service Not Found");
      }
      else if(res.status==403){
        alert("403 Forbidden");
      }
      else{
        alert("Connection error");
      }
    });
  }
}

// ^                  # start-of-string
// (?=.*[0-9])        # a digit must occur at least once
// (?=.*[a-z])        # a lower case letter must occur at least once
// (?=.*[A-Z])        # an upper case letter must occur at least once
// (?=.*[@#$%^&+=])   # a special character must occur at least once
// (?=\S+$)           # no whitespace allowed in the entire string
// .{8,}              # anything, at least eight places though
// $                  # end-of-string
LoginRegisterFrontpageComponent
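Both login() and registerUser() above push the password through a single-byte XOR with key 129 before it leaves the browser. XOR with a fixed key is its own inverse, so the identical loop decodes what it encodes; this is obfuscation rather than encryption, since the key is visible in the client source. A round-trip check in Python:

def xor_obfuscate(text, key=129):
    # Same per-character transform as the component's for loop.
    return "".join(chr(key ^ ord(ch)) for ch in text)

password = "S3cret!pass"
encoded = xor_obfuscate(password)
assert encoded != password
# Applying the transform a second time restores the original.
assert xor_obfuscate(encoded) == password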
dbwrapper.go
// Copyright 2018 PingCAP, Inc. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // See the License for the specific language governing permissions and // limitations under the License. package client import ( "context" "fmt" "time" "github.com/pingcap/go-ycsb/pkg/measurement" "github.com/pingcap/go-ycsb/pkg/ycsb" ) // DbWrapper stores the pointer to a implementation of ycsb.DB. type DbWrapper struct { DB ycsb.DB } func measure(start time.Time, op string, err error) { lan := time.Now().Sub(start) if err != nil { measurement.Measure(fmt.Sprintf("%s_ERROR", op), lan) return } measurement.Measure(op, lan) } func (db DbWrapper) Close() error { return db.DB.Close() } func (db DbWrapper) InitThread(ctx context.Context, threadID int, threadCount int) context.Context { return db.DB.InitThread(ctx, threadID, threadCount) } func (db DbWrapper) CleanupThread(ctx context.Context) { db.DB.CleanupThread(ctx) } func (db DbWrapper) Read(ctx context.Context, table string, key string, fields []string) (_ map[string][]byte, err error) { start := time.Now() defer func() { measure(start, "READ", err) }() return db.DB.Read(ctx, table, key, fields) } func (db DbWrapper) BatchRead(ctx context.Context, table string, keys []string, fields []string) (_ []map[string][]byte, err error) { batchDB, ok := db.DB.(ycsb.BatchDB) if ok { start := time.Now() defer func() { measure(start, "BATCH_READ", err) }() return batchDB.BatchRead(ctx, table, keys, fields) } for _, key := range keys { _, err := db.DB.Read(ctx, table, key, fields) if err != nil { return nil, err } } return nil, nil } func (db DbWrapper) Scan(ctx context.Context, table string, startKey string, count int, fields []string) (_ []map[string][]byte, err error) { start := time.Now() defer func() { measure(start, "SCAN", err) }() return db.DB.Scan(ctx, table, startKey, count, fields) } func (db DbWrapper) Update(ctx context.Context, table string, key string, values map[string][]byte) (err error) { start := time.Now() defer func() { measure(start, "UPDATE", err) }() return db.DB.Update(ctx, table, key, values) } func (db DbWrapper) BatchUpdate(ctx context.Context, table string, keys []string, values []map[string][]byte) (err error) { batchDB, ok := db.DB.(ycsb.BatchDB) if ok { start := time.Now() defer func() { measure(start, "BATCH_UPDATE", err) }() return batchDB.BatchUpdate(ctx, table, keys, values) } for i := range keys { err := db.DB.Update(ctx, table, keys[i], values[i]) if err != nil { return err } } return nil } func (db DbWrapper) Insert(ctx context.Context, table string, key string, values map[string][]byte) (err error) { start := time.Now() defer func() { measure(start, "INSERT", err) }() return db.DB.Insert(ctx, table, key, values) } func (db DbWrapper) BatchInsert(ctx context.Context, table string, keys []string, values []map[string][]byte) (err error) { batchDB, ok := db.DB.(ycsb.BatchDB) if ok
for i := range keys { err := db.DB.Insert(ctx, table, keys[i], values[i]) if err != nil { return err } } return nil } func (db DbWrapper) Delete(ctx context.Context, table string, key string) (err error) { start := time.Now() defer func() { measure(start, "DELETE", err) }() return db.DB.Delete(ctx, table, key) } func (db DbWrapper) BatchDelete(ctx context.Context, table string, keys []string) (err error) { batchDB, ok := db.DB.(ycsb.BatchDB) if ok { start := time.Now() defer func() { measure(start, "BATCH_DELETE", err) }() return batchDB.BatchDelete(ctx, table, keys) } for _, key := range keys { err := db.DB.Delete(ctx, table, key) if err != nil { return err } } return nil } func (db DbWrapper) Analyze(ctx context.Context, table string) error { if analyzeDB, ok := db.DB.(ycsb.AnalyzeDB); ok { return analyzeDB.Analyze(ctx, table) } return nil }
{ start := time.Now() defer func() { measure(start, "BATCH_INSERT", err) }() return batchDB.BatchInsert(ctx, table, keys, values) }
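DbWrapper above is a decorator in the design-pattern sense: every call is timed, successes are recorded under the operation name, and failures under a separate <OP>_ERROR series so error latency never skews the success histogram. The same measure-around-call shape as a Python decorator (measure_sink is an assumed callback, standing in for measurement.Measure):

import time
from functools import wraps

def measured(op, measure_sink):
    # Time fn and report to "<op>" on success or "<op>_ERROR" on failure,
    # mirroring DbWrapper's deferred measure(start, op, err).
    def decorate(fn):
        @wraps(fn)
        def wrapper(*args, **kwargs):
            start = time.perf_counter()
            try:
                result = fn(*args, **kwargs)
            except Exception:
                measure_sink(op + "_ERROR", time.perf_counter() - start)
                raise
            measure_sink(op, time.perf_counter() - start)
            return result
        return wrapper
    return decorate

# Usage sketch: timed_read = measured("READ", sink)(db.read)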
fread.py
#!/usr/bin/env python # -*- coding: utf-8 -*- # import sys import os import time import itertools from heapq import heappush, heappop # Compatibility with Python < 2.6 # try: from heapq import heappushpop except ImportError: def heappushpop(heap, item): heappush(heap, item) return heappop(heap) import numpy from prosci.util.protein import Pdb, ResidueList, Protein from prosci.util.pdb3d import dist from prosci.util.geohash import GeometricHash from prosci.loops import ccd from prosci.loops.loopmodel import ANCHOR_LENGTH, get_max_loop_length, describe_anchors, iterate_database, get_loop_structure, add_oxygens, is_loop_length_in_database, get_min_loop_length_in_database, relabel_loop, is_clash, calculate_rmsd, is_consecutive, make_contact_gh, find_contacts, get_contact_class, find_contacts_simple, get_native_contacts from prosci.loops.esst import load_esst # Define consecutive residues very loosely, for purposes of checking the framework def is_framework_consecutive(a, b): # Set max distance for consecutive C and N atoms to 2.8 Angstroms # ... which is very loose # return is_consecutive(a, b, maxlen_sq=7.84) def _heapadd(heap, item, maxitems): if len(heap) < maxitems: heappush(heap, item) else: heappushpop(heap, item) class FreadError(RuntimeError): pass class BadInputError(FreadError): pass class UnsupportedLoopLengthError(BadInputError): pass class LoopStretchError(BadInputError): pass class NonConsecutiveAnchorError(BadInputError): pass class Fread(object): def __init__(self, db=None, subst_table_path=None, score_cutoff=25, open_rmsd_cutoff=1.0, closed_rmsd_cutoff=0.3, vdw_factor=0.7, close=True, verbose=False, errstream=sys.stderr, meld=True, max_melding_rmsd=1.0, nostruc=False, mutate=False, ccd_target_rmsd=0.15, ccd_iterations=5000, max_decoys=100, first_decoys=0, extension_size=sys.maxint, extension_minimum=0, calculate_contacts=False, contact_distance=6.0, contact_identity=0.8): self.score_cutoff = score_cutoff self.open_rmsd_cutoff = open_rmsd_cutoff self.closed_rmsd_cutoff = closed_rmsd_cutoff self.vdw_factor = vdw_factor self.close = close self.verbose = verbose self.errstream = errstream self.meld = meld self.max_melding_rmsd = max_melding_rmsd self.min_database_loop_length = 1 self.no_structure_output = nostruc self.mutate = mutate self.ccd_target_rmsd = ccd_target_rmsd self.ccd_iterations = ccd_iterations self.max_decoys = max_decoys self.first_decoys = first_decoys self.extension_size = extension_size self.extension_minimum = extension_minimum self.min_b_factor = 0.0 self.calculate_contacts = calculate_contacts self.contact_distance = contact_distance self.contact_identity = contact_identity self.warnings = [] if db: self.set_db(db) self.set_subst_tables(subst_table_path) self.results = [] self.counter = itertools.count(1) self.idecoy = 0 def set_options(self, opts): for o in opts: if o in self.__dict__ and not o.startswith('_'): self.__dict__[o] = opts[o] def set_silent(self): #self.verbose = False self.errstream = open(os.devnull, 'w') def note(self, message, force=False): if force or self.verbose: self.errstream.write(str(message)+"\n") def warn(self, exception, force=False): if isinstance(exception, str): exception = FreadError(exception) self.note("WARNING: "+str(exception), force) self.warnings.append(exception) return exception def die(self, exception, force=True): raise self.warn(exception, force) def set_subst_tables(self, subst_table_path): self.subst_tables = load_esst(subst_table_path) def set_db(self, db): self.min_database_loop_length = 
get_min_loop_length_in_database(db) self.db = db def set_structure(self, structure): self.clear_results() p = Pdb(structure) self.ligand_atoms = p.ligands self.residues = ResidueList(p) add_oxygens(self.residues) def get_loop_index(self, residue_number, inscode="", chain=None): if inscode: inscode = inscode.strip() if chain: chain = chain.strip() if isinstance(residue_number, str) and residue_number[-1].isalpha(): ires = int(residue_number[:-1]) inscode = residue_number[-1] else: ires = int(residue_number) start_of_loop=None for i,r in enumerate(self.residues): if r.ires == ires and r.inscode == inscode and (not chain or chain == r.chain): start_of_loop = i+1 break return start_of_loop def get_structure_sequence(self, startindex=0, endindex=None): return self.residues[startindex:endindex].get_seq() def set_loop(self, start_of_loop, loop_sequence, resnum=False, chain=None, structure_known=None): self.clear_results() if chain is not None: chain = chain.strip() # We've been given a residue number instead of a start index if resnum: if isinstance(start_of_loop, str): if start_of_loop[-1].isalpha(): ires = int(start_of_loop[:-1]) inscode = start_of_loop[-1] else: ires = int(start_of_loop) inscode = "" else: ires = int(start_of_loop) inscode = "" found=False for i,r in enumerate(self.residues): if r.ires == ires and r.inscode == inscode and (not chain or chain == r.chain): start_of_loop = i+1 found = True self.note("Located residue left of loop to be modelled: %d%s. Loop starting index: %d. Sequence surrounding start of loop: %s|%s" % (r.ires, r.inscode, start_of_loop, self.residues[max(0,start_of_loop-8):start_of_loop].get_seq(), self.residues[start_of_loop:start_of_loop+8].get_seq())) break if not found: msg = "Residue before start of loop (residue number %d%s" % (ires, inscode) if chain: msg += ", chain '%s'" % chain msg += ") not found in query structure" self.die(BadInputError(msg)) if start_of_loop < ANCHOR_LENGTH or start_of_loop > len(self.residues) - ANCHOR_LENGTH: self.die(BadInputError("Cannot model loop closer than %d residues to the terminus (need a complete anchor on both sides)." % (ANCHOR_LENGTH))) # Assuming no coordinates are present in file, start and end of the loop are the same end_of_loop = start_of_loop if structure_known is not None: # We were told if loop coordinates are present in input file. # Skip auto-detection. # self.loop_structure_is_known = structure_known if structure_known: self.note("User tells me loop structure is present.") else: self.note("User tells me loop structure is not present.") # Auto-detect if loop coordinates are present in input file. else: # Are the coordinates are in the file? self.loop_structure_is_known = is_framework_consecutive(self.residues[start_of_loop-1], self.residues[start_of_loop]) self.note("Is loop structure present: "+str(self.loop_structure_is_known)) # If we have the native loop structure, adjust end_of_loop. 
# Also do some additional sanity checks
    #
    if self.loop_structure_is_known:
      end_of_loop += len(loop_sequence)
      strucseq = ""
      for i,r in enumerate(self.residues[start_of_loop:end_of_loop]):
        strucseq += r.get_seq()
      if (not self.mutate) and (loop_sequence != strucseq):
        self.die(BadInputError("Residues differ between sequence and structure input: %s, %s" % (loop_sequence, strucseq)))
      self.note("Loop sequence of given ATOM co-ordinates: %s" % (strucseq))
      del strucseq

      if end_of_loop != start_of_loop:
        if len(loop_sequence) != end_of_loop - start_of_loop:
          self.die(BadInputError("Loop co-ordinates present in input, but number of residues (%d) does not match length of input loop sequence (%d)." % (end_of_loop - start_of_loop, len(loop_sequence))))
        if end_of_loop > len(self.residues) - ANCHOR_LENGTH:
          self.die(BadInputError("Cannot model loop closer than %d residues to the terminus (need a complete anchor on both sides)." % (ANCHOR_LENGTH)))

    # Set ourselves up for loop extension
    #
    self.seq = self.loop_sequence = loop_sequence
    self.s = self.start_of_loop = start_of_loop
    self.e = self.end_of_loop = end_of_loop

    self.verify_anchors()

    while len(self.seq) < self.min_database_loop_length:
      try:
        self.verify_stretch()
      except LoopStretchError:
        pass
      self.extend_loop()

    while True:
      try:
        self.verify_stretch()
        break
      except LoopStretchError as e:
        try:
          self.extend_loop()
        except BadInputError:
          self.die(LoopStretchError(str(e)+" Cannot extend gap further."))

  def extend_loop(self):
    """Extend loop by one residue on each side. Raises NonConsecutiveAnchorError() or UnsupportedLoopLengthError() upon failure.
    """
    start_of_loop = self.start_of_loop
    end_of_loop = self.end_of_loop
    s = self.s - 1
    e = self.e + 1
    length = (start_of_loop - s) + (e - end_of_loop) + len(self.loop_sequence)

    if s<ANCHOR_LENGTH or not is_framework_consecutive(self.residues[s-ANCHOR_LENGTH], self.residues[s-ANCHOR_LENGTH+1]) or e>len(self.residues)-ANCHOR_LENGTH or not is_framework_consecutive(self.residues[e+ANCHOR_LENGTH-2], self.residues[e+ANCHOR_LENGTH-1]):
      self.die(NonConsecutiveAnchorError("Cannot extend loop to length %d, due to gaps in the query structure or proximity to the termini"%(length)))

    if not is_loop_length_in_database(self.db, length):
      self.die(UnsupportedLoopLengthError("Cannot extend loop to length %d, due to database limitations"%(length)))

    seq = ""
    for i in xrange(s, start_of_loop):
      seq += self.residues[i].get_seq()
    seq += self.loop_sequence
    for i in xrange(end_of_loop, e):
      seq += self.residues[i].get_seq()

    assert len(seq) == length, str([s, start_of_loop, end_of_loop, e, length, len(seq), seq, self.loop_sequence])

    self.s = s
    self.e = e
    self.seq = seq

    self.note("Extending loop to length %d" % (length))

  def verify_anchors(self):
    # Ensure anchors are consecutive stretches of amino acids
    for x in xrange(self.s-ANCHOR_LENGTH+1, self.s):
      if not is_framework_consecutive(self.residues[x-1], self.residues[x]):
        self.die(NonConsecutiveAnchorError("Anchor residues not consecutive in framework structure: residue index %s, %s"%(self.s, self.residues.code)))
    for x in xrange(self.e, self.e+ANCHOR_LENGTH-1):
      if not is_framework_consecutive(self.residues[x], self.residues[x+1]):
        self.die(NonConsecutiveAnchorError("Anchor residues not consecutive in framework structure: residue index %s, %s"%(self.e, self.residues.code)))

  def verify_stretch(self):
    # Ensure anchors are not too far apart for loop to stretch gap
    anchor_distance = dist(self.residues[self.s-1].C, self.residues[self.e].N)

    if anchor_distance > 
get_max_loop_length(len(self.seq)) * 1.05: self.die(LoopStretchError("Loop span (%.2f Angstrom) too large to be closed using %d residues. Trying to extend gap." % (anchor_distance, len(self.seq)))) def verify_length(self): # Ensure loop is not too long for the database if not is_loop_length_in_database(self.db, len(self.seq)): self.die(UnsupportedLoopLengthError("Cannot model loop of length %d, due to database limitations"%(len(self.seq)))) def verify(self): self.verify_anchors() self.verify_stretch() self.verify_length() def model(self, top=None, stop_after=None, f_rank_decoy=None, f_stop_search=None, f_filter=None): """Model loop using the FREAD algorithm. This is the method handling most of the work. Raises UnsupportedLoopLengthError if loop length is not supported. """ if top is None: top = self.max_decoys if stop_after is None: stop_after = self.first_decoys if top <= 0: top = sys.maxint if stop_after > 0 and self.idecoy >= stop_after: return self.results if not f_rank_decoy: f_rank_decoy = FREAD_RANKING if not f_stop_search: f_stop_search = lambda x: False if not f_filter: f_filter = lambda x: True while len(self.seq) < self.min_database_loop_length: self.extend_loop() self.verify() meld_anchors = self.meld # and (self.open_rmsd_cutoff <= self.max_melding_rmsd) close_loop = self.close verbose = self.verbose residues = self.residues start_of_loop = self.s end_of_loop = self.e loop_sequence = self.seq start = start_of_loop - ANCHOR_LENGTH end = end_of_loop + ANCHOR_LENGTH loop_length = len(loop_sequence) total_length = loop_length + 2*ANCHOR_LENGTH self.note("Loop region: N(%4d, %4d) C(%4d,%4d) Loop(%3d,'%s')" % (start, start_of_loop, end_of_loop, end, loop_length, loop_sequence)) ############################################################################ # Get query anchors and prepare for clash checking # # Get anchor coordinates # anchor_N = residues[start:start_of_loop] anchor_C = residues[end_of_loop:end] # Describe anchors in query structure # anchor_description, query_transform = describe_anchors(anchor_N, anchor_C, loop_length) # Build a GeometricHash of the query structure (without the loop region), for # clash checking # coords = [] gh_atoms = [] for r in residues[:start]+residues[end:]: for a in r: #if a.atom in ("N", "CA", "C", "O", "CB"): coords.append(a.xyz) gh_atoms.append(a) gh = GeometricHash(numpy.array(coords)) del coords # Inter-residue contacts if self.calculate_contacts: p = Protein((residues[:start]+residues[end:]).split_chains()) p.ligands = Protein(ResidueList(self.ligand_atoms).split_chains()) contact_gh, contact_gh_atoms = make_contact_gh(p) ############################################################################ # Search the database # results = [] # Heap Queue of the top-ranking decoys for decoy in self.results: _heapadd(results, (f_rank_decoy(decoy), decoy.idecoy, decoy), top) for decoy in iterate_database(self.db, loop_length, self.subst_tables, anchor_description, loop_sequence, self.open_rmsd_cutoff, self.score_cutoff): if len(results) >= top: if decoy.internal_rmsd > results[0][-1].anchor_rmsd_open: continue # Retrieve loop structure from database decoy_residues = get_loop_structure(self.db, decoy.struc, decoy.start, total_length) assert len(decoy_residues) == total_length # Superimpose anchors and check anchor RMSD before starting anchor_rmsd_open = ccd.superimpose(decoy_residues[:ANCHOR_LENGTH]+decoy_residues[-ANCHOR_LENGTH:], anchor_N+anchor_C, decoy_residues) if anchor_rmsd_open > self.open_rmsd_cutoff: self.note("%s_%d_%d : Anchor RMSD too 
large: %.3f"%(decoy.struc, decoy.start, loop_length, anchor_rmsd_open)) continue if len(results) >= top: if anchor_rmsd_open > results[0][-1].anchor_rmsd_open: continue # Save start residue number of loop, in database structure decoy.startres = decoy_residues[ANCHOR_LENGTH].ires decoy.startinscode = decoy_residues[ANCHOR_LENGTH].inscode # Relabel residues and discard non-matching atoms relabel_loop(decoy_residues[ANCHOR_LENGTH:-ANCHOR_LENGTH], loop_sequence, prevatom=anchor_N[-1].CA, nextatom=anchor_C[0].CA) if self.loop_structure_is_known: loop_rmsd_open = calculate_rmsd(decoy_residues[ANCHOR_LENGTH:-ANCHOR_LENGTH], residues[start_of_loop:end_of_loop]) # Are we allowed to meld this decoy? # meld_this_decoy = meld_anchors and (anchor_rmsd_open <= self.max_melding_rmsd) #and not self.no_structure_output close_this_decoy = close_loop and anchor_rmsd_open > self.ccd_target_rmsd and (not meld_anchors or anchor_rmsd_open > self.max_melding_rmsd) #if not self.no_structure_output or close_this_decoy: decoy.nanchor = anchor_N.deep_copy() decoy.canchor = anchor_C.deep_copy() if meld_this_decoy: meld(decoy.nanchor, decoy_residues[:ANCHOR_LENGTH]) meld(decoy.canchor, decoy_residues[-ANCHOR_LENGTH:], invertWeights=True) if not close_this_decoy: anchor_rmsd_closed = anchor_rmsd_open iterations = 0 else: # Loop closure anchor_rmsd_closed, iterations = ccd.close_loop(decoy.nanchor, decoy_residues, decoy.canchor, target_rmsd=self.ccd_target_rmsd, iterations=self.ccd_iterations) if anchor_rmsd_closed > self.closed_rmsd_cutoff: self.note("Failed to close loop: %s_%d_%d"%(decoy.struc, decoy.start, loop_length)) continue # Cut off the decoy loop's anchors decoy_residues = decoy_residues[ANCHOR_LENGTH:-ANCHOR_LENGTH] #if not self.no_structure_output: # Restore main chain oxygens, which got lost during the melding/closing procedure decoy_residues[-1].O = None # This oxygen is wrong anyway, so delete it add_oxygens(decoy.nanchor+decoy_residues[:1], force=True) add_oxygens(decoy_residues[-1:]+decoy.canchor+residues[end:end+1], force=True) # Clash check is_clashing = is_clash(gh, gh_atoms, decoy_residues, self.vdw_factor) if is_clashing: self.note("Clash detected in decoy: %s_%d_%d"%(decoy.struc, decoy.start, loop_length)) continue if self.loop_structure_is_known: decoy.loop_rmsd_open = loop_rmsd_open if not iterations: decoy.loop_rmsd_closed = loop_rmsd_open else: decoy.loop_rmsd_closed = calculate_rmsd(decoy_residues, residues[start_of_loop:end_of_loop]) decoy.length = loop_length decoy.anchor_rmsd_open = anchor_rmsd_open decoy.iterations = iterations #if not self.no_structure_output: decoy.loop = decoy_residues if self.calculate_contacts: contacts = "" for i, r in enumerate(decoy_residues): contact_atoms = find_contacts(contact_gh, r, maxdist=self.contact_distance) contact_atoms.extend(find_contacts_simple(decoy.nanchor+decoy_residues+decoy.canchor, i, maxdist=self.contact_distance)) contacts += get_contact_class(r.chain, contact_atoms) decoy.native_contacts = get_native_contacts(self.db, decoy.struc, decoy.start, total_length) id = 0 for x,y in zip(decoy.native_contacts, contacts): if x == y: id += 1 decoy.contact_identity = float(id) / len(contacts) decoy.contacts = contacts if (self.contact_identity > 0) and (decoy.contact_identity < self.contact_identity): continue # Get per-residue substitution scores # tables = self.subst_tables.tables ascii2index = self.subst_tables.ascii2index seqmap = tuple([ascii2index[ord(s)] for s in loop_sequence]) dihed = decoy.dihedrals score_list = [] for i,x in 
enumerate(decoy.seq): score_list.append(tables[int(dihed[i])][seqmap[i]][ascii2index[ord(x)]]) assert sum(score_list) == decoy.score decoy.residue_scores = score_list # Save per-residue scores in the occupancy column and # the total score in the B factor column # # for i,r in enumerate(decoy.loop): for a in r: a.b = self.min_b_factor + max(0, 30 - decoy.score) # Average score over a 3 residue window... # because that's how MEDELLER does it sc = score_list[max(0,i-1):i+2] a.occup = float(sum(sc))/len(sc) if not f_filter(decoy): self.note("User-defined filter is excluding decoy: %s_%d_%d"%(decoy.struc, decoy.start, loop_length)) continue self.idecoy = decoy.idecoy = next(self.counter) # Save result _heapadd(results, (f_rank_decoy(decoy), decoy.idecoy, decoy), top) if stop_after > 0 and self.idecoy >= stop_after: break if f_stop_search(decoy): break self.note("%d decoys found"%(len(results))) self.results = [] while results: r = heappop(results) self.results.append(r[-1]) self.results.reverse() return self.results def clear_results(self): self.results = [] self.warnings = [] self.counter = itertools.count(1) self.idecoy = 0 def write_summary(self, outstream=sys.stdout, write_decoy_sequence=False): """Write summary information to specified output stream. """ for decoy in self.results: outstream.write("%s_%d%s_%d\t%d\t%.3f\t%.3f\t%d\t%s" % (decoy.struc, decoy.startres, decoy.startinscode, decoy.length, decoy.score, decoy.internal_rmsd, decoy.anchor_rmsd_open, decoy.iterations,decoy.seq)) if write_decoy_sequence: outstream.write("\t%s" % (decoy.seq)) if self.calculate_contacts: outstream.write("\t%s" % (decoy.contacts)) outstream.write("\t%s" % (decoy.native_contacts)) outstream.write("\t%.3f" % (decoy.contact_identity)) if self.loop_structure_is_known: outstream.write("\t%.3f\t%.3f" % (decoy.loop_rmsd_open, decoy.loop_rmsd_closed)) outstream.write("\n") def write_decoy_structures(self, out_dir, top=0, suffix=".loop.atm", idfilter=[]): """Write decoy structure files (PDB format) to specified directory. """ if self.no_structure_output: self.die("Cannot write decoy structures as structure output is disabled") if out_dir: if not os.path.exists(out_dir): os.mkdir(out_dir) for i, decoy in enumerate(self.results): if top > 0 and i >= top: break decoyname = "%s_%d%s_%d"%(decoy.struc, decoy.startres, decoy.startinscode, decoy.length) if idfilter and (decoyname not in idfilter): continue decoyfile = os.path.join(out_dir, decoyname+suffix) f = open(decoyfile, "w") try: for r in decoy.nanchor: f.write(str(r)) for r in decoy.loop: f.write(str(r)) for r in decoy.canchor: f.write(str(r)) finally: f.close() def assemble_model(self, decoy): # Start and end of loop region in model start = self.s - ANCHOR_LENGTH end = self.e + ANCHOR_LENGTH if isinstance(decoy, int): decoy = self.results[decoy] if self.no_structure_output: self.die("Cannot write decoy structures as structure output is disabled") model = ResidueList([]) for i in xrange(start): model.append(self.residues[i].copy()) model.extend(decoy.nanchor) model.extend(decoy.loop) model.extend(decoy.canchor) for i in xrange(end, len(self.residues)): model.append(self.residues[i].copy()) # Replace main chain oxygen before the loop, to ensure the peptide bond there is planar model[start-1].O = None add_oxygens(model, start=start-1, end=start, force=True) return model def write_model_structures(self, out_dir, top=0, suffix=".model.atm", idfilter=[]): """Write model structure files (PDB format), including decoy residues, to specified directory. 
""" if self.no_structure_output: self.die("Cannot write model structures as structure output is disabled") if out_dir: if not os.path.exists(out_dir): os.mkdir(out_dir) else: out_dir = "" for i, decoy in enumerate(self.results): if top > 0 and i >= top: break decoyname = "%s_%d%s_%d"%(decoy.struc, decoy.startres, decoy.startinscode, decoy.length) if idfilter and (decoyname not in idfilter): continue decoyfile = os.path.join(out_dir, decoyname+suffix) model = self.assemble_model(decoy) f = open(decoyfile, "w") try: f.write(str(model)) finally: f.close() def open_errstream(self, filename): if filename == "-": self.errstream = sys.stderr else: self.errstream = open(filename, "w") def close_errstream(self): if self.errstream != sys.stderr: self.errstream.close() self.set_silent() def automodel_loop(self, start_of_loop, loop_sequence, loopsuffix = "", dbdir = '.', strucdir = 'decoys', summary_file_name = 'summary', write_decoys = True, write_models = True, write_decoy_sequence = False, resnum = False, chain = None, structure_known = None, f_rank_decoy = None, f_stop_search = None, f_filter = None, idfilter = [], **kwargs):
def meld(fragScaffold, fragPrediction, invertWeights=False): """Averages the coordinates of the two Pdb arguments. Move the first object's co-ordinates onto the averaged position. By default, the first object is assumed to be part of the N-terminal fragment, the second is part of the C-terminal fragment. This can be reversed by setting invertWeights=True.""" resS = fragScaffold resP = fragPrediction L = len(resS) assert len(resS) == len(resP) def averageCoord(P, S): # P = coordinate of prediction # S = coordinate of scaffold # D = distance (in residues) from (loop+anchor)-fragment end # L = anchor length return 1.0/(L+1) * (D*P + (L+1-D)*S) for i, (rS, rP) in enumerate(zip(resS, resP)): if invertWeights: D = len(resS)-i else: D = i+1 newN = averageCoord(rP.N.xyz, rS.N.xyz) newCA = averageCoord(rP.CA.xyz, rS.CA.xyz) newC = averageCoord(rP.C.xyz, rS.C.xyz) T_from, T_to, rotmat = ccd.get_rotmat([rS.N.xyz, rS.CA.xyz, rS.C.xyz], [newN, newCA, newC]) rS.O = None # Remove main chain oxygen - can regenerate this later for a in rS: a.xyz = numpy.dot(a.xyz - T_from, rotmat) + T_to def FREAD_RANKING(decoy): """Returns a comparable object (a tuple of scores) used for ranking a given decoy. Bigger values will be ranked higher.""" return (-decoy.anchor_rmsd_open, decoy.score, -decoy.internal_rmsd, -decoy.iterations) def FREAD_RANKING_BY_ESSS(decoy): """Returns a comparable object (a tuple of scores) used for ranking a given decoy. Bigger values will be ranked higher.""" return (decoy.score, -decoy.anchor_rmsd_open, -decoy.internal_rmsd, -decoy.iterations)
"""Search the given database and return a list of decoys for the given loop. """ # Set options defined in the Fread constructor self.set_options(kwargs) if self.extension_size < 0: max_extension = sys.maxint else: max_extension = self.extension_size if strucdir is not None: strucdir = strucdir.strip() if (not strucdir) or (not write_decoys and not write_models): strucdir = None self.no_structure_output = True for db_group in dbdir.split(":"): # try these databases until we find something databases = db_group.split("|") # combine results from all these databases self.set_db(databases[0]) try: self.set_loop(start_of_loop, loop_sequence.upper(), resnum=resnum, chain=chain, structure_known=structure_known) except FreadError: break # Extend loop if extension_minimum is set # for i in xrange(self.extension_minimum): try: self.extend_loop() except FreadError: break # Try to model, and extend loop if nothing was found anyresults = False for db in databases: self.set_db(db) try: anyresults |= bool(self.model(f_rank_decoy=f_rank_decoy, f_stop_search=f_stop_search, f_filter=f_filter)) except FreadError: break for i in xrange(max_extension): if anyresults: break try: self.extend_loop() except FreadError: break for db in databases: self.set_db(db) anyresults |= bool(self.model(f_rank_decoy=f_rank_decoy, f_stop_search=f_stop_search, f_filter=f_filter)) if anyresults: break # Write results to STDOUT or a file, if one was specified if summary_file_name: if summary_file_name == "-": self.write_summary(sys.stdout, write_decoy_sequence=write_decoy_sequence) else: root, ext = os.path.splitext(summary_file_name) outstream = open(root + loopsuffix + ext, "w") self.write_summary(outstream, write_decoy_sequence=write_decoy_sequence) outstream.close() if not self.no_structure_output: if write_decoys: self.write_decoy_structures(strucdir + loopsuffix, suffix=".loop.pdb", idfilter = idfilter) if write_models: self.write_model_structures(strucdir + loopsuffix, suffix=".model.pdb", idfilter = idfilter) return self.results
frame.rs
use gimli::{ BaseAddresses, CfaRule, Expression, Register, RegisterRule, UnwindContext, UnwindTableRow, }; #[cfg(feature = "dwarf-expr")] use gimli::{Evaluation, EvaluationResult, Location, Value}; use super::arch::*; use super::find_fde::{self, FDEFinder, FDESearchResult}; use crate::abi::PersonalityRoutine; use crate::arch::*; use crate::util::*; struct StoreOnStack; // gimli's MSRV doesn't allow const generics, so we need to pick a supported array size. const fn next_value(x: usize) -> usize { let supported = [0, 1, 2, 3, 4, 8, 16, 32, 64, 128]; let mut i = 0; while i < supported.len() { if supported[i] >= x { return supported[i]; } i += 1; } 192 } impl<R: gimli::Reader> gimli::UnwindContextStorage<R> for StoreOnStack { type Rules = [(Register, RegisterRule<R>); next_value(MAX_REG_RULES)]; type Stack = [UnwindTableRow<R, Self>; 1]; } #[cfg(feature = "dwarf-expr")] impl<R: gimli::Reader> gimli::EvaluationStorage<R> for StoreOnStack { type Stack = [Value; 64]; type ExpressionStack = [(R, R); 0]; type Result = [gimli::Piece<R>; 1]; } #[derive(Debug)] pub struct Frame { fde_result: FDESearchResult, row: UnwindTableRow<StaticSlice, StoreOnStack>, } impl Frame { pub fn from_context(ctx: &Context, signal: bool) -> Result<Option<Self>, gimli::Error> { let mut ra = ctx[Arch::RA]; // Reached end of stack if ra == 0 { return Ok(None); } // RA points to the *next* instruction, so move it back 1 byte for the call instruction. if !signal { ra -= 1; } let fde_result = match find_fde::get_finder().find_fde(ra as _) { Some(v) => v, None => return Ok(None), }; let mut unwinder = UnwindContext::<_, StoreOnStack>::new_in(); let row = fde_result .fde .unwind_info_for_address( &fde_result.eh_frame, &fde_result.bases, &mut unwinder, ra as _, )? .clone(); Ok(Some(Self { fde_result, row })) } #[cfg(feature = "dwarf-expr")] fn evaluate_expression( &self, ctx: &Context, expr: Expression<StaticSlice>, ) -> Result<usize, gimli::Error> { let mut eval = Evaluation::<_, StoreOnStack>::new_in(expr.0, self.fde_result.fde.cie().encoding()); let mut result = eval.evaluate()?; loop { match result { EvaluationResult::Complete => break, EvaluationResult::RequiresMemory { address, .. } => { let value = unsafe { (address as usize as *const usize).read_unaligned() }; result = eval.resume_with_memory(Value::Generic(value as _))?; } EvaluationResult::RequiresRegister { register, .. } => { let value = ctx[register]; result = eval.resume_with_register(Value::Generic(value as _))?; } EvaluationResult::RequiresRelocatedAddress(address) => { let value = unsafe { (address as usize as *const usize).read_unaligned() }; result = eval.resume_with_memory(Value::Generic(value as _))?; } _ => unreachable!(), } } Ok( match eval .as_result() .last() .ok_or(gimli::Error::PopWithEmptyStack)? 
.location { Location::Address { address } => address as usize, _ => unreachable!(), }, ) } #[cfg(not(feature = "dwarf-expr"))] fn evaluate_expression( &self, _ctx: &Context, _expr: Expression<StaticSlice>, ) -> Result<usize, gimli::Error> { Err(gimli::Error::UnsupportedEvaluation) } pub fn unwind(&self, ctx: &Context) -> Result<Context, gimli::Error> { let row = &self.row; let mut new_ctx = ctx.clone(); let cfa = match *row.cfa() { CfaRule::RegisterAndOffset { register, offset } => { ctx[register].wrapping_add(offset as usize) } CfaRule::Expression(expr) => self.evaluate_expression(ctx, expr)?, }; new_ctx[Arch::SP] = cfa as _; new_ctx[Arch::RA] = 0; for (reg, rule) in row.registers() { let value = match *rule { RegisterRule::Undefined | RegisterRule::SameValue => ctx[*reg], RegisterRule::Offset(offset) => unsafe { *((cfa.wrapping_add(offset as usize)) as *const usize) }, RegisterRule::ValOffset(offset) => cfa.wrapping_add(offset as usize), RegisterRule::Register(r) => ctx[r], RegisterRule::Expression(expr) => { let addr = self.evaluate_expression(ctx, expr)?; unsafe { *(addr as *const usize) } } RegisterRule::ValExpression(expr) => self.evaluate_expression(ctx, expr)?, RegisterRule::Architectural => unreachable!(), }; new_ctx[*reg] = value; } Ok(new_ctx) } pub fn bases(&self) -> &BaseAddresses { &self.fde_result.bases } pub fn personality(&self) -> Option<PersonalityRoutine> { self.fde_result .fde .personality() .map(|x| unsafe { deref_pointer(x) }) .map(|x| unsafe { core::mem::transmute(x) }) } pub fn lsda(&self) -> usize { self.fde_result .fde .lsda() .map(|x| unsafe { deref_pointer(x) }) .unwrap_or(0) } pub fn initial_address(&self) -> usize { self.fde_result.fde.initial_address() as _ } pub fn is_signal_trampoline(&self) -> bool
}
{ self.fde_result.fde.is_signal_trampoline() }
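// A small illustrative check (a sketch, not part of the original crate) of
// the `next_value` helper above: it rounds the requested capacity (here
// MAX_REG_RULES) up to the nearest array size supported by gimli's storage
// traits, falling back to 192 past the table.
#[cfg(test)]
mod next_value_sketch {
    use super::next_value;

    #[test]
    fn rounds_up_to_supported_sizes() {
        assert_eq!(next_value(0), 0);
        assert_eq!(next_value(5), 8); // next supported size after 4 is 8
        assert_eq!(next_value(100), 128);
        assert_eq!(next_value(200), 192); // beyond the table
    }
}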
casbin_authorizer.test.ts
import "jest" import { UserAuthInfo } from "../../src/auth/authn"; import { CasbinAuthorizer } from "../../src/auth/casbin"; import { resolve } from "path"; import { readFileSync } from "fs"; import { Action } from "papiea-core"; import { PermissionDeniedError } from "../../src/errors/permission_error"; import { Logger, LoggerFactory } from 'papiea-backend-utils'; describe("Casbin authorizer tests", () => { const pathToModel: string = resolve(__dirname, "../../src/auth/provider_model_example.txt"); const modelText: string = readFileSync(pathToModel).toString(); const pathToPolicy: string = resolve(__dirname, "../../src/auth/provider_policy_example.txt"); const policyText: string = readFileSync(pathToPolicy).toString(); const logger = LoggerFactory.makeLogger({level: "info"}); const authorizer: CasbinAuthorizer = new CasbinAuthorizer(logger, modelText, policyText); beforeAll(async () => { await authorizer.init(); }); afterAll(async () => { }); function actionShouldFail(user: UserAuthInfo, object: any, action: Action, done: any) { authorizer.checkPermission(user, object, action).then(_ => { done.fail(); }).catch(e => { if (e.constructor === PermissionDeniedError) { done(); } else { done.fail(e); } }); } function
(user: UserAuthInfo, object: any, action: Action, done: any) {
        authorizer.checkPermission(user, object, action).then(_ => {
            done();
        }).catch(e => {
            done.fail(e);
        });
    }

    test("Alice fails to write a kind_3 of another user without rights", done => {
        actionShouldFail({ "owner": "alice", "tenant": "1" },
            { "metadata": { "extension": { "owner": "1alice1" }, "kind": "kind_3" } }, Action.Create, done);
    });

    test("Alice fails to delete her own kind_4, which is explicitly denied", done => {
        actionShouldFail({ "owner": "alice", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_4" } }, Action.Delete, done);
    });

    test("Alice succeeds reading her own kind_3, which is explicitly allowed", done => {
        actionShouldSucceed({ "owner": "alice", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_3" } }, Action.Read, done);
    });

    test("Bob succeeds reading alice's kind_1, which is allowed to read by everyone using the default 'reader_group'", done => {
        actionShouldSucceed({ "owner": "bob", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_1" } }, Action.Read, done);
    });

    test("Bob fails to delete a kind_1 since no rule allows for it", done => {
        actionShouldFail({ "owner": "bob", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_1" } }, Action.Delete, done);
    });

    test("Alice succeeds reading a kind_2 owned by her since it was explicitly allowed", done => {
        actionShouldSucceed({ "owner": "alice", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_2" } }, Action.Read, done);
    });

    test("Bill succeeds reading alice's kind_2 since the reader_group allows it for everyone", done => {
        actionShouldSucceed({ "owner": "bill", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_2" } }, Action.Read, done);
    });

    test("Bill fails deleting alice's kind_2, since no rule allows it", done => {
        actionShouldFail({ "owner": "bill", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_2" } }, Action.Delete, done);
    });

    test("Bill fails deleting alice's kind_3, since the standard_group only allows deleting to owners", done => {
        actionShouldFail({ "owner": "bill", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_3" } }, Action.Delete, done);
    });

    test("Bill succeeds deleting his own kind_3, since the standard_group only allows deleting to owners", done => {
        actionShouldSucceed({ "owner": "bill", "tenant": "1" },
            { "metadata": { "extension": { "owner": "bill" }, "kind": "kind_3" } }, Action.Delete, done);
    });

    test("Anonymous user is denied reading alice's kind_2, by a rule denying anything of anyone's entity", done => {
        actionShouldFail({ "owner": "anonymous" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_2" } }, Action.Read, done);
    });

    test("Admin can delete alice's kind_3 since admin belongs to the admin's group", done => {
        actionShouldSucceed({ "owner": "admin" },
            { "metadata": { "extension": { "owner": "alice" }, "kind": "kind_3" } }, Action.Delete, done);
    });

    test("Rick can delete alice's kind_3 since he is in her tenant and is a part of tenant_group", done => {
        actionShouldSucceed({ "owner": "rick", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice", "tenant_uuid": "1" }, "kind": "kind_3" } }, Action.Delete, done);
    });

    test("Rick fails to delete alice's kind_4 since he is in her tenant and is a part of tenant_group but the rule denies deleting", done => {
        actionShouldFail({ "owner": "rick", "tenant": "1" },
            { "metadata": { "extension": { "owner": "alice", "tenant_uuid": "1" }, "kind": "kind_4" } }, Action.Delete, done);
    });
});

describe("Casbin authorizer tests for default provider policy", () => {
    const pathToModel: string = resolve(__dirname, "../../src/auth/provider_model_example.txt");
    const modelText: string = readFileSync(pathToModel).toString();
    const pathToPolicy: string = resolve(__dirname, "../../src/auth/provider_policy_example.txt");
    const policyText: string = readFileSync(pathToPolicy).toString();
    const logger = LoggerFactory.makeLogger({level: "info"});
    const authorizer: CasbinAuthorizer = new CasbinAuthorizer(logger, modelText, policyText);

    beforeAll(async () => {
        await authorizer.init();
    });

    afterAll(async () => {
    });

    function actionShouldFail(user: UserAuthInfo, object: any, action: Action, done: any) {
        authorizer.checkPermission(user, object, action).then(_ => {
            done.fail();
        }).catch(e => {
            if (e.constructor === PermissionDeniedError) {
                done();
            } else {
                done.fail(e);
            }
        });
    }

    function actionShouldSucceed(user: UserAuthInfo, object: any, action: Action, done: any) {
        authorizer.checkPermission(user, object, action).then(_ => {
            done();
        }).catch(e => {
            done.fail(e);
        });
    }

    describe('admin user', () => {
        describe('has access to everything', () => {
            describe('some_kind', () => {
                const kind = 'some_kind';

                test("Read", done => {
                    actionShouldSucceed({ owner: 'admin' },
                        { metadata: { extension: { owner: 'alice' }, kind } }, Action.Read, done);
                });

                test("Create", done => {
                    actionShouldSucceed({ owner: 'admin' },
                        { metadata: { extension: { owner: 'alice' }, kind } }, Action.Create, done);
                });

                test("Update", done => {
                    actionShouldSucceed({ owner: 'admin' },
                        { metadata: { extension: { owner: 'alice' }, kind } }, Action.Update, done);
                });

                test("Delete", done => {
                    actionShouldSucceed({ owner: 'admin' },
                        { metadata: { extension: { owner: 'alice' }, kind } }, Action.Delete, done);
                });
            });
        });
    });

    describe('anonymous user', () => {
        describe('has access to nothing', () => {
            describe('his own (should never have any anyway)', () => {
                describe('some_kind', () => {
                    const kind = 'some_kind';

                    test("Read", done => {
                        actionShouldFail({ owner: 'anonymous' },
                            { metadata: { extension: { owner: 'anonymous' }, kind } }, Action.Read, done);
                    });

                    test("Create", done => {
                        actionShouldFail({ owner: 'anonymous' },
                            { metadata: { extension: { owner: 'anonymous' }, kind } }, Action.Create, done);
                    });

                    test("Update", done => {
                        actionShouldFail({ owner: 'anonymous' },
                            { metadata: { extension: { owner: 'anonymous' }, kind } }, Action.Update, done);
                    });

                    test("Delete", done => {
                        actionShouldFail({ owner: 'anonymous' },
                            { metadata: { extension: { owner: 'anonymous' }, kind } }, Action.Delete, done);
                    });
                });
            });

            describe('other owners', () => {
                describe('some_kind', () => {
                    const kind = 'some_kind';

                    test("Read", done => {
                        actionShouldFail({ owner: 'anonymous' },
                            { metadata: { extension: { owner: 'alice' }, kind } }, Action.Read, done);
                    });

                    test("Create", done => {
                        actionShouldFail({ owner: 'anonymous' },
                            { metadata: { extension: { owner: 'alice' }, kind } }, Action.Create, done);
                    });

                    test("Update", done => {
                        actionShouldFail({ owner: 'anonymous' },
                            { metadata: { extension: { owner: 'alice' }, kind } }, Action.Update, done);
                    });

                    test("Delete", done => {
                        actionShouldFail({ owner: 'anonymous' },
                            { metadata: { extension: { owner: 'alice' }, kind } }, Action.Delete, done);
                    });
                });
            });
        });
    });
});
actionShouldSucceed
abacus.rs
/*! # UTC2K - Abacus */ #![allow(clippy::integer_division)] use crate::{ DAY_IN_SECONDS, HOUR_IN_SECONDS, MINUTE_IN_SECONDS, Utc2k, }; use std::ops::{ Add, AddAssign, }; #[derive(Debug, Clone, Copy, Eq, Hash, PartialEq)] /// # Abacus. /// /// This is essentially a 32-bit version of [`Utc2k`], which allows individual /// date/time parts to temporarily hold large values so that they can be /// "rebalanced" without worrying about overflow. /// /// This is used by [`Utc2k`] when dealing with parts to ensure the unit values /// make sense, for example during string parsing or addition operations. If /// 1000 seconds are added to a time, that's totally fine, but we don't want /// 1000 seconds; we want 16 minutes and 40 seconds. /// /// Because this is a transient struct, the functionality could be represented /// as a top-level function or something instead, but this approach appears to /// be faster. pub(super) struct Abacus { y: u32, m: u32, d: u32, hh: u32, mm: u32, ss: u32, } impl Add<u32> for Abacus { type Output = Self; fn add(self, other: u32) -> Self { let mut out = Self { y: self.y, m: self.m, d: self.d, hh: self.hh, mm: self.mm, ss: self.ss.saturating_add(other), }; out.rebalance(); out } } impl AddAssign<u32> for Abacus { #[inline] fn add_assign(&mut self, other: u32) { self.ss = self.ss.saturating_add(other); self.rebalance(); } } impl From<Utc2k> for Abacus { fn from(src: Utc2k) -> Self { let (y, m, d, hh, mm, ss) = src.parts(); Self::new(y, m, d, hh, mm, ss) } } impl Abacus { #[must_use] /// # New. pub(super) fn new(y: u16, m: u8, d: u8, hh: u8, mm: u8, ss: u8) -> Self { let mut out = Self { y: y.into(), m: m.into(), d: d.into(), hh: hh.into(), mm: mm.into(), ss: ss.into(), }; out.rebalance(); out } #[allow(clippy::cast_possible_truncation)] // It fits. #[must_use] /// # Parts. /// /// Return the individual parts, nice and balanced, ready for consumption /// by [`Utc2k`]. (Only the last two digits of the year are returned.) pub(super) const fn parts(&self) -> (u8, u8, u8, u8, u8, u8) { if self.y < 2000 { (0, 1, 1, 0, 0, 0) } else if 2099 < self.y { (99, 12, 31, 23, 59, 59) } else { ( (self.y - 2000) as u8, self.m as u8, self.d as u8, self.hh as u8, self.mm as u8, self.ss as u8, ) } } } impl Abacus { /// # Rebalance. /// /// Shift overflowing small units to larger units, like seconds to minutes, /// minutes to hours, etc. fn rebalance(&mut self) { if 23 < self.hh || 59 < self.mm || 59 < self.ss { self.rebalance_ss(); self.rebalance_mm(); self.rebalance_hh(); } if 0 == self.m || 12 < self.m || 0 == self.d || (28 < self.d && self.month_days() < self.d) { self.rebalance_date(); } } /// # Rebalance Seconds. /// /// While the other time-rebalancing methods focus on just a single shift, /// this will move seconds to days, hours, and/or minutes as necessary. The /// extra effort here is primarily a short-circuit for addition operations, /// which only increment seconds. /// /// The bitshift wizardry was inspired by [this post](https://johnnylee-sde.github.io/Fast-unsigned-integer-to-time-string/). fn rebalance_ss(&mut self) { if self.ss >= DAY_IN_SECONDS { let div = self.ss / DAY_IN_SECONDS; self.d += div; self.ss -= div * DAY_IN_SECONDS; } if self.ss >= HOUR_IN_SECONDS { let div = (self.ss * 0x91A3) >> 27; self.hh += div; self.ss -= div * HOUR_IN_SECONDS; } if self.ss >= MINUTE_IN_SECONDS { let div = (self.ss * 0x889) >> 17; self.mm += div; self.ss -= div * MINUTE_IN_SECONDS; } } /// # Rebalance Minutes. /// /// This moves overflowing minutes to hours. 
fn rebalance_mm(&mut self) { if self.mm > 59 { let div = (self.mm * 0x889) >> 17; self.hh += div; self.mm -= div * 60; } } /// # Rebalance Hours. /// /// This moves overflowing hours to days. fn rebalance_hh(&mut self) { if self.hh > 23 { let div = self.hh / 24; self.d += div; self.hh -= div * 24; } } /// # Rebalance Date. /// /// This handles the shifting of both days to months and months to years. /// /// In cases where a value is zero, the higher unit is rewound. For /// example, a year/month of `2000-00` becomes `1999-12`; a month/day of /// '06-00' becomes '05-31'. /// /// Because months have different numbers of days from one another, and /// even from their selves year-to-year, this method recurses to simplify /// handling. fn
(&mut self) { // No amount of rebalancing can bring this within range. if self.y < 1500 { self.y = 1500; self.m = 1; self.d = 1; return; } // Rewind the year. if self.m == 0 { self.y -= 1; self.m = 12; } // Carry excess months over to years. else if 12 < self.m { let div = (self.m - 1) / 12; self.y += div; self.m -= div * 12; } // Rewind the month. if self.d == 0 { // If the month was January, we need to rewind the year too. Might // as well handle all rewinds in one go. if self.m == 1 { self.y -= 1; self.m = 12; self.d = 31; } else { self.m -= 1; self.d = self.month_days(); } } // We know we're fine if the day is less than 28, but if it is greater, // we have to do some additional checking. else if 28 < self.d { let size = self.month_days(); if size < self.d { self.m += 1; self.d -= size; self.rebalance_date(); } } } /// # Last Day of Month. /// /// This returns the last day of the month, or the number of days in a /// month, whichever way you want to think of it. /// /// This is leap-aware. const fn month_days(&self) -> u32 { match self.m { 1 | 3 | 5 | 7 | 8 | 10 | 12 => 31, 4 | 6 | 9 | 11 => 30, 2 if self.y.trailing_zeros() >= 2 && ((self.y % 100) != 0 || (self.y % 400) == 0) => 29, _ => 28, } } } #[cfg(test)] mod tests { use super::*; #[test] /// # Addition. fn addition() { macro_rules! add { ($($start:ident + $num:literal = ($y2:literal, $m2:literal, $d2:literal, $hh2:literal, $mm2:literal, $ss2:literal)),+) => ($( assert_eq!( ($start + $num).parts(), ($y2, $m2, $d2, $hh2, $mm2, $ss2) ); // Make sure add/assign is the same as adding. It's obviously // fine now, but could get broken later. Who knows! { let mut tmp = $start; tmp += $num; assert_eq!($start + $num, tmp); } )+); } // Add nothing. let start = Abacus::new(2000, 1, 1, 0, 0, 0); add!( start + 0 = (0, 1, 1, 0, 0, 0), start + 1 = (0, 1, 1, 0, 0, 1), start + 60 = (0, 1, 1, 0, 1, 0), start + 3600 = (0, 1, 1, 1, 0, 0), start + 3661 = (0, 1, 1, 1, 1, 1), start + 31_622_400 = (1, 1, 1, 0, 0, 0), start + 4_294_967_295 = (99, 12, 31, 23, 59, 59) ); } #[test] /// # Test Carry-Over. /// /// This helps ensure we're doing the math correctly. fn carries() { macro_rules! carry { ($(($y:literal, $m:literal, $d:literal, $hh:literal, $mm:literal, $ss:literal) ($y2:literal, $m2:literal, $d2:literal, $hh2:literal, $mm2:literal, $ss2:literal) $fail:literal),+) => ($( assert_eq!( Abacus::new($y, $m, $d, $hh, $mm, $ss).parts(), ($y2, $m2, $d2, $hh2, $mm2, $ss2), $fail ); )+); } carry!( (2000, 13, 32, 24, 60, 60) (01, 2, 2, 1, 1, 0) "Overage of one everywhere.", (2000, 25, 99, 1, 1, 1) (02, 4, 9, 1, 1, 1) "Large month/day overages.", (2000, 1, 1, 99, 99, 99) (00, 1, 5, 4, 40, 39) "Large time overflows.", (2000, 255, 255, 255, 255, 255) (21, 11, 20, 19, 19, 15) "Max overflows.", (1970, 25, 99, 1, 1, 1) (00, 1, 1, 0, 0, 0) "Saturating low.", (3000, 25, 99, 1, 1, 1) (99, 12, 31, 23, 59, 59) "Saturating high #1.", (2099, 25, 99, 1, 1, 1) (99, 12, 31, 23, 59, 59) "Saturating high #2.", (2010, 0, 0, 1, 1, 1) (09, 11, 30, 1, 1, 1) "Zero month, zero day.", (2010, 0, 32, 1, 1, 1) (10, 1, 1, 1, 1, 1) "Zero month, overflowing day.", (2010, 1, 0, 1, 1, 1) (09, 12, 31, 1, 1, 1) "Zero day into zero month.", (2010, 2, 30, 1, 1, 1) (10, 3, 2, 1, 1, 1) "Too many days for month.", (2010, 24, 1, 1, 1, 1) (11, 12, 1, 1, 1, 1) "Exactly 24 months." ); } #[test] /// # Bitshifts. /// /// Make sure our bitshift tickery doesn't lead to any rounding errors. 
/// /// While the time fields are themselves `u32`, because of how /// instantiation works, they are all effectively limited to the `u8` range /// except for seconds, which (by the time shifting matters) maxes out at /// `86_399`. /// /// The range to check is therefore `0..DAY_IN_SECONDS`. fn shifting() { fn divvy(mut ss: u32) -> (u32, u32, u32) { let mut hh = 0; let mut mm = 0; if ss >= HOUR_IN_SECONDS { let div = ss / HOUR_IN_SECONDS; hh += div; ss -= div * HOUR_IN_SECONDS; } if ss >= MINUTE_IN_SECONDS { let div = ss / MINUTE_IN_SECONDS; mm += div; ss -= div * MINUTE_IN_SECONDS; } (hh, mm, ss) } fn shifty(mut ss: u32) -> (u32, u32, u32) { let mut hh = 0; let mut mm = 0; if ss >= HOUR_IN_SECONDS { let div = (ss * 0x91A3) >> 27; hh += div; ss -= div * HOUR_IN_SECONDS; } if ss >= MINUTE_IN_SECONDS { let div = (ss * 0x889) >> 17; mm += div; ss -= div * MINUTE_IN_SECONDS; } (hh, mm, ss) } for i in 0..DAY_IN_SECONDS { assert_eq!(divvy(i), shifty(i)); } } }
rebalance_date
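// A short worked example (a sketch, not part of the original module) of the
// reciprocal-multiplication trick used by `rebalance_ss` above. For the
// ranges exercised by the `shifting` test (0..DAY_IN_SECONDS):
//
//   0x91A3 = 37283 = ceil(2^27 / 3600), so (ss * 0x91A3) >> 27 == ss / 3600
//   0x889  =  2185 = ceil(2^17 / 60),   so (ss * 0x889)  >> 17 == ss / 60
//
// i.e. one multiply and one shift replace an integer division.
#[allow(dead_code)]
const fn div_3600(ss: u32) -> u32 { (ss * 0x91A3) >> 27 }
#[allow(dead_code)]
const fn div_60(ss: u32) -> u32 { (ss * 0x889) >> 17 }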
get_news_validator.py
from utils.utils import validate_parameters get_news_query_schema = { "from": { "type": "integer", 'coerce': int, "min": 0, "max": 10000, "required": False, "default": 0 }, "limit": { "type": "integer", 'coerce': int, "min": 0, "max": 10000, "required": False, "default": 0 }, "category": { "type": "string", "required": False } } class
: def __call__(self, request): body_validation_errors = validate_parameters(request.args.copy(), get_news_query_schema) return body_validation_errors
GetNewsValidator
Plural.js
ZERO: 'zero', ONE: 'one', TWO: 'two', FEW: 'few', MANY: 'many', OTHER: 'other', };
export default {
lenetplus.py
from mxnet import gluon def _make_conv_block(block_index, num_chan=32, num_layer=2, stride=1, pad=2):
class LeNetPlus(gluon.nn.HybridBlock): """ LeNetPlus model """ def __init__(self, classes=10, feature_size=256, use_bn=False, use_inn=False, use_l2n=False, dropout=0.5, **kwargs): super(LeNetPlus, self).__init__(**kwargs) num_chans = [32, 64, 128] with self.name_scope(): self.use_bn = use_bn self.use_inn = use_inn self.use_l2n = use_l2n self.features = gluon.nn.HybridSequential(prefix='') if self.use_inn: self.features.add(gluon.nn.InstanceNorm()) for i, num_chan in enumerate(num_chans): if use_bn: self.features.add(gluon.nn.BatchNorm()) self.features.add(_make_conv_block(i, num_chan=num_chan)) if dropout > 0: self.features.add(gluon.nn.Dropout(dropout)) self.features.add(gluon.nn.Dense(feature_size)) self.output = gluon.nn.Dense(classes) def hybrid_forward(self, F, x, *args, **kwargs): features = self.features(x) if self.use_l2n: features = F.L2Normalization(features, mode='instance', name='l2n') outputs = self.output(features) return outputs, features
out = gluon.nn.HybridSequential(prefix='block_%d_' % block_index) with out.name_scope(): for _ in range(num_layer): out.add(gluon.nn.Conv2D(num_chan, kernel_size=3, strides=stride, padding=pad)) out.add(gluon.nn.LeakyReLU(alpha=0.2)) out.add(gluon.nn.MaxPool2D()) return out
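# A minimal usage sketch (not part of the original module), wiring the block
# factory and model above together on MNIST-shaped input; the forward pass
# returns class logits plus the embedding, e.g. for center-loss training.
if __name__ == '__main__':
    import mxnet as mx

    net = LeNetPlus(classes=10, feature_size=256, use_bn=True, use_l2n=True)
    net.initialize(mx.init.Xavier())
    x = mx.nd.random.uniform(shape=(8, 1, 28, 28))  # a batch of MNIST-sized images
    outputs, features = net(x)
    print(outputs.shape, features.shape)  # (8, 10) (8, 256)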
get_dot11_associations.py
# -*- coding: utf-8 -*- # ---------------------------------------------------------------------
# --------------------------------------------------------------------- # Copyright (C) 2007-2019 The NOC Project # See LICENSE for details # --------------------------------------------------------------------- # Python modules import re # NOC modules from noc.core.script.base import BaseScript from noc.sa.interfaces.igetdot11associations import IGetDot11Associations from noc.core.text import strip_html_tags rx_mac = re.compile( "(?P<mac>[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2})" ) class Script(BaseScript): name = "ZTE.ZXDSL531.get_dot11_associations" interface = IGetDot11Associations def execute(self): if self.access_profile.scheme == self.TELNET: v = self.cli("wlctl authe_sta_list") elif self.access_profile.scheme == self.HTTP: v = self.http.get("/wlclientview.cmd") v = strip_html_tags(v) else: raise Exception("Unsupported access scheme") r = [] for l in v.split("\n"): m = rx_mac.search(l) if m: r.append({"mac": m.group("mac")}) return r
# ZTE.ZXDSL531.get_dot11_associations
errors.ts
import type { IError } from '@interfaces/common'; export class HttpError extends Error implements IError { public status: number; public message: string; constructor(status: number, message: string) { super(message); this.status = status; this.message = message; } }
} } export class UnauthorizedError extends HttpError { constructor(message = 'Unauthorized user.') { super(401, message); } } export class ForbiddenError extends HttpError { constructor(message = 'Forbidden.') { super(403, message); } } export class NotFoundError extends HttpError { constructor(message = 'Requested resource not found.') { super(404, message); } } export class InternalServerError extends HttpError { constructor(message = 'Something went wrong.') { super(500, message); } }
export class BadRequestError extends HttpError { constructor(message = 'Bad Request.') { super(400, message);
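// A small usage sketch (not part of the original module): route handlers can
// throw the typed errors above, and a single catch-all can map them to HTTP
// responses via the `status` carried by HttpError. `toResponse` is a
// hypothetical helper, for illustration only.
function toResponse(err: unknown): { status: number; message: string } {
    if (err instanceof HttpError) {
        return { status: err.status, message: err.message };
    }
    return { status: 500, message: 'Something went wrong.' };
}

// toResponse(new NotFoundError())
//   -> { status: 404, message: 'Requested resource not found.' }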
imgs_getter.py
# -*- coding: utf-8 -*-
"""
Fetch YCY (杨超越) images
"""
import json
import os

import requests

from settings import PROJECT_PATH


class YCYImage(object):
    def __init__(self):
        self.headers = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36",
            # "Content-Type": "application/x-www-form-urlencoded",
        }

    def get_img(self):
        """Fetch image links from ten pages (24 results each) of search results"""
        url = "https://www.duitang.com/napi/blog/list/by_search/"
        result = []
        for page in range(0, 240, 24):
            data = {
                'kw': '杨超越',
                'type': 'feed',
                'include_fields': 'top_comments,is_root,source_link,item,buyable,root_id,status,like_count,like_id,sender,album,reply_count,favorite_blog_id',
                '_type': '',
                'start': str(page),
            }
            r = requests.get(url, headers=self.headers, params=data, verify=False)
            d = json.loads(r.text)
            if d.get('data').get('object_list'):
                d = d['data']['object_list']
                result.extend(d)
        return result

    def download_img_and_save(self, result):
        """Download the images and save them to disk"""
        if not result:
            return
        for index, d in enumerate(result):
            r = requests.get(url=d['photo']['path'])
            file_name = os.path.join(PROJECT_PATH, "pics", "ycy_{}.jpg".format(index))
            with open(file_name, 'wb') as f:
                f.write(r.content)
result = self.get_img() self.download_img_and_save(result) if __name__ == '__main__': ycy = YCYImage() ycy.run()
def run(self):
gender.py
class Gender:
    NEUTRAL = 1
    FEMALE = 2
    MALE = 3

    GENDER_STRINGS = {NEUTRAL: "neutral",
                      FEMALE: "female",
                      MALE: "male"}

    def __init__(self, gender: int = 1):
        self.gender: int = gender

    def __str__(self):
        # `_` is assumed to be an installed gettext translation alias;
        # translate the label string, not the integer key.
        return _(self.GENDER_STRINGS[self.gender])

    def __eq__(self, other):
if isinstance(other, Gender): return self.gender == other.gender elif isinstance(other, int): return self.gender == other else: return False
b.js
import lib from 'lib';
export default lib;
files.py
import os from contextlib import contextmanager @contextmanager def
(destination): # Allows for temporary change of working directory when used with a with statement try: cwd = os.getcwd() os.chdir(destination) yield finally: os.chdir(cwd)
change_dir
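# A brief usage sketch (not part of the original module): the working
# directory is switched inside the `with` block and restored afterwards,
# even if the body raises.
if __name__ == "__main__":
    before = os.getcwd()
    with change_dir(".."):
        print("inside:", os.getcwd())
    assert os.getcwd() == before  # restored on exit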
http.go
package dto import ( "fmt" "github.com/stackql/go-openapistackql/openapistackql" "github.com/getkin/kin-openapi/openapi3" ) type ParameterBinding struct { Param *openapistackql.Parameter Val interface{} } func NewParameterBinding(param *openapistackql.Parameter, val interface{}) ParameterBinding { return ParameterBinding{ Param: param, Val: val, } } type HttpParameters struct { CookieParams map[string]ParameterBinding HeaderParams map[string]ParameterBinding PathParams map[string]ParameterBinding QueryParams map[string]ParameterBinding RequestBody map[string]interface{} ResponseBody map[string]interface{} ServerParams map[string]ParameterBinding Unassigned map[string]ParameterBinding } func NewHttpParameters() *HttpParameters { return &HttpParameters{ CookieParams: make(map[string]ParameterBinding), HeaderParams: make(map[string]ParameterBinding), PathParams: make(map[string]ParameterBinding), QueryParams: make(map[string]ParameterBinding), RequestBody: make(map[string]interface{}), ResponseBody: make(map[string]interface{}), ServerParams: make(map[string]ParameterBinding), Unassigned: make(map[string]ParameterBinding), } } func (hp *HttpParameters) StoreParameter(param *openapistackql.Parameter, val interface{}) { if param.In == openapi3.ParameterInPath { hp.PathParams[param.Name] = NewParameterBinding(param, val) return } if param.In == openapi3.ParameterInQuery { hp.QueryParams[param.Name] = NewParameterBinding(param, val) return } if param.In == openapi3.ParameterInHeader { hp.HeaderParams[param.Name] = NewParameterBinding(param, val) return } if param.In == openapi3.ParameterInCookie { hp.CookieParams[param.Name] = NewParameterBinding(param, val) return } if param.In == "server" { hp.ServerParams[param.Name] = NewParameterBinding(param, val) return } } func (hp *HttpParameters) updateStuff(k string, v ParameterBinding, paramMap map[string]interface{}, visited map[string]struct{}) error { if _, ok := visited[k]; ok { return fmt.Errorf("parameter name = '%s' repeated, cannot convert to flat map", k) } paramMap[k] = v.Val visited[k] = struct{}{} return nil } func (hp *HttpParameters) ToFlatMap() (map[string]interface{}, error) { rv := make(map[string]interface{}) visited := make(map[string]struct{})
} } for k, v := range hp.HeaderParams { err := hp.updateStuff(k, v, rv, visited) if err != nil { return nil, err } } for k, v := range hp.PathParams { err := hp.updateStuff(k, v, rv, visited) if err != nil { return nil, err } } for k, v := range hp.QueryParams { err := hp.updateStuff(k, v, rv, visited) if err != nil { return nil, err } } for k, v := range hp.ServerParams { err := hp.updateStuff(k, v, rv, visited) if err != nil { return nil, err } } return rv, nil }
for k, v := range hp.CookieParams { err := hp.updateStuff(k, v, rv, visited) if err != nil { return nil, err
085b1aabe4f13fd63dacce00e4471dfa3cc64748.js
mycallback( {"CONTRIBUTOR OCCUPATION": "Retired", "CONTRIBUTION AMOUNT (F3L Bundled)": "250.00", "ELECTION CODE": "P2012", "MEMO CODE": "", "CONTRIBUTOR EMPLOYER": "n/a", "DONOR CANDIDATE STATE": "", "CONTRIBUTOR STREET 1": "Post Office Box 506", "CONTRIBUTOR MIDDLE NAME": "", "DONOR CANDIDATE FEC ID": "", "DONOR CANDIDATE MIDDLE NAME": "", "CONTRIBUTOR STATE": "CA", "DONOR CANDIDATE FIRST NAME": "", "CONTRIBUTOR FIRST NAME": "David", "BACK REFERENCE SCHED NAME": "", "DONOR CANDIDATE DISTRICT": "", "CONTRIBUTION DATE": "20110320", "DONOR COMMITTEE NAME": "", "MEMO TEXT/DESCRIPTION": "", "Reference to SI or SL system code that identifies the Account": "", "FILER COMMITTEE ID NUMBER": "C00461061", "DONOR CANDIDATE LAST NAME": "", "CONTRIBUTOR LAST NAME": "Wheeler", "_record_type": "fec.version.v7_0.SA", "CONDUIT STREET2": "", "CONDUIT STREET1": "", "DONOR COMMITTEE FEC ID": "", "CONTRIBUTION PURPOSE DESCRIP": "", "CONTRIBUTOR ZIP": "95642", "CONTRIBUTOR STREET 2": "", "CONDUIT CITY": "", "ENTITY TYPE": "IND", "CONTRIBUTOR CITY": "Jackson", "CONTRIBUTOR SUFFIX": "", "TRANSACTION ID": "INCA8819", "DONOR CANDIDATE SUFFIX": "", "DONOR CANDIDATE OFFICE": "", "CONTRIBUTION PURPOSE CODE": "15", "ELECTION OTHER DESCRIPTION": "", "_src_file": "2011/20110504/727410.fec_2.yml", "CONDUIT STATE": "", "CONTRIBUTOR ORGANIZATION NAME": "", "BACK REFERENCE TRAN ID NUMBER": "", "DONOR CANDIDATE PREFIX": "", "CONTRIBUTOR PREFIX": "", "CONDUIT ZIP": "", "CONDUIT NAME": "", "CONTRIBUTION AGGREGATE F3L Semi-annual Bundled": "250.00", "FORM TYPE": "SA11AI"});
mycallback( {"CONTRIBUTOR OCCUPATION": "Retired", "CONTRIBUTION AMOUNT (F3L Bundled)": "250.00", "ELECTION CODE": "P2012", "MEMO CODE": "", "CONTRIBUTOR EMPLOYER": "n/a", "DONOR CANDIDATE STATE": "", "CONTRIBUTOR STREET 1": "Post Office Box 506", "CONTRIBUTOR MIDDLE NAME": "", "DONOR CANDIDATE FEC ID": "", "DONOR CANDIDATE MIDDLE NAME": "", "CONTRIBUTOR STATE": "CA", "DONOR CANDIDATE FIRST NAME": "", "CONTRIBUTOR FIRST NAME": "David", "BACK REFERENCE SCHED NAME": "", "DONOR CANDIDATE DISTRICT": "", "CONTRIBUTION DATE": "20110320", "DONOR COMMITTEE NAME": "", "MEMO TEXT/DESCRIPTION": "", "Reference to SI or SL system code that identifies the Account": "", "FILER COMMITTEE ID NUMBER": "C00461061", "DONOR CANDIDATE LAST NAME": "", "CONTRIBUTOR LAST NAME": "Wheeler", "_record_type": "fec.version.v7_0.SA", "CONDUIT STREET2": "", "CONDUIT STREET1": "", "DONOR COMMITTEE FEC ID": "", "CONTRIBUTION PURPOSE DESCRIP": "", "CONTRIBUTOR ZIP": "95642", "CONTRIBUTOR STREET 2": "", "CONDUIT CITY": "", "ENTITY TYPE": "IND", "CONTRIBUTOR CITY": "Jackson", "CONTRIBUTOR SUFFIX": "", "TRANSACTION ID": "INCA8819", "DONOR CANDIDATE SUFFIX": "", "DONOR CANDIDATE OFFICE": "", "CONTRIBUTION PURPOSE CODE": "15", "ELECTION OTHER DESCRIPTION": "", "_src_file": "2011/20110504/727410.fec_2.yml", "CONDUIT STATE": "", "CONTRIBUTOR ORGANIZATION NAME": "", "BACK REFERENCE TRAN ID NUMBER": "", "DONOR CANDIDATE PREFIX": "", "CONTRIBUTOR PREFIX": "", "CONDUIT ZIP": "", "CONDUIT NAME": "", "CONTRIBUTION AGGREGATE F3L Semi-annual Bundled": "250.00", "FORM TYPE": "SA11AI"});
hregistrarcontasautorizacaodoc.js
/**@preserve GeneXus Java 10_3_12-110051 on February 8, 2021 22:6:15.8 *
gx.evt.autoSkip=!1;gx.define("hregistrarcontasautorizacaodoc",!1,function(){this.ServerClass="hregistrarcontasautorizacaodoc";this.PackageName="";this.setObjectType("web");this.setOnAjaxSessionTimeout("Warn");this.hasEnterEvent=!0;this.skipOnEnter=!1;this.addKeyListener("12","'FECHAR'");this.addKeyListener("5","REFRESH");this.addKeyListener("12","CANCEL");this.addKeyListener("1","HELP");this.SetStandaloneVars=function(){this.AV18PedidoCompraEmpresaId=gx.fn.getControlValue("vPEDIDOCOMPRAEMPRESAID");this.AV19PedidoCompraAno=gx.fn.getIntegerValue("vPEDIDOCOMPRAANO",".");this.AV20PedidoCompraNumero=gx.fn.getIntegerValue("vPEDIDOCOMPRANUMERO",".");this.AV24ContaPagRecDtEmissaoStr=gx.fn.getControlValue("vCONTAPAGRECDTEMISSAOSTR")};this.e1214o2_client=function(){this.executeServerEvent("ENTER",!0,null,!1,!1)};this.e1114o2_client=function(){this.executeServerEvent("'FECHAR'",!1,null,!1,!1)};this.e1514o2_client=function(){this.executeServerEvent("CANCEL",!0,null,!1,!1)};this.GXValidFnc=[];var n=this.GXValidFnc;this.GXCtrlIds=[3,6,9,11];this.GXLastCtrlId=11;n[3]={fld:"TABLE1",grid:0};n[6]={fld:"TABLE2",grid:0};n[9]={fld:"TEXTBLOCK1",format:0,grid:0};n[11]={lvl:0,type:"svchar",len:20,dec:0,sign:!1,ro:0,grid:0,gxgrid:null,fnc:null,isvalid:null,rgrid:[],fld:"vLOGENTREGADOCUMENTO",gxz:"ZV13LOGEntregaDocumento",gxold:"OV13LOGEntregaDocumento",gxvar:"AV13LOGEntregaDocumento",ucs:[],op:[],ip:[],nacdep:[],ctrltype:"edit",v2v:function(n){gx.O.AV13LOGEntregaDocumento=n},v2z:function(n){gx.O.ZV13LOGEntregaDocumento=n},v2c:function(){gx.fn.setControlValue("vLOGENTREGADOCUMENTO",gx.O.AV13LOGEntregaDocumento,0)},c2v:function(){gx.O.AV13LOGEntregaDocumento=this.val()},val:function(){return gx.fn.getControlValue("vLOGENTREGADOCUMENTO")},nac:gx.falseFn};this.AV13LOGEntregaDocumento="";this.ZV13LOGEntregaDocumento="";this.OV13LOGEntregaDocumento="";this.AV13LOGEntregaDocumento="";this.AV18PedidoCompraEmpresaId="";this.AV19PedidoCompraAno=0;this.AV20PedidoCompraNumero=0;this.AV24ContaPagRecDtEmissaoStr="";this.A8475PedidoCompraItemProdutoGrpEmp="";this.A3337PedidoCompraNumero=0;this.A3336PedidoCompraAno=0;this.A3426PedidoCompraEmpresaId="";this.A8476PedidoCompraItemProdutoGrpId=0;this.A6568PedidoCompraItemProdutoCategor="";this.A3339PedidoCompraItemProdutoEmpId="";this.A3340PedidoCompraItemProdutoId=0;this.A4203PedidoCompraPagRecNumero=0;this.A4202PedidoCompraPagRecAno=0;this.A4201PedidoCompraPagRecEmpId="";this.A1695ContaPagRecDocumento="";this.A1685ContaPagRecEmpresaId="";this.A8430LOGEntregaDocumento="";this.A4290LOGEntregaData=gx.date.nullDate();this.Events={e1214o2_client:["ENTER",!0],e1114o2_client:["'FECHAR'",!0],e1514o2_client:["CANCEL",!0]};this.EvtParms.REFRESH=[[],[]];this.EvtParms.ENTER=[[{av:"AV24ContaPagRecDtEmissaoStr",fld:"vCONTAPAGRECDTEMISSAOSTR"},{av:"AV13LOGEntregaDocumento",fld:"vLOGENTREGADOCUMENTO"},{av:"AV27SnPrestServ",fld:"vSNPRESTSERV"},{av:"A3426PedidoCompraEmpresaId",fld:"PEDIDOCOMPRAEMPRESAID"},{av:"AV18PedidoCompraEmpresaId",fld:"vPEDIDOCOMPRAEMPRESAID"},{av:"A3336PedidoCompraAno",fld:"PEDIDOCOMPRAANO"},{av:"AV19PedidoCompraAno",fld:"vPEDIDOCOMPRAANO"},{av:"A3337PedidoCompraNumero",fld:"PEDIDOCOMPRANUMERO"},{av:"AV20PedidoCompraNumero",fld:"vPEDIDOCOMPRANUMERO"},{av:"A8430LOGEntregaDocumento",fld:"LOGENTREGADOCUMENTO"},{av:"A4290LOGEntregaData",fld:"LOGENTREGADATA"},{av:"A1685ContaPagRecEmpresaId",fld:"CONTAPAGRECEMPRESAID"},{av:"AV21ContaPagRecEmpresaId",fld:"vCONTAPAGRECEMPRESAID"},{av:"A1695ContaPagRecDocumento",fld:"CONTAPAGRECDOCUMENTO"},{av:"A4201PedidoCompraPagRecEmpId",fld:"PEDIDO
COMPRAPAGRECEMPID"},{av:"A4202PedidoCompraPagRecAno",fld:"PEDIDOCOMPRAPAGRECANO"},{av:"A4203PedidoCompraPagRecNumero",fld:"PEDIDOCOMPRAPAGRECNUMERO"},{av:"A8475PedidoCompraItemProdutoGrpEmp",fld:"PEDIDOCOMPRAITEMPRODUTOGRPEMP"},{av:"A6568PedidoCompraItemProdutoCategor",fld:"PEDIDOCOMPRAITEMPRODUTOCATEGOR"},{av:"A8476PedidoCompraItemProdutoGrpId",fld:"PEDIDOCOMPRAITEMPRODUTOGRPID"}],[{av:"AV22ContaPagRecDtEmissao",fld:"vCONTAPAGRECDTEMISSAO"},{av:"AV14SnErro",fld:"vSNERRO"},{av:"AV32GXLvl13",fld:"vGXLVL13"},{av:"AV16Txt",fld:"vTXT"},{av:"AV23Data",fld:"vDATA"},{av:"AV26GrupoProdutoEmpresaId",fld:"vGRUPOPRODUTOEMPRESAID"},{av:"AV28GrupoProdServTxt",fld:"vGRUPOPRODSERVTXT"},{av:"AV25GrupoProdServ",fld:"vGRUPOPRODSERV"},{av:"AV27SnPrestServ",fld:"vSNPRESTSERV"}]];this.EvtParms["'FECHAR'"]=[[{av:"AV13LOGEntregaDocumento",fld:"vLOGENTREGADOCUMENTO"},{av:"AV24ContaPagRecDtEmissaoStr",fld:"vCONTAPAGRECDTEMISSAOSTR"},{av:"AV20PedidoCompraNumero",fld:"vPEDIDOCOMPRANUMERO"},{av:"AV19PedidoCompraAno",fld:"vPEDIDOCOMPRAANO"},{av:"AV18PedidoCompraEmpresaId",fld:"vPEDIDOCOMPRAEMPRESAID"}],[{av:"AV13LOGEntregaDocumento",fld:"vLOGENTREGADOCUMENTO"}]];this.EnterCtrl=["BTNCONFIRMAR"];this.setVCMap("AV18PedidoCompraEmpresaId","vPEDIDOCOMPRAEMPRESAID",0,"char");this.setVCMap("AV19PedidoCompraAno","vPEDIDOCOMPRAANO",0,"int");this.setVCMap("AV20PedidoCompraNumero","vPEDIDOCOMPRANUMERO",0,"int");this.setVCMap("AV24ContaPagRecDtEmissaoStr","vCONTAPAGRECDTEMISSAOSTR",0,"char");this.InitStandaloneVars()});gx.setParentObj(new hregistrarcontasautorizacaodoc)
youtube-playlist-synchronizer.py
from PIL import Image, ImageTk from tkinter import Tk, Text, BOTH, W, N, E, S,filedialog,messagebox from tkinter.ttk import Frame, Button, Label, Style, Progressbar from youtube_synchronizer.utils import createFolderForPlaylist from youtube_synchronizer.dataconnectors.youtube_login import loginToGoogle class YoutubeFrame(Frame): def __init__(self): super().__init__() self.initUI() def initUI(self): self.master.title("Youtube Synchronizer") self.pack(fill=BOTH, expand=True) # self.columnconfigure(1, weight=1) self.rowconfigure(3, weight=1) self.rowconfigure(5, pad=1) lbl = Label(self, text="Welcome to Youtube playlist Synchronizer") lbl.grid(sticky=W, pady=4, padx=5) bar = Progressbar(self, length=200, style='black.Horizontal.TProgressbar') # img = Image.open("icon.png") # img = img.resize((300, 300), Image.ANTIALIAS) # ytpl = ImageTk.PhotoImage(img) # area = Label(self, image=ytpl) # area.image = ytpl self.logArea = Text(self,state="disabled") self.logArea.grid(row=1, column=0, columnspan=3, rowspan=4, padx=5, sticky=E+W+S+N) self.appendLog("Steps to follow \n") self.appendLog("1) Select root directory \n ") self.appendLog("2) Give permission for google to get playlist automatically \n") self.appendLog("3) start syncing into your selected folder\n") cbtn = Button(self, text="Choose Directory", command=lambda: self.chooseRootDirectory(cbtn)) cbtn.grid(row=5, column=0, pady=2) hbtn = Button(self, text="Google Permission", command=lambda: self.clicked(hbtn)) hbtn.grid(row=5, column=1, padx=2) obtn = Button(self, text="Start Sync", command=self.startSyncing) obtn.grid(row=5, column=3) def clicked(self,event): googlePermissionUrl = loginToGoogle() event.grid_forget() label = Label(self, text="Google Permissions Granted") label.grid(row=5, column=1, pady=2) self.appendLog("Thanks for granting Google Permission") def chooseRootDirectory(self,event): self.rootDirectory = filedialog.askdirectory() event.grid_forget() label = Label(self, text=self.rootDirectory) label.grid(row=5, column=0, pady=2) self.appendLog("You have selected "+ self.rootDirectory +" as your root directory")
        self.logArea.configure(state='normal')
        self.logArea.insert('end', text + '\n')
        self.logArea.configure(state='disabled')

    def startSyncing(self):
        self.response = messagebox.askquestion(
            "Confirmation",
            "you have selected: " + self.rootDirectory
            + " as root Directory and youtube playlist will be added as sub folders inside "
            + self.rootDirectory + "/, are you sure?")
        if self.response == 'yes':
            createFolderForPlaylist(self.rootDirectory)
            self.appendLog("Playlist synchronized successfully")
        else:
            self.appendLog("Synchronization cancelled")


def main():
    root = Tk()
    app = YoutubeFrame()
    root.mainloop()


if __name__ == '__main__':
    main()
def appendLog(self,text):
test197.js
var callbackArguments = []; var argument1 = function callback(a,b,c) { callbackArguments.push(JSON.stringify(arguments)) base_0[2][5] = {"157":1.1713102259137485e+308,"403":122,"783":607,"969":823,"1.366743586472057e+308":"[","":"76","1.187259722010475e+308":"","Jo)MBE6":"6","-100":""} return a*b+c }; var argument2 = function callback(a,b,c) { callbackArguments.push(JSON.stringify(arguments)) base_1[1]['{X'] = false return a/b-c }; var argument3 = r_1; var argument4 = function callback(a,b,c) { callbackArguments.push(JSON.stringify(arguments)) base_2[4][8] = null argument5[705] = "" base_2[4] = null return a-b-c }; var argument5 = function callback(a,b,c) { callbackArguments.push(JSON.stringify(arguments)) argument6[618] = [823,122] argument7 = 157 base_3[7] = "v" return a/b+c }; var argument6 = {"25":122,"":1.298530573673951e+308,"E2p|E1_":"x","*N":"g","1.1487048473586838e+308":1.7976931348623157e+308,"Z[":1.7294317012239148e+308}; var argument7 = null; var base_0 = [1.7976931348623157e+308,618,100,655,"pt",969,"B","(!h","l","dL5"] var r_0= undefined try { r_0 = base_0.some(argument1) } catch(e) { r_0= "Error" } var base_1 = [1.7976931348623157e+308,618,100,655,"pt",969,"B","(!h","l","dL5"] var r_1= undefined try { r_1 = base_1.some(argument2,argument3) } catch(e) { r_1= "Error" } var base_2 = [1.7976931348623157e+308,618,100,655,"pt",969,"B","(!h","l","dL5"] var r_2= undefined try { r_2 = base_2.some(argument4) } catch(e) { r_2= "Error" } var base_3 = [1.7976931348623157e+308,618,100,655,"pt",969,"B","(!h","l","dL5"] var r_3= undefined try { r_3 = base_3.some(argument5,argument6,argument7) } catch(e) { r_3= "Error" } function
(array){ return array.map(function(a){ if (a === null || a == undefined) return a; var name = a.constructor.name; if (name==='Object' || name=='Boolean'|| name=='Array'||name=='Number'||name=='String') return JSON.stringify(a); return name; }); } setTimeout(function(){ require("fs").writeFileSync("./experiments/some/someGen/test197.json",JSON.stringify({"baseObjects":serialize([base_0,base_1,base_2,base_3]),"returnObjects":serialize([r_0,r_1,r_2,r_3]),"callbackArgs":callbackArguments})) },300)
serialize
kindck-send-object.rs
// Copyright 2014 The Rust Project Developers. See the COPYRIGHT
// file at the top-level directory of this distribution and at
// http://rust-lang.org/COPYRIGHT.
//
// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
// option. This file may not be copied, modified, or distributed
// except according to those terms.

// Test which of the builtin types are considered sendable. The tests
// in this file all test the "kind" violations detected during kindck.
// See also `regions-bounded-by-send.rs`

fn assert_send<T:Send>()
trait Dummy { } trait Message : Send { } // careful with object types, who knows what they close over... fn object_ref_with_static_bound_not_ok() { assert_send::<&'static (Dummy+'static)>(); //~^ ERROR the trait `core::marker::Send` is not implemented } fn box_object_with_no_bound_not_ok<'a>() { assert_send::<Box<Dummy>>(); //~ ERROR the trait `core::marker::Send` is not implemented } fn object_with_send_bound_ok() { assert_send::<&'static (Dummy+Send)>(); assert_send::<Box<Dummy+Send>>(); } fn main() { }
{ }
object_response.py
# coding: utf-8 """ Pure Storage FlashBlade REST 1.6 Python SDK Pure Storage FlashBlade REST 1.6 Python SDK, developed by [Pure Storage, Inc](http://www.purestorage.com/). Documentations can be found at [purity-fb.readthedocs.io](http://purity-fb.readthedocs.io/). OpenAPI spec version: 1.6 Contact: [email protected] Generated by: https://github.com/swagger-api/swagger-codegen.git """ from pprint import pformat from six import iteritems import re class ObjectResponse(object): """ NOTE: This class is auto generated by the swagger code generator program. Do not edit the class manually. """ """ Attributes: swagger_types (dict): The key is attribute name and the value is attribute type. attribute_map (dict): The key is attribute name and the value is json key in definition. """ swagger_types = { 'pagination_info': 'PaginationInfo' } attribute_map = { 'pagination_info': 'pagination_info' } def __init__(self, pagination_info=None): """ ObjectResponse - a model defined in Swagger """ self._pagination_info = None if pagination_info is not None: self.pagination_info = pagination_info @property def pagination_info(self): """ Gets the pagination_info of this ObjectResponse. pagination information, only available in GET requests :return: The pagination_info of this ObjectResponse. :rtype: PaginationInfo """ return self._pagination_info @pagination_info.setter def pagination_info(self, pagination_info): """ Sets the pagination_info of this ObjectResponse. pagination information, only available in GET requests :param pagination_info: The pagination_info of this ObjectResponse. :type: PaginationInfo """ self._pagination_info = pagination_info def to_dict(self): """ Returns the model properties as a dict """ result = {} for attr, _ in iteritems(self.swagger_types): value = getattr(self, attr) if isinstance(value, list): result[attr] = list(map( lambda x: x.to_dict() if hasattr(x, "to_dict") else x, value )) elif hasattr(value, "to_dict"): result[attr] = value.to_dict() elif isinstance(value, dict): result[attr] = dict(map( lambda item: (item[0], item[1].to_dict()) if hasattr(item[1], "to_dict") else item, value.items() )) else: result[attr] = value return result def to_str(self): """ Returns the string representation of the model """ return pformat(self.to_dict()) def __repr__(self): """ For `print` and `pprint` """ return self.to_str() def __eq__(self, other): """ Returns true if both objects are equal """ if not isinstance(other, ObjectResponse): return False return self.__dict__ == other.__dict__ def __ne__(self, other):
""" Returns true if both objects are not equal """ return not self == other
95.js
/* Element initializers with unknown index. */ function
(i) { var x = [1, 2, i == 1 ? 3 : 4, 5, 6]; +i; var y = "" + x; if (i == 1) { y; "1,2,3,5,6"; } else { y; "1,2,4,5,6"; } var __es_v0 = 3; } for (var i = 0; i < 10; i++) { foo(i); foo(i % 1); }
foo
blocker.py
from pygame import * class Blocker(sprite.Sprite): def __init__(self, size, color, row, column): sprite.Sprite.__init__(self) self.height = size self.width = size self.color = color self.image = Surface((self.width, self.height)) self.image.fill(self.color) self.rect = self.image.get_rect() self.row = row self.column = column def
(self, game, keys, *args):
        game.screen.blit(self.image, self.rect)
update
0023_submission_is_featured.py
# Generated by Django 1.11.7 on 2018-01-13 14:26 from django.db import migrations, models class Migration(migrations.Migration):
dependencies = [ ("submission", "0022_submission_review_code"), ] operations = [ migrations.AddField( model_name="submission", name="is_featured", field=models.BooleanField(default=False), ), ]
tensor.rs
use std::fs;
use std::io::Read;
use std::str::FromStr;

use crate::model::Model;
use crate::{CliResult, Parameters};
use tract_hir::internal::*;

fn parse_dt(dt: &str) -> CliResult<DatumType> {
    Ok(match dt.to_lowercase().as_ref() {
        "f16" => DatumType::F16,
        "f32" => DatumType::F32,
        "f64" => DatumType::F64,
        "i8" => DatumType::I8,
        "i16" => DatumType::I16,
        "i32" => DatumType::I32,
        "i64" => DatumType::I64,
        "u8" => DatumType::U8,
        "u16" => DatumType::U16,
        "u32" => DatumType::U32,
        "u64" => DatumType::U64,
        _ => bail!(
            "Type of the input should be f16, f32, f64, i8, i16, i32, i64, u8, u16, u32, u64."
        ),
    })
}

pub fn parse_spec(size: &str) -> CliResult<InferenceFact> {
    if size.len() == 0 {
        return Ok(InferenceFact::default());
    }
    if size.contains("x") && !size.contains(",") {
        parse_x_spec(size)
    } else {
        parse_coma_spec(size)
    }
}

pub fn parse_coma_spec(size: &str) -> CliResult<InferenceFact> {
    let splits = size.split(",").collect::<Vec<_>>();

    if splits.len() < 1 {
        bail!("The <size> argument should be formatted as {size},{...},{type}.");
    }

    let last = splits.last().unwrap();
    let (datum_type, shape) = if let Ok(dt) = parse_dt(last) {
        (Some(dt), &splits[0..splits.len() - 1])
    } else {
        (None, &*splits)
    };

    let shape = ShapeFactoid::closed(
        shape
            .iter()
            .map(|&s| {
                Ok(if s == "_" { GenericFactoid::Any } else { GenericFactoid::Only(parse_dim(s)?) })
            })
            .collect::<CliResult<TVec<DimFact>>>()?,
    );

    if let Some(dt) = datum_type {
        Ok(InferenceFact::dt_shape(dt, shape))
    } else {
        Ok(InferenceFact::shape(shape))
    }
}

pub fn parse_dim(i: &str) -> CliResult<TDim> {
    // ensure the magic S is pre-registered
    #[cfg(feature = "pulse")]
    let _ = tract_pulse::internal::stream_symbol();

    if i.len() == 0 {
        bail!("Can not parse empty string as Dim")
    }
    let number_len = i.chars().take_while(|c| c.is_digit(10)).count();
    let symbol_len = i.len() - number_len;
    if symbol_len > 1 {
        bail!("Can not parse {} as Dim", i)
    }
    let number: i64 = if number_len > 0 { i[..number_len].parse()? } else { 1 };
    if symbol_len == 0 {
        return Ok(number.to_dim());
    }
    let symbol = i.chars().last().unwrap();
    let symbol = Symbol::from(symbol);
    Ok(symbol.to_dim() * number)
}

pub fn parse_x_spec(size: &str) -> CliResult<InferenceFact> {
    warn!(
        "Deprecated \"x\" syntax for shape : please use the comma as separator, x is now a symbol."
    );
    let splits = size.split("x").collect::<Vec<_>>();

    if splits.len() < 1 {
        bail!("The <size> argument should be formatted as {size},{...},{type}.");
    }

    let last = splits.last().unwrap();
    let (datum_type, shape) = if last.ends_with("S") || last.parse::<i32>().is_ok() {
        (None, &*splits)
    } else {
        let datum_type = parse_dt(splits.last().unwrap())?;
        (Some(datum_type), &splits[0..splits.len() - 1])
    };

    let shape = ShapeFactoid::closed(
        shape
            .iter()
            .map(|&s| {
                Ok(if s == "_" {
                    GenericFactoid::Any
                } else {
                    GenericFactoid::Only(parse_dim_stream(s)?)
}) }) .collect::<CliResult<TVec<DimFact>>>()?, ); if let Some(dt) = datum_type { Ok(InferenceFact::dt_shape(dt, shape)) } else { Ok(InferenceFact::shape(shape)) } } fn parse_values<'a, T: Datum + FromStr>(shape: &[usize], it: Vec<&'a str>) -> CliResult<Tensor> { let values = it .into_iter() .map(|v| Ok(v.parse::<T>().map_err(|_| format_err!("Failed to parse {}", v))?)) .collect::<CliResult<Vec<T>>>()?; Ok(tract_ndarray::Array::from_shape_vec(shape, values)?.into()) } fn tensor_for_text_data(filename: &str) -> CliResult<Tensor> { let mut file = fs::File::open(filename) .map_err(|e| format_err!("Reading tensor from {}, {:?}", filename, e))?; let mut data = String::new(); file.read_to_string(&mut data)?; let mut lines = data.lines(); let proto = parse_spec(lines.next().context("Empty data file")?)?; let shape = proto.shape.concretize().unwrap(); let values = lines.flat_map(|l| l.split_whitespace()).collect::<Vec<&str>>(); // We know there is at most one streaming dimension, so we can deduce the // missing value with a simple division. let product: usize = shape.iter().map(|o| o.to_usize().unwrap_or(1)).product(); let missing = values.len() / product; let shape: Vec<_> = shape.iter().map(|d| d.to_usize().unwrap_or(missing)).collect(); dispatch_datum!(parse_values(proto.datum_type.concretize().unwrap())(&*shape, values)) } /// Parses the `data` command-line argument. pub fn for_data(filename: &str) -> CliResult<(Option<String>, InferenceFact)> { #[allow(unused_imports)] use std::convert::TryFrom; if filename.ends_with(".pb") { #[cfg(feature = "onnx")] { let file = fs::File::open(filename).with_context(|| format!("Can't open {:?}", filename))?; let proto = ::tract_onnx::tensor::proto_from_reader(file)?; Ok((Some(proto.name.to_string()), Tensor::try_from(proto)?.into())) } #[cfg(not(feature = "onnx"))] { panic!("Loading tensor from protobuf requires onnx features"); } } else if filename.contains(".npz:") { let mut tokens = filename.split(":"); let (filename, inner) = (tokens.next().unwrap(), tokens.next().unwrap()); let mut npz = ndarray_npy::NpzReader::new(std::fs::File::open(filename)?)?; Ok((None, for_npz(&mut npz, inner)?.into())) } else { Ok((None, tensor_for_text_data(filename)?.into())) } } pub fn for_npz(npz: &mut ndarray_npy::NpzReader<fs::File>, name: &str) -> CliResult<Tensor> { fn rewrap<T: Datum>(array: tract_ndarray::ArrayD<T>) -> Tensor { let shape = array.shape().to_vec(); unsafe { let vec = array.into_raw_vec(); tract_core::ndarray::ArrayD::from_shape_vec_unchecked(shape, vec).into_tensor() } } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f32>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<f64>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i8>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i16>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i32>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<i64>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u8>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u16>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u32>, tract_ndarray::IxDyn>(name) { 
return Ok(rewrap(t)); } if let Ok(t) = npz.by_name::<tract_ndarray::OwnedRepr<u64>, tract_ndarray::IxDyn>(name) { return Ok(rewrap(t)); } bail!("Can not extract tensor from {}", name); } pub fn for_string(value: &str) -> CliResult<(Option<String>, InferenceFact)> { if value.starts_with("@")
else { let (name, value) = if value.contains(":") { let mut splits = value.split(":"); (Some(splits.next().unwrap().to_string()), splits.next().unwrap()) } else { (None, value) }; if value.contains("=") { let mut split = value.split("="); let spec = parse_spec(split.next().unwrap())?; let value = split.next().unwrap().split(","); let dt = spec .datum_type .concretize() .context("Must specify type when giving tensor value")?; let shape = spec .shape .as_concrete_finite()? .context("Must specify concrete shape when giving tensor value")?; let tensor = dispatch_datum!(parse_values(dt)(&*shape, value.collect()))?; Ok((name, tensor.into())) } else { Ok((name, parse_spec(value)?)) } } } #[cfg(feature = "pulse")] fn parse_dim_stream(s: &str) -> CliResult<TDim> { use tract_pulse::internal::stream_dim; if s == "S" { Ok(stream_dim()) } else if s.ends_with("S") { let number: String = s.chars().take_while(|c| c.is_digit(10)).collect(); let number: i64 = number.parse::<i64>().map(|i| i.into())?; Ok(stream_dim() * number) } else { Ok(s.parse::<i64>().map(|i| i.into())?) } } #[cfg(not(feature = "pulse"))] fn parse_dim_stream(s: &str) -> CliResult<TDim> { Ok(s.parse::<i64>().map(|i| i.into())?) } pub fn retrieve_or_make_inputs(tract: &dyn Model, params: &Parameters) -> CliResult<TVec<Tensor>> { let mut inputs: TVec<Tensor> = tvec!(); for input in tract.input_outlets() { let name = tract.node_name(input.node); if let Some(input) = params.input_values.get(name) { info!("Using fixed input for input called {:?}: {:?}", name, input); inputs.push(input.clone().into_tensor()) } else { let fact = tract.outlet_typedfact(*input)?; info!("Using random input for input called {:?}: {:?}", name, fact); inputs.push(crate::tensor::tensor_for_fact(&fact, None)?); } } Ok(inputs) } pub fn make_inputs(values: &[impl std::borrow::Borrow<TypedFact>]) -> CliResult<TVec<Tensor>> { values.iter().map(|v| tensor_for_fact(v.borrow(), None)).collect() } pub fn make_inputs_for_model(model: &dyn Model) -> CliResult<TVec<Tensor>> { Ok(make_inputs( &*model .input_outlets() .iter() .map(|&t| model.outlet_typedfact(t)) .collect::<TractResult<Vec<TypedFact>>>()?, )?) } #[allow(unused_variables)] pub fn tensor_for_fact(fact: &TypedFact, streaming_dim: Option<usize>) -> CliResult<Tensor> { if let Some(value) = &fact.konst { return Ok(value.clone().into_tensor()); } #[cfg(feature = "pulse")] { use tract_pulse::fact::StreamFact; use tract_pulse::internal::stream_symbol; if fact.shape.stream_info().is_some() { let s = stream_symbol(); if let Some(dim) = streaming_dim { let shape = fact .shape .iter() .map(|d| { d.eval(&SymbolValues::default().with(s, dim as i64)).to_usize().unwrap() }) .collect::<TVec<_>>(); return Ok(random(&shape, fact.datum_type)); } else { bail!("random tensor requires a streaming dim") } } } Ok(random( &fact .shape .as_concrete() .with_context(|| format!("Expected concrete shape, found: {:?}", fact))?, fact.datum_type, )) } /// Generates a random tensor of a given size and type.
pub fn random(sizes: &[usize], datum_type: DatumType) -> Tensor { use std::iter::repeat_with; fn make<D>(shape: &[usize]) -> Tensor where D: Datum, rand::distributions::Standard: rand::distributions::Distribution<D>, { use rand::{Rng, SeedableRng}; let mut rng = rand::rngs::StdRng::seed_from_u64(21242); let len = shape.iter().product(); tract_core::ndarray::ArrayD::from_shape_vec( shape, repeat_with(|| rng.gen::<D>()).take(len).collect(), ) .unwrap() .into() } use DatumType::*; match datum_type { Bool => make::<bool>(sizes), I8 => make::<i8>(sizes), I16 => make::<i16>(sizes), I32 => make::<i32>(sizes), I64 => make::<i64>(sizes), U8 => make::<u8>(sizes), U16 => make::<u16>(sizes), F16 => make::<f32>(sizes).cast_to::<f16>().unwrap().into_owned(), F32 => make::<f32>(sizes), F64 => make::<f64>(sizes), _ => panic!("Can not generate random tensor for {:?}", datum_type), } }
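// Editor's note, illustrative only (not part of the original file): because `random` seeds `StdRng` with a fixed constant, it is deterministic -- repeated calls with the same arguments return identical tensors. A minimal sketch: // let t = random(&[2, 3], DatumType::F32); // assert_eq!(t.shape(), &[2, 3]); // assert_eq!(t, random(&[2, 3], DatumType::F32));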
{ for_data(&value[1..]) }
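// Editor's sketch, illustrative only (not from the original file): the grammar accepted by `parse_spec` is comma-separated dimensions with an optional trailing datum type; `_` marks an unknown dimension and a letter suffix (e.g. `2S`) multiplies a symbol. // assert!(parse_spec("1,3,224,224,f32").is_ok()); // assert!(parse_spec("_,12").is_ok()); // assert_eq!(parse_dim("3").unwrap(), 3i64.to_dim());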
mode0.rs
#[doc = "Register `MODE0` reader"] pub struct R(crate::R<MODE0_SPEC>); impl core::ops::Deref for R { type Target = crate::R<MODE0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::convert::From<crate::R<MODE0_SPEC>> for R { fn from(reader: crate::R<MODE0_SPEC>) -> Self { R(reader) } } #[doc = "Register `MODE0` writer"] pub struct W(crate::W<MODE0_SPEC>); impl core::ops::Deref for W { type Target = crate::W<MODE0_SPEC>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } impl core::ops::DerefMut for W { #[inline(always)] fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl core::convert::From<crate::W<MODE0_SPEC>> for W { fn from(writer: crate::W<MODE0_SPEC>) -> Self { W(writer) } } #[doc = "Field `READ_MODE` reader - Read Mode"] pub struct READ_MODE_R(crate::FieldReader<bool, bool>); impl READ_MODE_R { pub(crate) fn new(bits: bool) -> Self { READ_MODE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for READ_MODE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `READ_MODE` writer - Read Mode"] pub struct READ_MODE_W<'a> { w: &'a mut W, } impl<'a> READ_MODE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !0x01) | (value as u32 & 0x01); self.w } } #[doc = "Field `WRITE_MODE` reader - Write Mode"] pub struct WRITE_MODE_R(crate::FieldReader<bool, bool>); impl WRITE_MODE_R { pub(crate) fn new(bits: bool) -> Self { WRITE_MODE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for WRITE_MODE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `WRITE_MODE` writer - Write Mode"] pub struct WRITE_MODE_W<'a> { w: &'a mut W, } impl<'a> WRITE_MODE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 1)) | ((value as u32 & 0x01) << 1); self.w } } #[doc = "NWAIT Mode\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum EXNW_MODE_A { #[doc = "0: Disabled"] DISABLED = 0, #[doc = "2: Frozen Mode"] FROZEN = 2, #[doc = "3: Ready Mode"] READY = 3, } impl From<EXNW_MODE_A> for u8 { #[inline(always)] fn from(variant: EXNW_MODE_A) -> Self { variant as _ } } #[doc = "Field `EXNW_MODE` reader - NWAIT Mode"] pub struct EXNW_MODE_R(crate::FieldReader<u8, EXNW_MODE_A>); impl EXNW_MODE_R { pub(crate) fn new(bits: u8) -> Self { EXNW_MODE_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> Option<EXNW_MODE_A> { match self.bits { 0 => Some(EXNW_MODE_A::DISABLED), 2 => Some(EXNW_MODE_A::FROZEN), 3 => Some(EXNW_MODE_A::READY), _ => None, } } #[doc = "Checks if the value of the field is `DISABLED`"] #[inline(always)] pub fn is_disabled(&self) -> bool { **self == EXNW_MODE_A::DISABLED } #[doc = "Checks if the value of the field is `FROZEN`"] #[inline(always)] pub fn is_frozen(&self) -> bool { 
**self == EXNW_MODE_A::FROZEN } #[doc = "Checks if the value of the field is `READY`"] #[inline(always)] pub fn is_ready(&self) -> bool { **self == EXNW_MODE_A::READY } } impl core::ops::Deref for EXNW_MODE_R { type Target = crate::FieldReader<u8, EXNW_MODE_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `EXNW_MODE` writer - NWAIT Mode"] pub struct EXNW_MODE_W<'a> { w: &'a mut W, } impl<'a> EXNW_MODE_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: EXNW_MODE_A) -> &'a mut W { unsafe { self.bits(variant.into()) } } #[doc = "Disabled"] #[inline(always)] pub fn disabled(self) -> &'a mut W { self.variant(EXNW_MODE_A::DISABLED) } #[doc = "Frozen Mode"] #[inline(always)] pub fn frozen(self) -> &'a mut W { self.variant(EXNW_MODE_A::FROZEN) } #[doc = "Ready Mode"] #[inline(always)] pub fn ready(self) -> &'a mut W { self.variant(EXNW_MODE_A::READY) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 4)) | ((value as u32 & 0x03) << 4); self.w } } #[doc = "Byte Access Type\n\nValue on reset: 0"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum BAT_A { #[doc = "0: Byte select access type:- Write operation is controlled using NCS, NWE, NBS0, NBS1.- Read operation is controlled using NCS, NRD, NBS0, NBS1."] BYTE_SELECT = 0, #[doc = "1: Byte write access type:- Write operation is controlled using NCS, NWR0, NWR1.- Read operation is controlled using NCS and NRD."] BYTE_WRITE = 1, } impl From<BAT_A> for bool { #[inline(always)] fn from(variant: BAT_A) -> Self { variant as u8 != 0 } } #[doc = "Field `BAT` reader - Byte Access Type"] pub struct BAT_R(crate::FieldReader<bool, BAT_A>); impl BAT_R { pub(crate) fn new(bits: bool) -> Self { BAT_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> BAT_A { match self.bits { false => BAT_A::BYTE_SELECT, true => BAT_A::BYTE_WRITE, } } #[doc = "Checks if the value of the field is `BYTE_SELECT`"] #[inline(always)] pub fn is_byte_select(&self) -> bool { **self == BAT_A::BYTE_SELECT } #[doc = "Checks if the value of the field is `BYTE_WRITE`"] #[inline(always)] pub fn is_byte_write(&self) -> bool { **self == BAT_A::BYTE_WRITE } } impl core::ops::Deref for BAT_R { type Target = crate::FieldReader<bool, BAT_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `BAT` writer - Byte Access Type"] pub struct
<'a> { w: &'a mut W, } impl<'a> BAT_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: BAT_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "Byte select access type:- Write operation is controlled using NCS, NWE, NBS0, NBS1.- Read operation is controlled using NCS, NRD, NBS0, NBS1."] #[inline(always)] pub fn byte_select(self) -> &'a mut W { self.variant(BAT_A::BYTE_SELECT) } #[doc = "Byte write access type:- Write operation is controlled using NCS, NWR0, NWR1.- Read operation is controlled using NCS and NRD."] #[inline(always)] pub fn byte_write(self) -> &'a mut W { self.variant(BAT_A::BYTE_WRITE) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 8)) | ((value as u32 & 0x01) << 8); self.w } } #[doc = "Data Bus Width\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] pub enum DBW_A { #[doc = "0: 8-bit Data Bus"] _8_BIT = 0, #[doc = "1: 16-bit Data Bus"] _16_BIT = 1, } impl From<DBW_A> for bool { #[inline(always)] fn from(variant: DBW_A) -> Self { variant as u8 != 0 } } #[doc = "Field `DBW` reader - Data Bus Width"] pub struct DBW_R(crate::FieldReader<bool, DBW_A>); impl DBW_R { pub(crate) fn new(bits: bool) -> Self { DBW_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> DBW_A { match self.bits { false => DBW_A::_8_BIT, true => DBW_A::_16_BIT, } } #[doc = "Checks if the value of the field is `_8_BIT`"] #[inline(always)] pub fn is_8_bit(&self) -> bool { **self == DBW_A::_8_BIT } #[doc = "Checks if the value of the field is `_16_BIT`"] #[inline(always)] pub fn is_16_bit(&self) -> bool { **self == DBW_A::_16_BIT } } impl core::ops::Deref for DBW_R { type Target = crate::FieldReader<bool, DBW_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `DBW` writer - Data Bus Width"] pub struct DBW_W<'a> { w: &'a mut W, } impl<'a> DBW_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: DBW_A) -> &'a mut W { self.bit(variant.into()) } #[doc = "8-bit Data Bus"] #[inline(always)] pub fn _8_bit(self) -> &'a mut W { self.variant(DBW_A::_8_BIT) } #[doc = "16-bit Data Bus"] #[inline(always)] pub fn _16_bit(self) -> &'a mut W { self.variant(DBW_A::_16_BIT) } #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 12)) | ((value as u32 & 0x01) << 12); self.w } } #[doc = "Field `TDF_CYCLES` reader - Data Float Time"] pub struct TDF_CYCLES_R(crate::FieldReader<u8, u8>); impl TDF_CYCLES_R { pub(crate) fn new(bits: u8) -> Self { TDF_CYCLES_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for TDF_CYCLES_R { type Target = crate::FieldReader<u8, u8>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TDF_CYCLES` writer - Data Float Time"] pub struct TDF_CYCLES_W<'a> { w: &'a mut W, } impl<'a> TDF_CYCLES_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn 
bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x0f << 16)) | ((value as u32 & 0x0f) << 16); self.w } } #[doc = "Field `TDF_MODE` reader - TDF Optimization"] pub struct TDF_MODE_R(crate::FieldReader<bool, bool>); impl TDF_MODE_R { pub(crate) fn new(bits: bool) -> Self { TDF_MODE_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for TDF_MODE_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `TDF_MODE` writer - TDF Optimization"] pub struct TDF_MODE_W<'a> { w: &'a mut W, } impl<'a> TDF_MODE_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 20)) | ((value as u32 & 0x01) << 20); self.w } } #[doc = "Field `PMEN` reader - Page Mode Enabled"] pub struct PMEN_R(crate::FieldReader<bool, bool>); impl PMEN_R { pub(crate) fn new(bits: bool) -> Self { PMEN_R(crate::FieldReader::new(bits)) } } impl core::ops::Deref for PMEN_R { type Target = crate::FieldReader<bool, bool>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PMEN` writer - Page Mode Enabled"] pub struct PMEN_W<'a> { w: &'a mut W, } impl<'a> PMEN_W<'a> { #[doc = r"Sets the field bit"] #[inline(always)] pub fn set_bit(self) -> &'a mut W { self.bit(true) } #[doc = r"Clears the field bit"] #[inline(always)] pub fn clear_bit(self) -> &'a mut W { self.bit(false) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bit(self, value: bool) -> &'a mut W { self.w.bits = (self.w.bits & !(0x01 << 24)) | ((value as u32 & 0x01) << 24); self.w } } #[doc = "Page Size\n\nValue on reset: 1"] #[derive(Clone, Copy, Debug, PartialEq)] #[repr(u8)] pub enum PS_A { #[doc = "0: 4-byte page"] _4_BYTE = 0, #[doc = "1: 8-byte page"] _8_BYTE = 1, #[doc = "2: 16-byte page"] _16_BYTE = 2, #[doc = "3: 32-byte page"] _32_BYTE = 3, } impl From<PS_A> for u8 { #[inline(always)] fn from(variant: PS_A) -> Self { variant as _ } } #[doc = "Field `PS` reader - Page Size"] pub struct PS_R(crate::FieldReader<u8, PS_A>); impl PS_R { pub(crate) fn new(bits: u8) -> Self { PS_R(crate::FieldReader::new(bits)) } #[doc = r"Get enumerated values variant"] #[inline(always)] pub fn variant(&self) -> PS_A { match self.bits { 0 => PS_A::_4_BYTE, 1 => PS_A::_8_BYTE, 2 => PS_A::_16_BYTE, 3 => PS_A::_32_BYTE, _ => unreachable!(), } } #[doc = "Checks if the value of the field is `_4_BYTE`"] #[inline(always)] pub fn is_4_byte(&self) -> bool { **self == PS_A::_4_BYTE } #[doc = "Checks if the value of the field is `_8_BYTE`"] #[inline(always)] pub fn is_8_byte(&self) -> bool { **self == PS_A::_8_BYTE } #[doc = "Checks if the value of the field is `_16_BYTE`"] #[inline(always)] pub fn is_16_byte(&self) -> bool { **self == PS_A::_16_BYTE } #[doc = "Checks if the value of the field is `_32_BYTE`"] #[inline(always)] pub fn is_32_byte(&self) -> bool { **self == PS_A::_32_BYTE } } impl core::ops::Deref for PS_R { type Target = crate::FieldReader<u8, PS_A>; #[inline(always)] fn deref(&self) -> &Self::Target { &self.0 } } #[doc = "Field `PS` writer - Page Size"] pub struct PS_W<'a> { w: &'a mut W, } impl<'a> PS_W<'a> { #[doc = r"Writes `variant` to the field"] #[inline(always)] pub fn variant(self, variant: PS_A) -> &'a mut W { 
self.bits(variant.into()) } #[doc = "4-byte page"] #[inline(always)] pub fn _4_byte(self) -> &'a mut W { self.variant(PS_A::_4_BYTE) } #[doc = "8-byte page"] #[inline(always)] pub fn _8_byte(self) -> &'a mut W { self.variant(PS_A::_8_BYTE) } #[doc = "16-byte page"] #[inline(always)] pub fn _16_byte(self) -> &'a mut W { self.variant(PS_A::_16_BYTE) } #[doc = "32-byte page"] #[inline(always)] pub fn _32_byte(self) -> &'a mut W { self.variant(PS_A::_32_BYTE) } #[doc = r"Writes raw bits to the field"] #[inline(always)] pub fn bits(self, value: u8) -> &'a mut W { self.w.bits = (self.w.bits & !(0x03 << 28)) | ((value as u32 & 0x03) << 28); self.w } } impl R { #[doc = "Bit 0 - Read Mode"] #[inline(always)] pub fn read_mode(&self) -> READ_MODE_R { READ_MODE_R::new((self.bits & 0x01) != 0) } #[doc = "Bit 1 - Write Mode"] #[inline(always)] pub fn write_mode(&self) -> WRITE_MODE_R { WRITE_MODE_R::new(((self.bits >> 1) & 0x01) != 0) } #[doc = "Bits 4:5 - NWAIT Mode"] #[inline(always)] pub fn exnw_mode(&self) -> EXNW_MODE_R { EXNW_MODE_R::new(((self.bits >> 4) & 0x03) as u8) } #[doc = "Bit 8 - Byte Access Type"] #[inline(always)] pub fn bat(&self) -> BAT_R { BAT_R::new(((self.bits >> 8) & 0x01) != 0) } #[doc = "Bit 12 - Data Bus Width"] #[inline(always)] pub fn dbw(&self) -> DBW_R { DBW_R::new(((self.bits >> 12) & 0x01) != 0) } #[doc = "Bits 16:19 - Data Float Time"] #[inline(always)] pub fn tdf_cycles(&self) -> TDF_CYCLES_R { TDF_CYCLES_R::new(((self.bits >> 16) & 0x0f) as u8) } #[doc = "Bit 20 - TDF Optimization"] #[inline(always)] pub fn tdf_mode(&self) -> TDF_MODE_R { TDF_MODE_R::new(((self.bits >> 20) & 0x01) != 0) } #[doc = "Bit 24 - Page Mode Enabled"] #[inline(always)] pub fn pmen(&self) -> PMEN_R { PMEN_R::new(((self.bits >> 24) & 0x01) != 0) } #[doc = "Bits 28:29 - Page Size"] #[inline(always)] pub fn ps(&self) -> PS_R { PS_R::new(((self.bits >> 28) & 0x03) as u8) } } impl W { #[doc = "Bit 0 - Read Mode"] #[inline(always)] pub fn read_mode(&mut self) -> READ_MODE_W { READ_MODE_W { w: self } } #[doc = "Bit 1 - Write Mode"] #[inline(always)] pub fn write_mode(&mut self) -> WRITE_MODE_W { WRITE_MODE_W { w: self } } #[doc = "Bits 4:5 - NWAIT Mode"] #[inline(always)] pub fn exnw_mode(&mut self) -> EXNW_MODE_W { EXNW_MODE_W { w: self } } #[doc = "Bit 8 - Byte Access Type"] #[inline(always)] pub fn bat(&mut self) -> BAT_W { BAT_W { w: self } } #[doc = "Bit 12 - Data Bus Width"] #[inline(always)] pub fn dbw(&mut self) -> DBW_W { DBW_W { w: self } } #[doc = "Bits 16:19 - Data Float Time"] #[inline(always)] pub fn tdf_cycles(&mut self) -> TDF_CYCLES_W { TDF_CYCLES_W { w: self } } #[doc = "Bit 20 - TDF Optimization"] #[inline(always)] pub fn tdf_mode(&mut self) -> TDF_MODE_W { TDF_MODE_W { w: self } } #[doc = "Bit 24 - Page Mode Enabled"] #[inline(always)] pub fn pmen(&mut self) -> PMEN_W { PMEN_W { w: self } } #[doc = "Bits 28:29 - Page Size"] #[inline(always)] pub fn ps(&mut self) -> PS_W { PS_W { w: self } } #[doc = "Writes raw bits to the register."] pub unsafe fn bits(&mut self, bits: u32) -> &mut Self { self.0.bits(bits); self } } #[doc = "SMC MODE Register (CS_number = 0)\n\nThis register you can [`read`](crate::generic::Reg::read), [`write_with_zero`](crate::generic::Reg::write_with_zero), [`reset`](crate::generic::Reg::reset), [`write`](crate::generic::Reg::write), [`modify`](crate::generic::Reg::modify). 
See [API](https://docs.rs/svd2rust/#read--modify--write-api).\n\nFor information about available fields see [mode0](index.html) module"] pub struct MODE0_SPEC; impl crate::RegisterSpec for MODE0_SPEC { type Ux = u32; } #[doc = "`read()` method returns [mode0::R](R) reader structure"] impl crate::Readable for MODE0_SPEC { type Reader = R; } #[doc = "`write(|w| ..)` method takes [mode0::W](W) writer structure"] impl crate::Writable for MODE0_SPEC { type Writer = W; } #[doc = "`reset()` method sets MODE0 to value 0x1000_1003"] impl crate::Resettable for MODE0_SPEC { #[inline(always)] fn reset_value() -> Self::Ux { 0x1000_1003 } }
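// Editor's usage sketch, illustrative only (the `SMC` peripheral handle below is hypothetical and depends on the enclosing PAC; the field builders are the ones generated above): // let smc = unsafe { &*pac::SMC::ptr() }; // smc.mode0.modify(|_, w| w.read_mode().set_bit().write_mode().set_bit().exnw_mode().disabled().dbw()._16_bit()); // `modify` reads MODE0 (reset value 0x1000_1003), applies the closure, and writes the result back.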
BAT_W
config.py
# Copyright (c) 2013 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import copy from oslo_config import cfg CONNECTION_OPTS = [ cfg.StrOpt('runtime-storage-uri', default='memcached://127.0.0.1:11211', help='Storage URI'), ] PROCESSOR_OPTS = [ cfg.StrOpt('default-data-uri', default='https://raw.githubusercontent.com/stackalytics/' 'default_data/master/default_data.json', help='URI for default data. A local file can be used with the ' 'prefix "file://". For example, ' 'default_data_uri = file:///path/to/default_data.json'), cfg.StrOpt('sources-root', default='/var/local/stackalytics', help='The folder that holds all project sources to analyze'), cfg.IntOpt('days_to_update_members', default=30, help='Number of days to update members'), cfg.StrOpt('corrections-uri', default=('https://git.openstack.org/cgit/' 'openstack/stackalytics/plain/etc/corrections.json'), help='The address of file with corrections data'), cfg.StrOpt('review-uri', default='gerrit://review.openstack.org', help='URI of review system'), cfg.StrOpt('git-base-uri', default='git://git.openstack.org', help='git base location'), cfg.StrOpt('ssh-key-filename', default='/home/user/.ssh/id_rsa', help='SSH key for gerrit review system access'), cfg.StrOpt('ssh-username', default='user', help='SSH username for gerrit review system access'), cfg.StrOpt('github-login', default=None, help='Login for github access'), cfg.StrOpt('github-password', default=None, help='Password for github access'), cfg.StrOpt('translation-team-uri', default='https://git.openstack.org/cgit/openstack/i18n/' 'plain/tools/zanata/translation_team.yaml', help='URI of translation team data'), cfg.StrOpt("fetching-user-source", default='launchpad', choices=['launchpad', '<None>'], help="Source for fetching user profiles"), cfg.IntOpt('members-look-ahead', default=250, help='How many member profiles to look ahead after the last'), cfg.IntOpt('read-timeout', default=120, help='Number of seconds to wait for remote response'), cfg.IntOpt('gerrit-retry', default=10, help='How many times to retry after Gerrit errors'), ] def list_opts():
yield (None, copy.deepcopy(CONNECTION_OPTS + PROCESSOR_OPTS))
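# Editor's illustrative sample (not part of the original module; values are placeholders): with oslo.config, the dashed option names above become underscored keys in a config file, e.g.: # [DEFAULT] # runtime_storage_uri = memcached://127.0.0.1:11211 # default_data_uri = file:///path/to/default_data.json # sources_root = /var/local/stackalytics # read_timeout = 120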
change_iae_sources.py
import os import sys sys.path.insert(1, os.path.join(sys.path[0], '../..')) import grapher_admin.wsgi from grapher_admin.models import Source import unidecode import json count = 0 all_sources = Source.objects.all() for each in all_sources: if 'iea.org' in each.description.lower() or 'iea stat' in each.description.lower() or 'iea 2014' in each.description.lower(): the_json = json.loads(each.description) if the_json['dataPublishedBy'] == 'World Bank – World Development Indicators': the_json['dataPublishedBy'] = 'International Energy Agency (IEA) via The World Bank' each.description = json.dumps(the_json) each.save() if the_json['dataPublishedBy'] == 'World Bank Climate Change Data': th
if the_json['dataPublishedBy'] == 'United Nations Environment Programme': the_json['dataPublishedBy'] = 'International Energy Agency (IEA) via United Nations Environment Programme' each.description = json.dumps(the_json) each.save() count += 1 print(count)
e_json['dataPublishedBy'] = 'International Energy Agency (IEA) via The World Bank' each.description = json.dumps(the_json) each.save()
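# Editor's note (illustrative; the JSON shape is inferred from the json.loads/json.dumps calls above): each matching Source.description is a JSON object whose 'dataPublishedBy' field is rewritten in place, e.g. 'World Bank Climate Change Data' becomes 'International Energy Agency (IEA) via The World Bank'.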
conf.go
package conf import ( "errors" "flag" "go-common/library/conf" "go-common/library/database/sql" "go-common/library/log" httpx "go-common/library/net/http/blademaster" "go-common/library/net/trace" "go-common/library/time" "github.com/BurntSushi/toml" ) var ( confPath string // Conf conf. Conf = &Config{} client *conf.Client ) // Config config. type Config struct { // log Xlog *log.Config // Tracer tracer Tracer *trace.Config // DB db DB *DB // Compare compare Compare *Compare // InitCloud init cloud. InitCloud *InitCloud // BM BM *httpx.ServerConfig } // InitCloud init cloud conf. type InitCloud struct { OffsetFilePath string UseOldOffset bool Start, End int64 Batch int Sleep time.Duration } // Compare compare type Compare struct { Cloud2Local *CompareConfig Local2Cloud *CompareConfig } // CompareConfig compare proc config. type CompareConfig struct { On bool Debug bool OffsetFilePath string UseOldOffset bool End bool StartTime string EndTime string DelayDuration time.Duration StepDuration time.Duration LoopDuration time.Duration BatchSize int BatchMissRetryCount int Fix bool } // DB db config. type DB struct { Local *sql.Config Cloud *sql.Config } func init() { flag.StringVar(&confPath, "conf", "", "default config path") } // Init init config. func Init() (err error) { if confPath != "" { return local() } return remote() } func local() (err error) { _, err = toml.DecodeFile(confPath, &Conf) return } func remote() (err error)
func load() (err error) { var ( s string ok bool tmpConf *Config ) if s, ok = client.Toml2(); !ok { return errors.New("load config center error") } if _, err = toml.Decode(s, &tmpConf); err != nil { return errors.New("could not decode config") } *Conf = *tmpConf return }
{ if client, err = conf.New(); err != nil { return } if err = load(); err != nil { return } go func() { for range client.Event() { log.Info("config reload") if e := load(); e != nil { log.Error("config reload error (%v)", e) } } }() return }
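// Editor's illustrative sketch (not part of the original file; values are placeholders): a minimal local TOML passed via -conf, matching the structs above. BurntSushi/toml matches keys to exported field names case-insensitively. // [initCloud] // offsetFilePath = "/tmp/initcloud.offset" // useOldOffset = true // batch = 1000 // sleep = "100ms" // [compare.cloud2Local] // on = true // batchSize = 500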
utils.go
// Copyright 2019 Yunion // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package component import ( "bytes" "encoding/json" "fmt" "path" "reflect" "text/template" "github.com/pkg/errors" apps "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" extensions "k8s.io/api/extensions/v1beta1" apiequality "k8s.io/apimachinery/pkg/api/equality" "k8s.io/apimachinery/pkg/util/intstr" clientset "k8s.io/client-go/kubernetes" "k8s.io/klog" "k8s.io/kubernetes/cmd/kubeadm/app/util/apiclient" "yunion.io/x/onecloud/pkg/cloudcommon/options" "yunion.io/x/onecloud/pkg/mcclient" "yunion.io/x/pkg/util/reflectutils" "yunion.io/x/structarg" "yunion.io/x/onecloud-operator/pkg/apis/constants" "yunion.io/x/onecloud-operator/pkg/apis/onecloud/v1alpha1" "yunion.io/x/onecloud-operator/pkg/controller" "yunion.io/x/onecloud-operator/pkg/util/mysql" "yunion.io/x/onecloud-operator/pkg/util/onecloud" ) const ( // LastAppliedConfigAnnotation is the annotation key of the last applied configuration LastAppliedConfigAnnotation = "onecloud.yunion.io/last-applied-configuration" // ImagePullBackOff is the pod state for a failed image pull ImagePullBackOff = "ImagePullBackOff" // ErrImagePull is the pod state for a failed image pull ErrImagePull = "ErrImagePull" ) // templateEqual compares the new podTemplateSpec's spec with the old podTemplateSpec's last applied config func templateEqual(new corev1.PodTemplateSpec, old corev1.PodTemplateSpec) bool { oldConfig := corev1.PodSpec{} if lastAppliedConfig, ok := old.Annotations[LastAppliedConfigAnnotation]; ok { err := json.Unmarshal([]byte(lastAppliedConfig), &oldConfig) if err != nil { klog.Errorf("unmarshal PodTemplate: [%s/%s]'s applied config failed, error: %v", old.GetNamespace(), old.GetName(), err) return false } return apiequality.Semantic.DeepEqual(oldConfig, new.Spec) } return false } // SetServiceLastAppliedConfigAnnotation sets last applied config info to the Service's annotation func SetServiceLastAppliedConfigAnnotation(svc *corev1.Service) error { svcApply, err := encode(svc.Spec) if err != nil { return err } if svc.Annotations == nil { svc.Annotations = map[string]string{} } svc.Annotations[LastAppliedConfigAnnotation] = svcApply return nil } func SetIngressLastAppliedConfigAnnotation(ing *extensions.Ingress) error { ingApply, err := encode(ing.Spec) if err != nil { return err } if ing.Annotations == nil { ing.Annotations = map[string]string{} } ing.Annotations[LastAppliedConfigAnnotation] = ingApply return nil } func SetConfigMapLastAppliedConfigAnnotation(cfg *corev1.ConfigMap) error { cfgApply, err := encode(cfg.Data) if err != nil { return err } if cfg.Annotations == nil { cfg.Annotations = map[string]string{} } cfg.Annotations[LastAppliedConfigAnnotation] = cfgApply return nil } func SetDeploymentLastAppliedConfigAnnotation(deploy *apps.Deployment) error { deployApply, err := encode(deploy.Spec) if err != nil { return err } if deploy.Annotations == nil { deploy.Annotations = map[string]string{} } deploy.Annotations[LastAppliedConfigAnnotation] = deployApply templateApply, err :=
encode(deploy.Spec.Template.Spec) if err != nil { return err } if deploy.Spec.Template.Annotations == nil { deploy.Spec.Template.Annotations = map[string]string{} } deploy.Spec.Template.Annotations[LastAppliedConfigAnnotation] = templateApply return nil } // serviceEqual compares the new Service's spec with the old Service's last applied config func serviceEqual(new, old *corev1.Service) (bool, error) { oldSpec := corev1.ServiceSpec{} if lastAppliedConfig, ok := old.Annotations[LastAppliedConfigAnnotation]; ok { err := json.Unmarshal([]byte(lastAppliedConfig), &oldSpec) if err != nil { klog.Errorf("unmarshal ServiceSpec: [%s/%s]'s applied config failed, error: %v", old.GetNamespace(), old.GetName(), err) return false, err } return apiequality.Semantic.DeepEqual(oldSpec, new.Spec), nil } return false, nil } func ingressEqual(new, old *extensions.Ingress) (bool, error) { oldSpec := extensions.IngressSpec{} if lastAppliedConfig, ok := old.Annotations[LastAppliedConfigAnnotation]; ok { err := json.Unmarshal([]byte(lastAppliedConfig), &oldSpec) if err != nil { return false, err } return apiequality.Semantic.DeepEqual(oldSpec, new.Spec), nil } return false, nil } func configMapEqual(new, old *corev1.ConfigMap) (bool, error) { oldData := map[string]string{} if lastAppliedConfig, ok := old.Annotations[LastAppliedConfigAnnotation]; ok { err := json.Unmarshal([]byte(lastAppliedConfig), &oldData) if err != nil { return false, err } return reflect.DeepEqual(oldData, new.Data), nil } return false, nil
} func deploymentEqual(new apps.Deployment, old apps.Deployment) bool { oldConfig := apps.DeploymentSpec{} if lastAppliedConfig, ok := old.Annotations[LastAppliedConfigAnnotation]; ok { err := json.Unmarshal([]byte(lastAppliedConfig), &oldConfig) if err != nil { klog.Errorf("unmarshal Deployment: [%s/%s]'s applied config failed, error: %v", old.GetNamespace(), old.GetName(), err) return false } return apiequality.Semantic.DeepEqual(oldConfig.Replicas, new.Spec.Replicas) && apiequality.Semantic.DeepEqual(oldConfig.Template, new.Spec.Template) && apiequality.Semantic.DeepEqual(oldConfig.Strategy, new.Spec.Strategy) } return false } func encode(obj interface{}) (string, error) { b, err := json.Marshal(obj) if err != nil { return "", err } return string(b), nil } func deploymentIsUpgrading(deploy *apps.Deployment) bool { if deploy.Status.ObservedGeneration == 0 { return false } if deploy.Generation > deploy.Status.ObservedGeneration && *deploy.Spec.Replicas == deploy.Status.Replicas { return true } return false } // CombineAnnotations merges two annotations maps func CombineAnnotations(a, b map[string]string) map[string]string { if a == nil { a = make(map[string]string) } for k, v := range b { a[k] = v } return a } func CreateOrUpdateConfigMap(client clientset.Interface, cm *corev1.ConfigMap) error { return apiclient.CreateOrUpdateConfigMap(client, cm) } func GetDBConnectionByCluster(oc *v1alpha1.OnecloudCluster) (*mysql.Connection, error) { return mysql.NewConnection(&oc.Spec.Mysql) } func EnsureClusterDBUser(oc *v1alpha1.OnecloudCluster, dbConfig v1alpha1.DBConfig) error { dbName := dbConfig.Database username := dbConfig.Username password := dbConfig.Password conn, err := GetDBConnectionByCluster(oc) if err != nil { return err } defer conn.Close() if err := EnsureDBUser(conn, dbName, username, password); err != nil { return err } return nil } func EnsureDBUser(conn *mysql.Connection, dbName string, username string, password string) error { dbExists, err := conn.IsDatabaseExists(dbName) if err != nil { return errors.Wrap(err, "check db exists") } if !dbExists { if err := conn.CreateDatabase(dbName); err != nil { return errors.Wrapf(err, "create database %q", dbName) } } if err := conn.CreateUser(username, password, dbName); err != nil { return errors.Wrapf(err, "create user %q for database %q", username, dbName) } return nil } func LoginByServiceAccount(s *mcclient.ClientSession, account v1alpha1.CloudUser) (mcclient.TokenCredential, error) { return s.GetClient().AuthenticateWithSource(account.Username, account.Password, constants.DefaultDomain, constants.SysAdminProject, "", "operator") } func EnsureServiceAccount(s *mcclient.ClientSession, account v1alpha1.CloudUser) error { username := account.Username password := account.Password obj, exists, err := onecloud.IsUserExists(s, username) if err != nil { return err } if exists { // password not change if _, err := LoginByServiceAccount(s, account); err == nil { return nil } id, _ := obj.GetString("id") if _, err := onecloud.ChangeUserPassword(s, id, password); err != nil { return errors.Wrapf(err, "user %s already exists, update password", username) } return nil } obj, err = onecloud.CreateUser(s, username, password) if err != nil { return errors.Wrapf(err, "create user %s", username) } userId, _ := obj.GetString("id") return onecloud.ProjectAddUser(s, constants.SysAdminProject, userId, constants.RoleAdmin) } func SetOptionsDefault(opt interface{}, serviceType string) error { parser, err := structarg.NewArgumentParser(opt, serviceType, "", "") 
if err != nil { return err } parser.SetDefault() var optionsRef *options.BaseOptions if err := reflectutils.FindAnonymouStructPointer(opt, &optionsRef); err != nil { return err } if len(optionsRef.ApplicationID) == 0 { optionsRef.ApplicationID = serviceType } return nil } type VolumeHelper struct { cluster *v1alpha1.OnecloudCluster optionCfgMap string component v1alpha1.ComponentType volumes []corev1.Volume volumeMounts []corev1.VolumeMount } func NewVolumeHelper(oc *v1alpha1.OnecloudCluster, optCfgMap string, component v1alpha1.ComponentType) *VolumeHelper { h := &VolumeHelper{ cluster: oc, optionCfgMap: optCfgMap, component: component, volumes: make([]corev1.Volume, 0), volumeMounts: make([]corev1.VolumeMount, 0), } h.volumes = []corev1.Volume{ { Name: constants.VolumeCertsName, VolumeSource: corev1.VolumeSource{ Secret: &corev1.SecretVolumeSource{ SecretName: controller.ClustercertSecretName(h.cluster), Items: []corev1.KeyToPath{ {Key: constants.CACertName, Path: constants.CACertName}, {Key: constants.ServiceCertName, Path: constants.ServiceCertName}, {Key: constants.ServiceKeyName, Path: constants.ServiceKeyName}, }, }, }, }, } h.volumeMounts = append(h.volumeMounts, corev1.VolumeMount{Name: constants.VolumeCertsName, ReadOnly: true, MountPath: constants.CertDir}) if h.optionCfgMap != "" { cfgVol := corev1.Volume{ Name: constants.VolumeConfigName, VolumeSource: corev1.VolumeSource{ ConfigMap: &corev1.ConfigMapVolumeSource{ LocalObjectReference: corev1.LocalObjectReference{ Name: h.optionCfgMap, }, Items: []corev1.KeyToPath{ {Key: constants.VolumeConfigName, Path: fmt.Sprintf("%s.conf", h.component)}, }, }, }, } h.volumes = append(h.volumes, cfgVol) h.volumeMounts = append(h.volumeMounts, corev1.VolumeMount{Name: constants.VolumeConfigName, ReadOnly: true, MountPath: constants.ConfigDir}) } return h } func (h *VolumeHelper) GetVolumes() []corev1.Volume { return h.volumes } func (h *VolumeHelper) GetVolumeMounts() []corev1.VolumeMount { return h.volumeMounts } func NewServiceNodePort(name string, port int32) corev1.ServicePort { return corev1.ServicePort{ Name: name, Protocol: corev1.ProtocolTCP, Port: port, TargetPort: intstr.FromInt(int(port)), NodePort: port, } } func SetOptionsServiceTLS(config *options.BaseOptions) { enableConfigTLS(config, constants.CertDir, constants.CACertName, constants.ServiceCertName, constants.ServiceKeyName) } func enableConfigTLS(config *options.BaseOptions, certDir string, ca string, cert string, key string) { config.EnableSsl = true config.SslCaCerts = path.Join(certDir, ca) config.SslCertfile = path.Join(certDir, cert) config.SslKeyfile = path.Join(certDir, key) } func SetServiceBaseOptions(opt *options.BaseOptions, region string, input v1alpha1.ServiceBaseConfig) { opt.Region = region opt.Port = input.Port } func SetServiceCommonOptions(opt *options.CommonOptions, oc *v1alpha1.OnecloudCluster, input v1alpha1.ServiceCommonOptions) { SetServiceBaseOptions(&opt.BaseOptions, oc.Spec.Region, input.ServiceBaseConfig) opt.AuthURL = controller.GetAuthURL(oc) opt.AdminUser = input.CloudUser.Username opt.AdminDomain = constants.DefaultDomain opt.AdminPassword = input.CloudUser.Password opt.AdminProject = constants.SysAdminProject } func SetDBOptions(opt *options.DBOptions, mysql v1alpha1.Mysql, input v1alpha1.DBConfig) { opt.SqlConnection = fmt.Sprintf("mysql+pymysql://%s:%s@%s:%d/%s?charset=utf8", input.Username, input.Password, mysql.Host, mysql.Port, input.Database) } func CompileTemplateFromMap(tmplt string, configMap interface{}) (string, error) { out := 
new(bytes.Buffer) t := template.Must(template.New("compiled_template").Parse(tmplt)) if err := t.Execute(out, configMap); err != nil { return "", err } return out.String(), nil } func GetEdition(spec *v1alpha1.DeploymentSpec) string { edition := constants.OnecloudCommunityEdition if spec.Annotations == nil { return edition } curEdition := spec.Annotations[constants.OnecloudEditionAnnotationKey] if curEdition == constants.OnecloudEnterpriseEdition { return curEdition } return edition } func IsEnterpriseEdition(spec *v1alpha1.DeploymentSpec) bool { return GetEdition(spec) == constants.OnecloudEnterpriseEdition } type PVCVolumePair struct { name string mountPath string claimName string component v1alpha1.ComponentType } func NewPVCVolumePair(name, mountPath string, oc *v1alpha1.OnecloudCluster, comp v1alpha1.ComponentType) *PVCVolumePair { return &PVCVolumePair{ name: name, mountPath: mountPath, claimName: controller.NewClusterComponentName(oc.GetName(), comp), component: comp, } } func (p PVCVolumePair) GetVolume() corev1.Volume { return corev1.Volume{ Name: p.name, VolumeSource: corev1.VolumeSource{ PersistentVolumeClaim: &corev1.PersistentVolumeClaimVolumeSource{ ClaimName: p.claimName, ReadOnly: false, }, }, } } func (p PVCVolumePair) GetVolumeMount() corev1.VolumeMount { return corev1.VolumeMount{ Name: p.name, MountPath: p.mountPath, } }
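// Editor's usage sketch, illustrative only (not part of the original file): CompileTemplateFromMap renders a text/template against arbitrary data. // out, err := CompileTemplateFromMap("port: {{.Port}}", map[string]interface{}{"Port": 8080}) // // out == "port: 8080"; a malformed template panics via template.Must, while execution errors are returned.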
intern.rs
//! This module specifies the type-based interner for constants. //! //! After a const evaluation has computed a value, before we destroy the const evaluator's session //! memory, we need to extract all memory allocations to the global memory pool so they stay around. //! //! In principle, this is not very complicated: we recursively walk the final value, follow all the //! pointers, and move all reachable allocations to the global `tcx` memory. The only complication //! is picking the right mutability for the allocations in a `static` initializer: we want to make //! as many allocations as possible immutable so LLVM can put them into read-only memory. At the //! same time, we need to make memory that could be mutated by the program mutable to avoid //! incorrect compilations. To achieve this, we do a type-based traversal of the final value, //! tracking mutable and shared references and `UnsafeCell` to determine the current mutability. //! (In principle, we could skip this type-based part for `const` and promoteds, as they always //! need to be immutable. At least for `const` however we use this opportunity to reject any `const` //! that contains allocations whose mutability we cannot identify.) use super::validity::RefTracking; use rustc_data_structures::fx::{FxHashMap, FxHashSet}; use rustc_errors::ErrorReported; use rustc_hir as hir; use rustc_middle::mir::interpret::InterpResult; use rustc_middle::ty::{self, layout::TyAndLayout, Ty}; use rustc_target::abi::Size; use rustc_ast::Mutability; use super::{AllocId, Allocation, InterpCx, MPlaceTy, Machine, MemoryKind, Scalar, ValueVisitor}; use crate::const_eval; pub trait CompileTimeMachine<'mir, 'tcx, T> = Machine< 'mir, 'tcx, MemoryKind = T, PointerTag = (), ExtraFnVal = !, FrameExtra = (), AllocExtra = (), MemoryMap = FxHashMap<AllocId, (MemoryKind<T>, Allocation)>, >; struct InternVisitor<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> { /// The ecx from which we intern. ecx: &'rt mut InterpCx<'mir, 'tcx, M>, /// Previously encountered safe references. ref_tracking: &'rt mut RefTracking<(MPlaceTy<'tcx>, InternMode)>, /// A list of all encountered allocations. After type-based interning, we traverse this list to /// also intern allocations that are only referenced by a raw pointer or inside a union. leftover_allocations: &'rt mut FxHashSet<AllocId>, /// The root kind of the value that we're looking at. This field is never mutated for a /// particular allocation. It is primarily used to make as many allocations as possible /// read-only so LLVM can place them in const memory. mode: InternMode, /// This field stores whether we are *currently* inside an `UnsafeCell`. This can affect /// the intern mode of references we encounter. inside_unsafe_cell: bool, } #[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)] enum InternMode { /// A static and its current mutability. Below shared references inside a `static mut`, /// this is *immutable*, and below mutable references inside an `UnsafeCell`, this /// is *mutable*. Static(hir::Mutability), /// A `const`. Const, } /// Signalling data structure to ensure we don't recurse /// into the memory of other constants or statics struct IsStaticOrFn; /// Intern an allocation without looking at its children. /// `mode` is the mode of the environment where we found this pointer. /// `mutability` is the mutability of the place to be interned; even if that says /// `immutable` things might become mutable if `ty` is not frozen.
/// `ty` can be `None` if there is no potential interior mutability /// to account for (e.g. for vtables). fn intern_shallow<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>( ecx: &'rt mut InterpCx<'mir, 'tcx, M>, leftover_allocations: &'rt mut FxHashSet<AllocId>, alloc_id: AllocId, mode: InternMode, ty: Option<Ty<'tcx>>, ) -> Option<IsStaticOrFn> { trace!("intern_shallow {:?} with {:?}", alloc_id, mode); // remove allocation let tcx = ecx.tcx; let (kind, mut alloc) = match ecx.memory.alloc_map.remove(&alloc_id) { Some(entry) => entry, None => { // Pointer not found in local memory map. It is either a pointer to the global // map, or dangling. // If the pointer is dangling (neither in local nor global memory), we leave it // to validation to error -- it has the much better error messages, pointing out where // in the value the dangling reference lies. // The `delay_span_bug` ensures that we don't forget such a check in validation. if tcx.get_global_alloc(alloc_id).is_none() { tcx.sess.delay_span_bug(ecx.tcx.span, "tried to intern dangling pointer"); } // treat dangling pointers like other statics // just to stop trying to recurse into them return Some(IsStaticOrFn); } }; // This match is just a canary for future changes to `MemoryKind`, which most likely need // changes in this function. match kind { MemoryKind::Stack | MemoryKind::Machine(const_eval::MemoryKind::Heap) | MemoryKind::Vtable | MemoryKind::CallerLocation => {} } // Set allocation mutability as appropriate. This is used by LLVM to put things into // read-only memory, and also by Miri when evaluating other globals that // access this one. if let InternMode::Static(mutability) = mode { // For this, we need to take into account `UnsafeCell`. When `ty` is `None`, we assume // no interior mutability. let frozen = ty.map_or(true, |ty| ty.is_freeze(ecx.tcx, ecx.param_env)); // For statics, allocation mutability is the combination of place mutability and // type mutability. // The entire allocation needs to be mutable if it contains an `UnsafeCell` anywhere. let immutable = mutability == Mutability::Not && frozen; if immutable { alloc.mutability = Mutability::Not; } else { // Just making sure we are not "upgrading" an immutable allocation to mutable. assert_eq!(alloc.mutability, Mutability::Mut); } } else { // No matter what, *constants are never mutable*. Mutating them is UB. // See const_eval::machine::MemoryExtra::can_access_statics for why // immutability is so important. // Validation will ensure that there is no `UnsafeCell` on an immutable allocation. 
alloc.mutability = Mutability::Not; }; // link the alloc id to the actual allocation let alloc = tcx.intern_const_alloc(alloc); leftover_allocations.extend(alloc.relocations().iter().map(|&(_, ((), reloc))| reloc)); tcx.set_alloc_id_memory(alloc_id, alloc); None } impl<'rt, 'mir, 'tcx, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> InternVisitor<'rt, 'mir, 'tcx, M> { fn intern_shallow( &mut self, alloc_id: AllocId, mode: InternMode, ty: Option<Ty<'tcx>>, ) -> Option<IsStaticOrFn> { intern_shallow(self.ecx, self.leftover_allocations, alloc_id, mode, ty) } } impl<'rt, 'mir, 'tcx: 'mir, M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>> ValueVisitor<'mir, 'tcx, M> for InternVisitor<'rt, 'mir, 'tcx, M> { type V = MPlaceTy<'tcx>; #[inline(always)] fn ecx(&self) -> &InterpCx<'mir, 'tcx, M> { &self.ecx } fn visit_aggregate( &mut self, mplace: MPlaceTy<'tcx>, fields: impl Iterator<Item = InterpResult<'tcx, Self::V>>, ) -> InterpResult<'tcx> { // ZSTs cannot contain pointers, so we can skip them. if mplace.layout.is_zst() { return Ok(()); } if let Some(def) = mplace.layout.ty.ty_adt_def() { if Some(def.did) == self.ecx.tcx.lang_items().unsafe_cell_type() { // We are crossing over an `UnsafeCell`, we can mutate again. This means that // References we encounter inside here are interned as pointing to mutable // allocations. // Remember the `old` value to handle nested `UnsafeCell`. let old = std::mem::replace(&mut self.inside_unsafe_cell, true); let walked = self.walk_aggregate(mplace, fields); self.inside_unsafe_cell = old; return walked; } } self.walk_aggregate(mplace, fields) } fn visit_value(&mut self, mplace: MPlaceTy<'tcx>) -> InterpResult<'tcx> { // Handle Reference types, as these are the only relocations supported by const eval. // Raw pointers (and boxes) are handled by the `leftover_relocations` logic. let tcx = self.ecx.tcx; let ty = mplace.layout.ty; if let ty::Ref(_, referenced_ty, ref_mutability) = *ty.kind() { let value = self.ecx.read_immediate(mplace.into())?; let mplace = self.ecx.ref_to_mplace(value)?; assert_eq!(mplace.layout.ty, referenced_ty); // Handle trait object vtables. if let ty::Dynamic(..) = tcx.struct_tail_erasing_lifetimes(referenced_ty, self.ecx.param_env).kind() { if let Scalar::Ptr(vtable) = mplace.meta.unwrap_meta() { // Explicitly choose const mode here, since vtables are immutable, even // if the reference of the fat pointer is mutable. self.intern_shallow(vtable.alloc_id, InternMode::Const, None); } else { // Validation will error (with a better message) on an invalid vtable pointer. // Let validation show the error message, but make sure it *does* error. tcx.sess .delay_span_bug(tcx.span, "vtables pointers cannot be integer pointers"); } } // Check if we have encountered this pointer+layout combination before. // Only recurse for allocation-backed pointers. if let Scalar::Ptr(ptr) = mplace.ptr { // Compute the mode with which we intern this. Our goal here is to make as many // statics as we can immutable so they can be placed in read-only memory by LLVM. let ref_mode = match self.mode { InternMode::Static(mutbl) => { // In statics, merge outer mutability with reference mutability and // take into account whether we are in an `UnsafeCell`. // The only way a mutable reference actually works as a mutable reference is // by being in a `static mut` directly or behind another mutable reference. // If there's an immutable reference or we are inside a `static`, then our // mutable reference is equivalent to an immutable one. 
As an example: // `&&mut Foo` is semantically equivalent to `&&Foo` match ref_mutability { _ if self.inside_unsafe_cell => { // Inside an `UnsafeCell` is like inside a `static mut`, the "outer" // mutability does not matter. InternMode::Static(ref_mutability) } Mutability::Not => { // A shared reference, things become immutable. // We do *not* consider `freeze` here: `intern_shallow` considers // `freeze` for the actual mutability of this allocation; the intern // mode for references contained in this allocation is tracked more // precisely when traversing the referenced data (by tracking // `UnsafeCell`). This makes sure that `&(&i32, &Cell<i32>)` still // has the left inner reference interned into a read-only // allocation. InternMode::Static(Mutability::Not) } Mutability::Mut => { // Mutable reference. InternMode::Static(mutbl) } } } InternMode::Const => { // Ignore `UnsafeCell`, everything is immutable. Validity does some sanity // checking for mutable references that we encounter -- they must all be // ZST. InternMode::Const } }; match self.intern_shallow(ptr.alloc_id, ref_mode, Some(referenced_ty)) { // No need to recurse, these are interned already and statics may have // cycles, so we don't want to recurse there Some(IsStaticOrFn) => {} // intern everything referenced by this value. The mutability is taken from the // reference. It is checked above that mutable references only happen in // `static mut` None => self.ref_tracking.track((mplace, ref_mode), || ()), } } Ok(()) } else { // Not a reference -- proceed recursively. self.walk_value(mplace) } } } #[derive(Copy, Clone, Debug, PartialEq, Hash, Eq)] pub enum
{ /// The `mutability` of the static, ignoring the type which may have interior mutability. Static(hir::Mutability), Constant, Promoted, } /// Intern `ret` and everything it references. /// /// This *cannot raise an interpreter error*. Doing so is left to validation, which /// tracks where in the value we are and thus can show much better error messages. /// Any errors here would anyway be turned into `const_err` lints, whereas validation failures /// are hard errors. #[tracing::instrument(level = "debug", skip(ecx))] pub fn intern_const_alloc_recursive<M: CompileTimeMachine<'mir, 'tcx, const_eval::MemoryKind>>( ecx: &mut InterpCx<'mir, 'tcx, M>, intern_kind: InternKind, ret: MPlaceTy<'tcx>, ) -> Result<(), ErrorReported> where 'tcx: 'mir, { let tcx = ecx.tcx; let base_intern_mode = match intern_kind { InternKind::Static(mutbl) => InternMode::Static(mutbl), // `Constant` includes array lengths. // `Promoted` includes non-`Copy` array initializers and `rustc_args_required_const` arguments. InternKind::Constant | InternKind::Promoted => InternMode::Const, }; // Type based interning. // `ref_tracking` tracks typed references we have already interned and still need to crawl for // more typed information inside them. // `leftover_allocations` collects *all* allocations we see, because some might not // be available in a typed way. They get interned at the end. let mut ref_tracking = RefTracking::empty(); let leftover_allocations = &mut FxHashSet::default(); // start with the outermost allocation intern_shallow( ecx, leftover_allocations, // The outermost allocation must exist, because we allocated it with // `Memory::allocate`. ret.ptr.assert_ptr().alloc_id, base_intern_mode, Some(ret.layout.ty), ); ref_tracking.track((ret, base_intern_mode), || ()); while let Some(((mplace, mode), _)) = ref_tracking.todo.pop() { let res = InternVisitor { ref_tracking: &mut ref_tracking, ecx, mode, leftover_allocations, inside_unsafe_cell: false, } .visit_value(mplace); // We deliberately *ignore* interpreter errors here. When there is a problem, the remaining // references are "leftover"-interned, and later validation will show a proper error // and point at the right part of the value causing the problem. match res { Ok(()) => {} Err(error) => { ecx.tcx.sess.delay_span_bug( ecx.tcx.span, &format!( "error during interning should later cause validation failure: {}", error ), ); // Some errors shouldn't come up because creating them causes // an allocation, which we should avoid. When that happens, // dedicated error variants should be introduced instead. assert!( !error.kind().allocates(), "interning encountered allocating error: {}", error ); } } } // Intern the rest of the allocations as mutable. These might be inside unions, padding, raw // pointers, ... So we can't intern them according to their type rules let mut todo: Vec<_> = leftover_allocations.iter().cloned().collect(); while let Some(alloc_id) = todo.pop() { if let Some((_, mut alloc)) = ecx.memory.alloc_map.remove(&alloc_id) { // We can't call the `intern_shallow` method here, as its logic is tailored to safe // references and a `leftover_allocations` set (where we only have a todo-list here). // So we hand-roll the interning logic here again. match intern_kind { // Statics may contain mutable allocations even behind relocations. // Even for immutable statics it would be ok to have mutable allocations behind // raw pointers, e.g. for `static FOO: *const AtomicUsize = &AtomicUsize::new(42)`. 
InternKind::Static(_) => {} // Raw pointers in promoteds may only point to immutable things so we mark // everything as immutable. // It is UB to mutate through a raw pointer obtained via an immutable reference: // Since all references and pointers inside a promoted must by their very definition // be created from an immutable reference (and promotion also excludes interior // mutability), mutating through them would be UB. // There's no way we can check whether the user is using raw pointers correctly, // so all we can do is mark this as immutable here. InternKind::Promoted => { // See const_eval::machine::MemoryExtra::can_access_statics for why // immutability is so important. alloc.mutability = Mutability::Not; } InternKind::Constant => { // If it's a constant, we should not have any "leftovers" as everything // is tracked by const-checking. // FIXME: downgrade this to a warning? It rejects some legitimate consts, // such as `const CONST_RAW: *const Vec<i32> = &Vec::new() as *const _;`. ecx.tcx .sess .span_err(ecx.tcx.span, "untyped pointers are not allowed in constant"); // For better errors later, mark the allocation as immutable. alloc.mutability = Mutability::Not; } } let alloc = tcx.intern_const_alloc(alloc); tcx.set_alloc_id_memory(alloc_id, alloc); for &(_, ((), reloc)) in alloc.relocations().iter() { if leftover_allocations.insert(reloc) { todo.push(reloc); } } } else if ecx.memory.dead_alloc_map.contains_key(&alloc_id) { // Codegen does not like dangling pointers, and generally `tcx` assumes that // all allocations referenced anywhere actually exist. So, make sure we error here. ecx.tcx.sess.span_err(ecx.tcx.span, "encountered dangling pointer in final constant"); return Err(ErrorReported); } else if ecx.tcx.get_global_alloc(alloc_id).is_none() { // We have hit an `AllocId` that is neither in local or global memory and isn't // marked as dangling by local memory. That should be impossible. span_bug!(ecx.tcx.span, "encountered unknown alloc id {:?}", alloc_id); } } Ok(()) } impl<'mir, 'tcx: 'mir, M: super::intern::CompileTimeMachine<'mir, 'tcx, !>> InterpCx<'mir, 'tcx, M> { /// A helper function that allocates memory for the layout given and gives you access to mutate /// it. Once your own mutation code is done, the backing `Allocation` is removed from the /// current `Memory` and returned. pub(crate) fn intern_with_temp_alloc( &mut self, layout: TyAndLayout<'tcx>, f: impl FnOnce( &mut InterpCx<'mir, 'tcx, M>, MPlaceTy<'tcx, M::PointerTag>, ) -> InterpResult<'tcx, ()>, ) -> InterpResult<'tcx, &'tcx Allocation> { let dest = self.allocate(layout, MemoryKind::Stack); f(self, dest)?; let ptr = dest.ptr.assert_ptr(); assert_eq!(ptr.offset, Size::ZERO); let mut alloc = self.memory.alloc_map.remove(&ptr.alloc_id).unwrap().1; alloc.mutability = Mutability::Not; Ok(self.tcx.intern_const_alloc(alloc)) } }
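// Editor's worked example (commentary only, derived from the mode logic in `visit_value` above): for a `static` holding `&&mut Foo`, the outer shared reference switches the mode to `InternMode::Static(Mutability::Not)`, so the inner `&mut` target is interned read-only, exactly as `&&Foo` would be; a `&mut` keeps its allocation mutable only when reached from a `static mut` directly, through another mutable reference, or inside an `UnsafeCell`.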
InternKind
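Condensing the interning rules spelled out in the comments above: leftover (untyped) allocations keep their mutability in statics, are forced immutable in promoteds, and are an error (then frozen) in constants. A small Python model of just that decision table, for illustration only; the enum mirrors the Rust InternKind, everything else is a sketch, not rustc code:

from enum import Enum, auto

class InternKind(Enum):
    STATIC = auto()     # the hir::Mutability payload is irrelevant for leftovers, so omitted
    CONSTANT = auto()
    PROMOTED = auto()

def leftover_mutability(kind, currently_mutable):
    """Return (final_mutability, error) for a leftover allocation."""
    if kind is InternKind.STATIC:
        # Statics may legitimately contain mutable allocations behind raw pointers.
        return currently_mutable, None
    if kind is InternKind.PROMOTED:
        # Raw pointers in promoteds may only point to immutable data.
        return False, None
    # Constants should have no untyped leftovers at all: report, then freeze
    # for better errors later (mirrors the span_err in the Rust code above).
    return False, "untyped pointers are not allowed in constant"

assert leftover_mutability(InternKind.STATIC, True) == (True, None)
assert leftover_mutability(InternKind.PROMOTED, True) == (False, None)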
utils.py
""" ***************** Utility Functions ***************** Utility functions useful in the implementation and testing of the Synapse client. """ from __future__ import absolute_import from __future__ import division from __future__ import print_function from __future__ import unicode_literals from future.utils import implements_iterator from builtins import str import six try: from urllib.parse import urlparse from urllib.parse import urlencode from urllib.parse import parse_qs from urllib.parse import urlunparse from urllib.parse import ParseResult from urllib.parse import urlsplit except ImportError: from urlparse import urlparse from urllib import urlencode from urlparse import parse_qs from urlparse import urlunparse from urlparse import ParseResult from urlparse import urlsplit try: import urllib.request, urllib.error except ImportError: import urllib import os, sys import hashlib, re import cgi import errno import inspect import random import requests import collections import tempfile import platform import functools import threading import uuid import importlib from datetime import datetime as Datetime from datetime import date as Date from datetime import timedelta from numbers import Number UNIX_EPOCH = Datetime(1970, 1, 1, 0, 0) ISO_FORMAT = "%Y-%m-%dT%H:%M:%S.000Z" ISO_FORMAT_MICROS = "%Y-%m-%dT%H:%M:%S.%fZ" GB = 2**30 MB = 2**20 KB = 2**10 BUFFER_SIZE = 8*KB def md5_for_file(filename, block_size=2*MB): """ Calculates the MD5 of the given file. See `source <http://stackoverflow.com/questions/1131220/get-md5-hash-of-a-files-without-open-it-in-python>`_. :param filename: The file to read in :param block_size: How much of the file to read in at once (bytes). Defaults to 2 MB :returns: The MD5 """ md5 = hashlib.md5() with open(filename,'rb') as f: while True: data = f.read(block_size) if not data: break md5.update(data) return(md5) def download_file(url, localFilepath=None): """ Downloads a remote file. :param localFilePath: May be None, in which case a temporary file is created :returns: localFilePath """ f = None try: if localFilepath: dir = os.path.dirname(localFilepath) if not os.path.exists(dir): os.makedirs(dir) f = open(localFilepath, 'wb') else: f = tempfile.NamedTemporaryFile(delete=False) localFilepath = f.name r = requests.get(url, stream=True) toBeTransferred = float(r.headers['content-length']) for nChunks, chunk in enumerate(r.iter_content(chunk_size=1024*10)): if chunk: f.write(chunk) printTransferProgress(nChunks*1024*10, toBeTransferred) finally: if f: f.close() printTransferProgress(toBeTransferred, toBeTransferred) return localFilepath def extract_filename(content_disposition_header, default_filename=None): """ Extract a filename from an HTTP content-disposition header field. See `this memo <http://tools.ietf.org/html/rfc6266>`_ and `this package <http://pypi.python.org/pypi/rfc6266>`_ for cryptic details. 
""" if not content_disposition_header: return default_filename value, params = cgi.parse_header(content_disposition_header) return params.get('filename', default_filename) def extract_user_name(profile): """ Extract a displayable user name from a user's profile """ if 'userName' in profile and profile['userName']: return profile['userName'] elif 'displayName' in profile and profile['displayName']: return profile['displayName'] else: if 'firstName' in profile and profile['firstName'] and 'lastName' in profile and profile['lastName']: return profile['firstName'] + ' ' + profile['lastName'] elif 'lastName' in profile and profile['lastName']: return profile['lastName'] elif 'firstName' in profile and profile['firstName']: return profile['firstName'] else: return str(profile.get('id', 'Unknown-user')) def _get_from_members_items_or_properties(obj, key): try: if hasattr(obj, key): return getattr(obj, key) if hasattr(obj, 'properties') and key in obj.properties: return obj.properties[key] except (KeyError, TypeError, AttributeError): pass try: if key in obj: return obj[key] elif 'properties' in obj and key in obj['properties']: return obj['properties'][key] except (KeyError, TypeError): pass return None ## TODO: what does this do on an unsaved Synapse Entity object? def id_of(obj): """ Try to figure out the Synapse ID of the given object. :param obj: May be a string, Entity object, or dictionary :returns: The ID or throws an exception """ if isinstance(obj, six.string_types): return str(obj) if isinstance(obj, Number): return str(obj) id_attr_names = ['id', 'ownerId', 'tableId'] #possible attribute names for a synapse Id for attribute_name in id_attr_names: syn_id = _get_from_members_items_or_properties(obj, attribute_name) if syn_id is not None: return str(syn_id) raise ValueError('Invalid parameters: couldn\'t find id of ' + str(obj)) def is_in_path(id, path): """Determines whether id is in the path as returned from /entity/{id}/path :param id: synapse id string :param path: object as returned from '/entity/{id}/path' :returns: True or False """ return id in [item['id'] for item in path['path']] def get_properties(entity): """Returns the dictionary of properties of the given Entity.""" return entity.properties if hasattr(entity, 'properties') else entity def is_url(s): """Return True if the string appears to be a valid URL.""" if isinstance(s, six.string_types): try: url_parts = urlsplit(s) ## looks like a Windows drive letter? if len(url_parts.scheme)==1 and url_parts.scheme.isalpha(): return False if url_parts.scheme == 'file' and bool(url_parts.path): return True return bool(url_parts.scheme) and bool(url_parts.netloc) except Exception as e: return False return False def as_url(s): """Tries to convert the input into a proper URL.""" url_parts = urlsplit(s) ## Windows drive letter? 
    if len(url_parts.scheme)==1 and url_parts.scheme.isalpha():
        return 'file:///%s' % str(s).replace("\\","/")
    if url_parts.scheme:
        return url_parts.geturl()
    else:
        return 'file://%s' % str(s)


def guess_file_name(string):
    """Tries to derive a filename from an arbitrary string."""
    path = normalize_path(urlparse(string).path)
    tokens = [x for x in path.split('/') if x != '']
    if len(tokens) > 0:
        return tokens[-1]

    # Try scrubbing the path of illegal characters
    if len(path) > 0:
        path = re.sub(r"[^a-zA-Z0-9_.+() -]", "", path)
    if len(path) > 0:
        return path

    raise ValueError("Could not derive a name from %s" % string)


def normalize_path(path):
    """Transforms a path into an absolute path with forward slashes only."""
    if path is None:
        return None
    return re.sub(r'\\', '/', os.path.normcase(os.path.abspath(path)))


def equal_paths(path1, path2):
    """
    Compare file paths in a platform neutral way
    """
    return normalize_path(path1) == normalize_path(path2)


def file_url_to_path(url, verify_exists=False):
    """
    Convert a file URL to a path, handling some odd cases around Windows paths.

    :param url:           a file URL
    :param verify_exists: If true, return the path only if the resulting file path exists on the local file system.

    :returns: a path or None if the URL is not a file URL.
    """
    parts = urlsplit(url)
    if parts.scheme=='file' or parts.scheme=='':
        path = parts.path
        ## A windows file URL, for example file:///c:/WINDOWS/asdf.txt
        ## will get back a path of: /c:/WINDOWS/asdf.txt, which we need to fix by
        ## lopping off the leading slash character. Apparently, the Python developers
        ## think this is not a bug: http://bugs.python.org/issue7965
        if re.match(r'\/[A-Za-z]:', path):
            path = path[1:]
        if os.path.exists(path) or not verify_exists:
            return path
    return None


def is_same_base_url(url1, url2):
    """Compares two URLs to see if they share the same scheme and hostname, ignoring the rest of the URL

    :param url1: a URL
    :param url2: a second URL

    :returns: Boolean
    """
    url1 = urlsplit(url1)
    url2 = urlsplit(url2)
    return (url1.scheme==url2.scheme and
            url1.hostname==url2.hostname)


def is_synapse_id(obj):
    """If the input is a Synapse ID return it, otherwise return None"""
    if isinstance(obj, six.string_types):
        m = re.match(r'(syn\d+)', obj)
        if m:
            return m.group(1)
    return None


def _is_date(dt):
    """Objects of class datetime.date and datetime.datetime will be recognized as dates"""
    return isinstance(dt, Date) or isinstance(dt, Datetime)


def _to_list(value):
    """Convert the value (an iterable or a scalar value) to a list."""
    if isinstance(value, collections.Iterable) and not isinstance(value, six.string_types):
        return list(value)
    else:
        return [value]


def _to_iterable(value):
    """Convert the value (an iterable or a scalar value) to an iterable."""
    if isinstance(value, six.string_types):
        return (value,)
    if isinstance(value, collections.Iterable):
        return value
    return (value,)


def make_bogus_data_file(n=100, seed=None):
    """
    Makes a bogus data file for testing. It is the caller's responsibility
    to clean up the file when finished.
    :param n:    How many random floating point numbers to write into the file, separated by commas
    :param seed: Random seed for the random numbers

    :returns: The name of the file
    """
    if seed is not None:
        random.seed(seed)
    data = [random.gauss(mu=0.0, sigma=1.0) for i in range(n)]

    f = tempfile.NamedTemporaryFile(mode="w", suffix=".txt", delete=False)
    try:
        # use a distinct loop variable so the parameter n is not shadowed
        f.write(", ".join(str(d) for d in data))
        f.write("\n")
    finally:
        f.close()

    return normalize_path(f.name)


def make_bogus_binary_file(n=1*MB, filepath=None, printprogress=False):
    """
    Makes a bogus binary data file for testing. It is the caller's responsibility
    to clean up the file when finished.

    :param n:             How many bytes to write
    :param filepath:      Where to write the file. May be None, in which case a temporary file is created
    :param printprogress: Whether to print a progress bar while writing

    :returns: The name of the file
    """
    with open(filepath, 'wb') if filepath else tempfile.NamedTemporaryFile(mode='wb', suffix=".dat", delete=False) as f:
        if not filepath:
            filepath = f.name
        progress = 0
        remaining = n
        while remaining > 0:
            buff_size = int(min(remaining, 1*MB))
            f.write(os.urandom(buff_size))
            remaining -= buff_size
            if printprogress:
                progress += buff_size
                printTransferProgress(progress, n, 'Generated ', filepath)
    return normalize_path(filepath)


def to_unix_epoch_time(dt):
    """
    Convert either `datetime.date or datetime.datetime objects <http://docs.python.org/2/library/datetime.html>`_
    to UNIX time (milliseconds since midnight Jan 1, 1970).
    """
    if type(dt) == Date:
        return (dt - UNIX_EPOCH.date()).total_seconds() * 1000
    return int((dt - UNIX_EPOCH).total_seconds() * 1000)


def to_unix_epoch_time_secs(dt):
    """
    Convert either `datetime.date or datetime.datetime objects <http://docs.python.org/2/library/datetime.html>`_
    to UNIX time (seconds since midnight Jan 1, 1970).
    """
    if type(dt) == Date:
        return (dt - UNIX_EPOCH.date()).total_seconds()
    return (dt - UNIX_EPOCH).total_seconds()


def from_unix_epoch_time_secs(secs):
    """Returns a Datetime object given seconds since midnight Jan 1, 1970."""
    if isinstance(secs, six.string_types):
        secs = float(secs)

    # utcfromtimestamp() fails for negative values (dates before 1970-1-1) on Windows
    # so, here's a hack that enables ancient events, such as Chris's birthday, to be
    # converted from seconds since the UNIX epoch to higher level Datetime objects. Ha!
    if platform.system()=='Windows' and secs < 0:
        mirror_date = Datetime.utcfromtimestamp(abs(secs))
        return (UNIX_EPOCH - (mirror_date-UNIX_EPOCH))
    return Datetime.utcfromtimestamp(secs)


def from_unix_epoch_time(ms):
    """Returns a Datetime object given milliseconds since midnight Jan 1, 1970."""
    if isinstance(ms, six.string_types):
        ms = float(ms)
    return from_unix_epoch_time_secs(ms/1000.0)


def datetime_to_iso(dt, sep="T"):
    ## Round microseconds to milliseconds (as expected by older clients)
    ## and add back the "Z" at the end.
    ## see: http://stackoverflow.com/questions/30266188/how-to-convert-date-string-to-iso8601-standard
    fmt = "{time.year:04}-{time.month:02}-{time.day:02}{sep}{time.hour:02}:{time.minute:02}:{time.second:02}.{millisecond:03}{tz}"
    if dt.microsecond >= 999500:
        dt -= timedelta(microseconds=dt.microsecond)
        dt += timedelta(seconds=1)
    return fmt.format(time=dt, millisecond=int(round(dt.microsecond/1000.0)), tz="Z", sep=sep)


def iso_to_datetime(iso_time):
    return Datetime.strptime(iso_time, ISO_FORMAT_MICROS)


def format_time_interval(seconds):
    """Format a time interval given in seconds to a readable value, e.g.
\"5 minutes, 37 seconds\".""" periods = ( ('year', 60*60*24*365), ('month', 60*60*24*30), ('day', 60*60*24), ('hour', 60*60), ('minute', 60), ('second', 1),) result=[] for period_name,period_seconds in periods: if seconds > period_seconds or period_name=='second': period_value, seconds = divmod(seconds, period_seconds) if period_value > 0 or period_name=='second': if period_value == 1: result.append("%d %s" % (period_value, period_name)) else: result.append("%d %ss" % (period_value, period_name)) return ", ".join(result) def _find_used(activity, predicate): """Finds a particular used resource in an activity that matches a predicate.""" for resource in activity['used']: if predicate(resource): return resource return None def itersubclasses(cls, _seen=None): """ http://code.activestate.com/recipes/576949/ (r3) itersubclasses(cls) Generator over all subclasses of a given class, in depth first order. >>> list(itersubclasses(int)) == [bool] True >>> class A(object): pass >>> class B(A): pass >>> class C(A): pass >>> class D(B,C): pass >>> class E(D): pass >>> >>> for cls in itersubclasses(A): ... print(cls.__name__) B D E C >>> # get ALL (new-style) classes currently defined >>> [cls.__name__ for cls in itersubclasses(object)] #doctest: +ELLIPSIS ['type', ...'tuple', ...] """ if not isinstance(cls, type): raise TypeError('itersubclasses must be called with ' 'new-style classes, not %.100r' % cls) if _seen is None: _seen = set() try: subs = cls.__subclasses__() except TypeError: # fails only when cls is type subs = cls.__subclasses__(cls) for sub in subs: if sub not in _seen: _seen.add(sub) yield sub for sub in itersubclasses(sub, _seen): yield sub def normalize_whitespace(s): """ Strips the string and replace all whitespace sequences and other non-printable characters with a single space. """ assert isinstance(s, six.string_types) return re.sub(r'[\x00-\x20\s]+', ' ', s.strip()) def normalize_lines(s): assert isinstance(s, six.string_types) s2 = re.sub(r'[\t ]*\n[\t ]*', '\n', s.strip()) return re.sub(r'[\t ]+', ' ', s2) def _synapse_error_msg(ex): """ Format a human readable error message """ if isinstance(ex, six.string_types): return ex return '\n' + ex.__class__.__name__ + ': ' + str(ex) + '\n\n' def _limit_and_offset(uri, limit=None, offset=None): """ Set limit and/or offset query parameters of the given URI. """ parts = urlparse(uri) query = parse_qs(parts.query) if limit is None: query.pop('limit', None) else: query['limit'] = limit if offset is None: query.pop('offset', None) else: query['offset'] = offset ## in Python 2, urllib expects encoded byte-strings if six.PY2: new_query = {} for k,v in query.items(): if isinstance(v,list): v = [unicode(element).encode('utf-8') for element in v] elif isinstance(v,str): v = unicode(v).encode('utf-8') new_query[unicode(k).encode('utf-8')] = v query = new_query new_query_string = urlencode(query, doseq=True) return urlunparse(ParseResult( scheme=parts.scheme, netloc=parts.netloc, path=parts.path, params=parts.params, query=new_query_string, fragment=parts.fragment)) def query_limit_and_offset(query, hard_limit=1000): """ Extract limit and offset from the end of a query string. 
    :returns: A triple containing the query with limit and offset removed, the limit at most equal to the hard_limit,
              and the offset which defaults to 1
    """
    # Regex a lower-case string to simplify matching
    tempQueryStr = query.lower()
    regex = r'\A(.*\s)(offset|limit)\s*(\d*\s*)\Z'

    # Continue to strip off and save the last limit/offset
    match = re.search(regex, tempQueryStr)
    options = {}
    while match is not None:
        options[match.group(2)] = int(match.group(3))
        tempQueryStr = match.group(1)
        match = re.search(regex, tempQueryStr)

    # Get a truncated version of the original query string (not in lower-case)
    query = query[:len(tempQueryStr)].strip()

    # Continue querying until the entire query has been fetched (or crash out)
    limit = min(options.get('limit', hard_limit), hard_limit)
    offset = options.get('offset', 1)

    return query, limit, offset


def _extract_synapse_id_from_query(query):
    """
    An unfortunate hack to pull the synapse ID out of a table query of the form
    "select column1, column2 from syn12345 where...."
    needed to build URLs for table services.
    """
    m = re.search(r"from\s+(syn\d+)", query, re.IGNORECASE)
    if m:
        return m.group(1)
    else:
        raise ValueError("Couldn't extract synapse ID from query: \"%s\"" % query)


# Derived from https://wiki.python.org/moin/PythonDecoratorLibrary#Memoize
def memoize(obj):
    cache = obj._memoize_cache = {}

    @functools.wraps(obj)
    def memoizer(*args, **kwargs):
        refresh = kwargs.pop('refresh', False)
        key = str(args) + str(kwargs)
        if refresh or key not in cache:
            cache[key] = obj(*args, **kwargs)
        return cache[key]
    return memoizer


def printTransferProgress(transferred, toBeTransferred, prefix='', postfix='', isBytes=True, dt=None,
                          previouslyTransferred=0):
    """Prints a progress bar

    :param transferred:           a number of items/bytes completed
    :param toBeTransferred:       total number of items/bytes when completed
    :param prefix:                String printed before progress bar
    :param postfix:               String printed after progress bar
    :param isBytes:               A boolean indicating whether to convert bytes to kB, MB, GB etc.
    :param dt:                    The time in seconds that has passed since the transfer started, used to calculate the rate
    :param previouslyTransferred: the number of bytes that were already transferred before this transfer began
                                  (e.g. someone ctrl+c'd out of an upload and restarted it later)
    """
    if not sys.stdout.isatty():
        return
    barLength = 20  # Modify this to change the length of the progress bar
    status = ''
    rate = ''
    if dt is not None and dt != 0:
        rate = (transferred - previouslyTransferred)/float(dt)
        rate = '(%s/s)' % humanizeBytes(rate) if isBytes else rate
    if toBeTransferred < 0:
        defaultToBeTransferred = (barLength*1*MB)
        if transferred > defaultToBeTransferred:
            progress = float(transferred % defaultToBeTransferred) / defaultToBeTransferred
        else:
            progress = float(transferred) / defaultToBeTransferred
    elif toBeTransferred == 0:  # There is nothing to be transferred
if progress >= 1: progress = 1 status = "Done...\n" block = int(round(barLength*progress)) nbytes = humanizeBytes(transferred) if isBytes else transferred if toBeTransferred>0: outOf = "/%s" % (humanizeBytes(toBeTransferred) if isBytes else toBeTransferred) percentage = "%4.2f%%"%(progress*100) else: outOf = "" percentage = "" text = "\r%s [%s]%s %s%s %s %s %s " % (prefix, "#"*block + "-"*(barLength-block), percentage, nbytes, outOf, rate, postfix, status) sys.stdout.write(text) sys.stdout.flush() def humanizeBytes(bytes): bytes = float(bytes) units = ['bytes', 'kB', 'MB', 'GB', 'TB', 'PB', 'EB'] for i, unit in enumerate(units): if bytes<1024: return '%3.1f%s' %(bytes, units[i]) else: bytes /= 1024 return 'Oops larger than Exabytes' def touch(path, times=None): """ Make sure a file exists. Update its access and modified times. """ basedir = os.path.dirname(path) if not os.path.exists(basedir): try: os.makedirs(basedir) except OSError as err: ## alternate processes might be creating these at the same time if err.errno != errno.EEXIST: raise with open(path, 'a'): os.utime(path, times) return path def _is_json(content_type): """detect if a content-type is JSON""" ## The value of Content-Type defined here: ## http://www.w3.org/Protocols/rfc2616/rfc2616-sec3.html#sec3.7 return content_type.lower().strip().startswith('application/json') if content_type else False def find_data_file_handle(bundle): """Return the fileHandle whose ID matches the dataFileHandleId in an entity bundle""" for fileHandle in bundle['fileHandles']: if fileHandle['id'] == bundle['entity']['dataFileHandleId']: return fileHandle return None def unique_filename(path): """Returns a unique path by appending (n) for some number n to the end of the filename.""" base, ext = os.path.splitext(path) counter = 0 while os.path.exists(path): counter += 1 path = base + ("(%d)" % counter) + ext return path @implements_iterator class threadsafe_iter: """Takes an iterator/generator and makes it thread-safe by serializing call to the `next` method of given iterator/generator. See: http://anandology.com/blog/using-iterators-and-generators/ """ def __init__(self, it): self.it = it self.lock = threading.Lock() def __iter__(self): return self def __next__(self): with self.lock: return next(self.it) def threadsafe_generator(f): """A decorator that takes a generator function and makes it thread-safe. See: http://anandology.com/blog/using-iterators-and-generators/ """ def g(*a, **kw): return threadsafe_iter(f(*a, **kw)) return g def extract_prefix(keys): """ Takes a list of strings and extracts a common prefix delimited by a dot, for example: >>> extract_prefix(["entity.bang", "entity.bar", "entity.bat"]) entity. """ prefixes = set() for key in keys: parts = key.split(".") if len(parts) > 1: prefixes.add(parts[0]) else: return "" if len(prefixes) == 1: return prefixes.pop() + "." return "" def temp_download_filename(destination, file_handle_id): suffix = "synapse_download_" + (str(file_handle_id) \ if file_handle_id else \ str(uuid.uuid4())) return os.path.join(destination, suffix) \ if os.path.isdir(destination) else \ destination + '.' + suffix def _extract_zip_file_to_directory(zip_file, zip_entry_name, target_dir): """ Extracts a specified file in a zip to the specified directory :param zip_file: an opened zip file. e.g. "with zipfile.ZipFile(zipfilepath) as zip_file:" :param zip_entry_name: the name of the file to be extracted from the zip e.g. 
folderInsideZipIfAny/fileName.txt
    :param target_dir:     the directory to which the file will be extracted

    :return: full path to the extracted file
    """
    file_base_name = os.path.basename(zip_entry_name)  # base name of the file
    filepath = os.path.join(target_dir, file_base_name)  # file path to the cached file to write

    # Create the cache directory if it does not exist
    if not os.path.exists(target_dir):
        os.makedirs(target_dir)

    # write the file from the zip into the cache
    with open(filepath, 'wb') as cache_file:
        cache_file.write(zip_file.read(zip_entry_name))

    return filepath


def _is_integer(x):
    try:
        return float.is_integer(x)
    except TypeError:
        try:
            int(x)
            return True
        except (ValueError, TypeError):
            ## anything that's not an integer, for example: empty string, None, 'NaN' or float('Nan')
            return False


def topolgical_sort(graph):
    """Given a graph in the form of a dictionary, returns a topologically sorted list.

    Adapted from:
    http://blog.jupo.org/2012/04/06/topological-sorting-acyclic-directed-graphs/

    :param graph: a dictionary with values containing lists of keys referencing back into the dictionary

    :returns: sorted list of items
    """
    graph_unsorted = graph.copy()
    graph_sorted = []
    # Convert the unsorted graph into a hash table. This gives us
    # constant-time lookup for checking if edges are unresolved

    # Run until the unsorted graph is empty.
    while graph_unsorted:
        # Go through each of the node/edges pairs in the unsorted
        # graph. If a set of edges doesn't contain any nodes that
        # haven't been resolved, that is, that are still in the
        # unsorted graph, remove the pair from the unsorted graph,
        # and append it to the sorted graph. Note here that by
        # using the items() method for iterating, a copy of the
        # unsorted graph is used, allowing us to modify the unsorted
        # graph as we move through it. We also keep a flag for
        # checking that the graph is acyclic, which is true if any
        # nodes are resolved during each pass through the graph. If
        # not, we need to bail out as the graph therefore can't be
        # sorted.
        acyclic = False
        for node, edges in list(graph_unsorted.items()):
            for edge in edges:
                if edge in graph_unsorted:
                    break
            else:
                acyclic = True
                del graph_unsorted[node]
                graph_sorted.append((node, edges))

        if not acyclic:
            # We've passed through all the unsorted nodes and
            # weren't able to resolve any of them, which means there
            # are nodes with cyclic edges that will never be resolved,
            # so we bail out with an error.
            raise RuntimeError("A cyclic dependency occurred. Some files in provenance reference each other circularly.")

    return graph_sorted


def caller_module_name(current_frame):
    """
    :param current_frame: use inspect.currentframe().
    :return: the name of the module calling the function, foo(), in which this calling_module() is invoked.
             Ignores callers that belong in the same module as foo()
    """
    current_frame_filename = current_frame.f_code.co_filename  # filename in which foo() resides

    # going back a frame takes us to the frame calling foo()
    caller_frame = current_frame.f_back
    caller_filename = caller_frame.f_code.co_filename

    # find the first frame that does not have the same filename.
this ensures that we don't consider functions within the same module as foo() that use foo() as a helper function while(caller_filename == current_frame_filename): caller_frame = caller_frame.f_back caller_filename = caller_frame.f_code.co_filename return inspect.getmodulename(caller_filename) def attempt_import(module_name, fail_message): try: return importlib.import_module(module_name) except ImportError: sys.stderr.write( (fail_message + "To install this library on Mac or Linux distributions:\n" " (sudo) pip install %s\n\n" "On Windows, right click the Command Prompt(cmd.exe) and select 'Run as administrator' then:\n" " pip install %s\n\n" "\n\n\n" % (module_name, module_name))) raise
progress = 1 status = "Done...\n" else: progress = float(transferred) / toBeTransferred
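The epoch-time helpers in the utils.py chunk above convert between datetime objects and integer milliseconds. A quick self-contained round-trip check; the two functions are restated here so the snippet runs on its own (names match utils.py, the sample date is arbitrary):

from datetime import datetime as Datetime

UNIX_EPOCH = Datetime(1970, 1, 1, 0, 0)

def to_unix_epoch_time(dt):
    # datetime -> integer milliseconds since the epoch, as in utils.py above
    return int((dt - UNIX_EPOCH).total_seconds() * 1000)

def from_unix_epoch_time(ms):
    # integer milliseconds -> naive UTC datetime (skipping the Windows pre-1970 workaround)
    return Datetime.utcfromtimestamp(ms / 1000.0)

dt = Datetime(2017, 3, 14, 15, 9, 26)
ms = to_unix_epoch_time(dt)
assert ms == 1489504166000
assert from_unix_epoch_time(ms) == dt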
setup.py
from setuptools import setup, find_packages with open('README.rst') as f: readme = f.read() with open('LICENSE') as f: license = f.read() setup( name='Cat chaser', version='0.1.0', description='Code for the cat chaser.', long_description=readme, author='Joakim Nyman', author_email='[email protected]', url='https://github.com/Maharacha/cat_chaser', license=license, packages=find_packages(exclude=('tests', 'docs')) )
# -*- coding: utf-8 -*-
package_list.rs
use ament_rs::*; fn main() { if let Ok(ament) = Ament::new()
else { eprintln!( "environment variable '{}' is not set or empty", AMENT_PREFIX_PATH_ENV_VAR ); } }
{ println!("{:#?}", ament.get_packages_prefixes()) }
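For comparison with the Rust ament_rs snippet above, here is a rough Python sketch of the same listing: package names grouped by ament prefix. The resource-index layout used here (share/ament_index/resource_index/packages) is an assumption about ament's on-disk format, not something the Rust code shows:

import os

def packages_by_prefix():
    prefixes = os.environ.get("AMENT_PREFIX_PATH", "")
    if not prefixes:
        raise SystemExit("environment variable 'AMENT_PREFIX_PATH' is not set or empty")
    result = {}
    for prefix in prefixes.split(os.pathsep):
        # assumed resource-index location; one marker file per installed package
        index = os.path.join(prefix, "share", "ament_index", "resource_index", "packages")
        if os.path.isdir(index):
            result[prefix] = sorted(os.listdir(index))
    return result

if __name__ == "__main__":
    for prefix, packages in packages_by_prefix().items():
        print(prefix, packages)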
network_security_group_paged.py
# coding=utf-8 # -------------------------------------------------------------------------- # Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for # license information. # # Code generated by Microsoft (R) AutoRest Code Generator. # Changes may cause incorrect behavior and will be lost if the code is # regenerated. # -------------------------------------------------------------------------- from msrest.paging import Paged class NetworkSecurityGroupPaged(Paged): """ A paging container for iterating over a list of :class:`NetworkSecurityGroup <azure.mgmt.network.v2017_09_01.models.NetworkSecurityGroup>` object """ _attribute_map = { 'next_link': {'key': 'nextLink', 'type': 'str'}, 'current_page': {'key': 'value', 'type': '[NetworkSecurityGroup]'} } def __init__(self, *args, **kwargs):
super(NetworkSecurityGroupPaged, self).__init__(*args, **kwargs)
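The generated Paged subclass above only supplies an _attribute_map telling msrest where to find each page ('value') and the link to the following page ('nextLink'). A minimal sketch of what that pattern does at runtime, illustrative only and not the msrest.paging implementation:

class SimplePaged:
    """Iterate items across pages, following 'nextLink' until it is exhausted."""

    def __init__(self, get_page, first_link):
        self._get_page = get_page      # callable: link -> raw JSON dict
        self._next_link = first_link
        self._current = []

    def __iter__(self):
        return self

    def __next__(self):
        if not self._current:
            if self._next_link is None:
                raise StopIteration
            raw = self._get_page(self._next_link)
            self._current = list(raw.get("value", []))
            self._next_link = raw.get("nextLink")
            if not self._current and self._next_link is None:
                raise StopIteration
        return self._current.pop(0)

# Fake two-page service response for demonstration:
pages = {
    "p1": {"value": ["nsg-a", "nsg-b"], "nextLink": "p2"},
    "p2": {"value": ["nsg-c"], "nextLink": None},
}
assert list(SimplePaged(pages.get, "p1")) == ["nsg-a", "nsg-b", "nsg-c"]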
rb.go
package sanitize

import (
	"context"

	"github.com/derailed/popeye/internal"
	"github.com/derailed/popeye/internal/cache"
	"github.com/derailed/popeye/internal/issues"
)

type (
	// RBLister represents RoleBinding dependencies.
	RBLister interface {
		RoleBindingLister
		ClusterRoleLister
		RoleLister
	}

	// RoleBinding tracks RoleBinding sanitization.
	RoleBinding struct {
		*issues.Collector
		RBLister
	}
)

// NewRoleBinding returns a new sanitizer.
func NewRoleBinding(c *issues.Collector, lister RBLister) *RoleBinding {
	return &RoleBinding{
		Collector: c,
		RBLister:  lister,
	}
}

// Sanitize cleanses the resource.
func (r *RoleBinding) Sanitize(ctx context.Context) error {
	for fqn, rb := range r.ListRoleBindings() {
		r.InitOutcome(fqn)
		ctx = internal.WithFQN(ctx, fqn)

		switch rb.RoleRef.Kind {
		case "ClusterRole":
			if _, ok := r.ListClusterRoles()[rb.RoleRef.Name]; !ok
case "Role": rFQN := cache.FQN(rb.Namespace, rb.RoleRef.Name) if _, ok := r.ListRoles()[rFQN]; !ok { r.AddCode(ctx, 1300, rb.RoleRef.Kind, rFQN) } } if r.NoConcerns(fqn) && r.Config.ExcludeFQN(internal.MustExtractSectionGVR(ctx), fqn) { r.ClearOutcome(fqn) } } return nil }
{ r.AddCode(ctx, 1300, rb.RoleRef.Kind, rb.RoleRef.Name) }
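Putting the rb.go pieces back together: the sanitizer flags any RoleBinding whose referenced ClusterRole or namespaced Role cannot be found (check code 1300). A minimal Python model of that walk, using plain dicts and sets in place of popeye's listers; all names here are hypothetical, and the namespace/name key format is an assumption mirroring cache.FQN:

def missing_role_refs(bindings, cluster_roles, roles):
    """Yield (binding_fqn, ref_kind, dangling_ref) for bindings whose role is missing."""
    for fqn, rb in bindings.items():
        kind, name, ns = rb["kind"], rb["name"], rb["namespace"]
        if kind == "ClusterRole" and name not in cluster_roles:
            yield fqn, kind, name
        elif kind == "Role":
            ref_fqn = "%s/%s" % (ns, name)  # assumed namespace/name key format
            if ref_fqn not in roles:
                yield fqn, kind, ref_fqn

bindings = {"default/rb1": {"kind": "Role", "name": "gone", "namespace": "default"}}
print(list(missing_role_refs(bindings, set(), set())))
# [('default/rb1', 'Role', 'default/gone')]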
sbx_specs.rs
#![allow(dead_code)] pub const SBX_LARGEST_BLOCK_SIZE: usize = 4096; pub const SBX_FIRST_DATA_SEQ_NUM: u32 = 1; pub const SBX_LAST_SEQ_NUM: u32 = u32::max_value(); pub const SBX_METADATA_BLOCK_COUNT: usize = 1; pub const SBX_SCAN_BLOCK_SIZE: usize = 128; pub const SBX_FILE_UID_LEN: usize = common_params::FILE_UID_LEN; pub const SBX_SIGNATURE: &[u8] = common_params::SIGNATURE; pub const SBX_HEADER_SIZE: usize = common_params::HEADER_SIZE; pub const SBX_MAX_DATA_BLOCK_COUNT: u32 = u32::max_value(); pub const SBX_MAX_BURST_ERR_RESISTANCE: usize = 1000; #[derive(Clone, Copy, Debug, PartialEq)] pub enum Version { V1, V2, V3, V17, V18, V19, } mod common_params { use std::u32; pub const FILE_UID_LEN: usize = 6; pub const SIGNATURE: &[u8] = b"SBx"; pub const HEADER_SIZE: usize = 16; pub const MAX_BLOCK_NUM: u64 = u32::MAX as u64; } mod params_for_v1 { use super::common_params; pub const BLOCK_SIZE: usize = 512; pub const DATA_SIZE: usize = BLOCK_SIZE - common_params::HEADER_SIZE; } mod params_for_v2 { use super::common_params; pub const BLOCK_SIZE: usize = 128; pub const DATA_SIZE: usize = BLOCK_SIZE - common_params::HEADER_SIZE; } mod params_for_v3 { use super::common_params; pub const BLOCK_SIZE: usize = 4096; pub const DATA_SIZE: usize = BLOCK_SIZE - common_params::HEADER_SIZE; } mod params_for_v17 { use super::params_for_v1; pub const BLOCK_SIZE: usize = params_for_v1::BLOCK_SIZE; pub const DATA_SIZE: usize = params_for_v1::DATA_SIZE; } mod params_for_v18 { use super::params_for_v2; pub const BLOCK_SIZE: usize = params_for_v2::BLOCK_SIZE; pub const DATA_SIZE: usize = params_for_v2::DATA_SIZE; } mod params_for_v19 { use super::params_for_v3; pub const BLOCK_SIZE: usize = params_for_v3::BLOCK_SIZE; pub const DATA_SIZE: usize = params_for_v3::DATA_SIZE; } pub fn ver_to_usize(version: Version) -> usize { use self::Version::*; match version { V1 => 1, V2 => 2, V3 => 3, V17 => 17, V18 => 18, V19 => 19, } } pub fn string_to_ver(string: &str) -> Result<Version, ()> { use self::Version::*; match string { "1" => Ok(V1), "2" => Ok(V2), "3" => Ok(V3), "17" => Ok(V17), "18" => Ok(V18), "19" => Ok(V19), _ => Err(()), } } pub fn ver_to_block_size(version: Version) -> usize { use self::Version::*; match version { V1 => params_for_v1::BLOCK_SIZE, V2 => params_for_v2::BLOCK_SIZE, V3 => params_for_v3::BLOCK_SIZE, V17 => params_for_v17::BLOCK_SIZE, V18 => params_for_v18::BLOCK_SIZE, V19 => params_for_v19::BLOCK_SIZE, } } pub fn ver_to_data_size(version: Version) -> usize { use self::Version::*; match version { V1 => params_for_v1::DATA_SIZE, V2 => params_for_v2::DATA_SIZE, V3 => params_for_v3::DATA_SIZE, V17 => params_for_v17::DATA_SIZE, V18 => params_for_v18::DATA_SIZE, V19 => params_for_v19::DATA_SIZE, } } pub fn ver_uses_rs(version: Version) -> bool { use self::Version::*; match version { V1 | V2 | V3 => false, V17 | V18 | V19 => true, } } pub fn ver_forces_meta_enabled(version: Version) -> bool { use self::Version::*; match version { V1 | V2 | V3 => false, V17 | V18 | V19 => true, } } pub fn ver_to_max_block_set_count( version: Version, data_par_burst: Option<(usize, usize, usize)>, ) -> Option<u32> { if ver_uses_rs(version) { let (data, parity, _) = data_par_burst.unwrap(); let block_set_size = (data + parity) as u32; Some(SBX_MAX_DATA_BLOCK_COUNT / block_set_size) } else { assert!(data_par_burst == None); None } } pub fn ver_to_last_data_seq_num_exc_parity( version: Version, data_par_burst: Option<(usize, usize, usize)>, ) -> u32
pub fn ver_to_max_data_file_size( version: Version, data_par_burst: Option<(usize, usize, usize)>, ) -> u64 { let data_size = ver_to_data_size(version) as u64; if ver_uses_rs(version) { let (data, _, _) = data_par_burst.unwrap(); let max_block_set_count = ver_to_max_block_set_count(version, data_par_burst).unwrap() as u64; max_block_set_count * data as u64 * data_size } else { assert!(data_par_burst == None); SBX_MAX_DATA_BLOCK_COUNT as u64 * data_size } }
{ if ver_uses_rs(version) { let (data, parity, _) = data_par_burst.unwrap(); let block_set_size = data + parity; let max_block_set_count = ver_to_max_block_set_count(version, data_par_burst).unwrap(); max_block_set_count * block_set_size as u32 - parity as u32 } else { assert!(data_par_burst == None); SBX_LAST_SEQ_NUM } }
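The sbx_specs.rs tables reduce to simple arithmetic: every version's data size is its block size minus the 16-byte header, and for the non-Reed-Solomon versions (1 through 3) the maximum data file size is u32::MAX blocks times that per-block data size. A quick Python check of those numbers:

HEADER_SIZE = 16
BLOCK_SIZE = {1: 512, 2: 128, 3: 4096, 17: 512, 18: 128, 19: 4096}
U32_MAX = 2**32 - 1

def data_size(version):
    # mirrors DATA_SIZE = BLOCK_SIZE - HEADER_SIZE in the params_for_* modules
    return BLOCK_SIZE[version] - HEADER_SIZE

def max_data_file_size_no_rs(version):
    # mirrors ver_to_max_data_file_size for versions 1..3 (no parity blocks)
    return U32_MAX * data_size(version)

print(data_size(1))                  # 496
print(max_data_file_size_no_rs(1))   # 2130303778320 bytes, roughly 1.9 TiB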
virtual_machine.go
/* Copyright (c) 2017-2018 VMware, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package simulator import ( "bytes" "fmt" "io/ioutil" "log" "net" "os" "path" "path/filepath" "reflect" "strconv" "strings" "sync/atomic" "time" "github.com/google/uuid" "github.com/vmware/govmomi/object" "github.com/vmware/govmomi/simulator/esx" "github.com/vmware/govmomi/vim25/methods" "github.com/vmware/govmomi/vim25/mo" "github.com/vmware/govmomi/vim25/soap" "github.com/vmware/govmomi/vim25/types" ) type VirtualMachine struct { mo.VirtualMachine log string sid int32 run container uid uuid.UUID imc *types.CustomizationSpec } func asVirtualMachineMO(obj mo.Reference) (*mo.VirtualMachine, bool) { vm, ok := getManagedObject(obj).Addr().Interface().(*mo.VirtualMachine) return vm, ok } func NewVirtualMachine(ctx *Context, parent types.ManagedObjectReference, spec *types.VirtualMachineConfigSpec) (*VirtualMachine, types.BaseMethodFault) { vm := &VirtualMachine{} vm.Parent = &parent folder := Map.Get(parent) f, _ := asFolderMO(folder) folderPutChild(ctx, f, vm) if spec.Name == "" { return vm, &types.InvalidVmConfig{Property: "configSpec.name"} } if spec.Files == nil || spec.Files.VmPathName == "" { return vm, &types.InvalidVmConfig{Property: "configSpec.files.vmPathName"} } rspec := types.DefaultResourceConfigSpec() vm.Guest = &types.GuestInfo{} vm.Config = &types.VirtualMachineConfigInfo{ ExtraConfig: []types.BaseOptionValue{&types.OptionValue{Key: "govcsim", Value: "TRUE"}}, Tools: &types.ToolsConfigInfo{}, MemoryAllocation: &rspec.MemoryAllocation, CpuAllocation: &rspec.CpuAllocation, LatencySensitivity: &types.LatencySensitivity{Level: types.LatencySensitivitySensitivityLevelNormal}, BootOptions: &types.VirtualMachineBootOptions{}, CreateDate: types.NewTime(time.Now()), } vm.Layout = &types.VirtualMachineFileLayout{} vm.LayoutEx = &types.VirtualMachineFileLayoutEx{ Timestamp: time.Now(), } vm.Snapshot = nil // intentionally set to nil until a snapshot is created vm.Storage = &types.VirtualMachineStorageInfo{ Timestamp: time.Now(), } vm.Summary.Guest = &types.VirtualMachineGuestSummary{} vm.Summary.Vm = &vm.Self vm.Summary.Storage = &types.VirtualMachineStorageSummary{ Timestamp: time.Now(), } vmx := vm.vmx(spec) if vmx.Path == "" { // Append VM Name as the directory name if not specified vmx.Path = spec.Name } dc := Map.getEntityDatacenter(folder.(mo.Entity)) ds := Map.FindByName(vmx.Datastore, dc.Datastore).(*Datastore) dir := path.Join(ds.Info.GetDatastoreInfo().Url, vmx.Path) if path.Ext(vmx.Path) == ".vmx" { dir = path.Dir(dir) // Ignore error here, deferring to createFile _ = os.Mkdir(dir, 0700) } else { // Create VM directory, renaming if already exists name := dir for i := 0; i < 1024; /* just in case */ i++ { err := os.Mkdir(name, 0700) if err != nil { if os.IsExist(err) { name = fmt.Sprintf("%s (%d)", dir, i) continue } return nil, &types.FileFault{File: name} } break } vmx.Path = path.Join(path.Base(name), spec.Name+".vmx") } spec.Files.VmPathName = vmx.String() dsPath := path.Dir(spec.Files.VmPathName) vm.uid 
= sha1UUID(spec.Files.VmPathName) defaults := types.VirtualMachineConfigSpec{ NumCPUs: 1, NumCoresPerSocket: 1, MemoryMB: 32, Uuid: vm.uid.String(), InstanceUuid: newUUID(strings.ToUpper(spec.Files.VmPathName)), Version: esx.HardwareVersion, Firmware: string(types.GuestOsDescriptorFirmwareTypeBios), Files: &types.VirtualMachineFileInfo{ SnapshotDirectory: dsPath, SuspendDirectory: dsPath, LogDirectory: dsPath, }, } // Add the default devices defaults.DeviceChange, _ = object.VirtualDeviceList(esx.VirtualDevice).ConfigSpec(types.VirtualDeviceConfigSpecOperationAdd) err := vm.configure(&defaults) if err != nil { return vm, err } vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff vm.Runtime.ConnectionState = types.VirtualMachineConnectionStateConnected vm.Summary.Runtime = vm.Runtime vm.Summary.QuickStats.GuestHeartbeatStatus = types.ManagedEntityStatusGray vm.Summary.OverallStatus = types.ManagedEntityStatusGreen vm.ConfigStatus = types.ManagedEntityStatusGreen return vm, nil } func (o *VirtualMachine) RenameTask(r *types.Rename_Task) soap.HasFault { return RenameTask(o, r) } func (*VirtualMachine) Reload(*types.Reload) soap.HasFault { return &methods.ReloadBody{Res: new(types.ReloadResponse)} } func (vm *VirtualMachine) event() types.VmEvent { host := Map.Get(*vm.Runtime.Host).(*HostSystem) return types.VmEvent{ Event: types.Event{ Datacenter: datacenterEventArgument(host), ComputeResource: host.eventArgumentParent(), Host: host.eventArgument(), Vm: &types.VmEventArgument{ EntityEventArgument: types.EntityEventArgument{Name: vm.Name}, Vm: vm.Self, }, }, } } func (vm *VirtualMachine) apply(spec *types.VirtualMachineConfigSpec) { if spec.Files == nil { spec.Files = new(types.VirtualMachineFileInfo) } apply := []struct { src string dst *string }{ {spec.AlternateGuestName, &vm.Config.AlternateGuestName}, {spec.Annotation, &vm.Config.Annotation}, {spec.Firmware, &vm.Config.Firmware}, {spec.InstanceUuid, &vm.Config.InstanceUuid}, {spec.LocationId, &vm.Config.LocationId}, {spec.NpivWorldWideNameType, &vm.Config.NpivWorldWideNameType}, {spec.Name, &vm.Name}, {spec.Name, &vm.Config.Name}, {spec.Name, &vm.Summary.Config.Name}, {spec.GuestId, &vm.Config.GuestId}, {spec.GuestId, &vm.Config.GuestFullName}, {spec.GuestId, &vm.Summary.Guest.GuestId}, {spec.GuestId, &vm.Summary.Config.GuestId}, {spec.GuestId, &vm.Summary.Config.GuestFullName}, {spec.Uuid, &vm.Config.Uuid}, {spec.Uuid, &vm.Summary.Config.Uuid}, {spec.InstanceUuid, &vm.Config.InstanceUuid}, {spec.InstanceUuid, &vm.Summary.Config.InstanceUuid}, {spec.Version, &vm.Config.Version}, {spec.Files.VmPathName, &vm.Config.Files.VmPathName}, {spec.Files.VmPathName, &vm.Summary.Config.VmPathName}, {spec.Files.SnapshotDirectory, &vm.Config.Files.SnapshotDirectory}, {spec.Files.SuspendDirectory, &vm.Config.Files.SuspendDirectory}, {spec.Files.LogDirectory, &vm.Config.Files.LogDirectory}, } for _, f := range apply { if f.src != "" { *f.dst = f.src } } applyb := []struct { src *bool dst **bool }{ {spec.NestedHVEnabled, &vm.Config.NestedHVEnabled}, {spec.CpuHotAddEnabled, &vm.Config.CpuHotAddEnabled}, {spec.CpuHotRemoveEnabled, &vm.Config.CpuHotRemoveEnabled}, {spec.GuestAutoLockEnabled, &vm.Config.GuestAutoLockEnabled}, {spec.MemoryHotAddEnabled, &vm.Config.MemoryHotAddEnabled}, {spec.MemoryReservationLockedToMax, &vm.Config.MemoryReservationLockedToMax}, {spec.MessageBusTunnelEnabled, &vm.Config.MessageBusTunnelEnabled}, {spec.NpivTemporaryDisabled, &vm.Config.NpivTemporaryDisabled}, {spec.NpivOnNonRdmDisks, &vm.Config.NpivOnNonRdmDisks}, 
{spec.ChangeTrackingEnabled, &vm.Config.ChangeTrackingEnabled}, } for _, f := range applyb { if f.src != nil { *f.dst = f.src } } if spec.Flags != nil { vm.Config.Flags = *spec.Flags } if spec.LatencySensitivity != nil { vm.Config.LatencySensitivity = spec.LatencySensitivity } if spec.ManagedBy != nil { vm.Config.ManagedBy = spec.ManagedBy } if spec.BootOptions != nil { vm.Config.BootOptions = spec.BootOptions } if spec.RepConfig != nil { vm.Config.RepConfig = spec.RepConfig } if spec.Tools != nil { vm.Config.Tools = spec.Tools } if spec.ConsolePreferences != nil { vm.Config.ConsolePreferences = spec.ConsolePreferences } if spec.CpuAffinity != nil { vm.Config.CpuAffinity = spec.CpuAffinity } if spec.CpuAllocation != nil { vm.Config.CpuAllocation = spec.CpuAllocation } if spec.MemoryAffinity != nil { vm.Config.MemoryAffinity = spec.MemoryAffinity } if spec.MemoryAllocation != nil { vm.Config.MemoryAllocation = spec.MemoryAllocation } if spec.LatencySensitivity != nil { vm.Config.LatencySensitivity = spec.LatencySensitivity } if spec.MemoryMB != 0 { vm.Config.Hardware.MemoryMB = int32(spec.MemoryMB) vm.Summary.Config.MemorySizeMB = vm.Config.Hardware.MemoryMB } if spec.NumCPUs != 0 { vm.Config.Hardware.NumCPU = spec.NumCPUs vm.Summary.Config.NumCpu = vm.Config.Hardware.NumCPU } if spec.NumCoresPerSocket != 0 { vm.Config.Hardware.NumCoresPerSocket = spec.NumCoresPerSocket } if spec.GuestId != "" { vm.Guest.GuestFamily = guestFamily(spec.GuestId) } vm.Config.Modified = time.Now() } var extraConfigAlias = map[string]string{ "ip0": "SET.guest.ipAddress", } func extraConfigKey(key string) string { if k, ok := extraConfigAlias[key]; ok { return k } return key } func (vm *VirtualMachine) applyExtraConfig(spec *types.VirtualMachineConfigSpec) { var changes []types.PropertyChange for _, c := range spec.ExtraConfig { val := c.GetOptionValue() key := strings.TrimPrefix(extraConfigKey(val.Key), "SET.") if key == val.Key { vm.Config.ExtraConfig = append(vm.Config.ExtraConfig, c) continue } changes = append(changes, types.PropertyChange{Name: key, Val: val.Value}) switch key { case "guest.ipAddress": if len(vm.Guest.Net) > 0 { ip := val.Value.(string) vm.Guest.Net[0].IpAddress = []string{ip} changes = append(changes, types.PropertyChange{Name: "summary." + key, Val: ip}, types.PropertyChange{Name: "guest.net", Val: vm.Guest.Net}, ) } case "guest.hostName": changes = append(changes, types.PropertyChange{Name: "summary." + key, Val: val.Value}, ) } } if len(changes) != 0 { Map.Update(vm, changes) } } func
(id string) types.BaseMethodFault { for _, x := range GuestID { if id == string(x) { return nil } } return &types.InvalidArgument{InvalidProperty: "configSpec.guestId"} } func (vm *VirtualMachine) configure(spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { vm.apply(spec) if spec.MemoryAllocation != nil { if err := updateResourceAllocation("memory", spec.MemoryAllocation, vm.Config.MemoryAllocation); err != nil { return err } } if spec.CpuAllocation != nil { if err := updateResourceAllocation("cpu", spec.CpuAllocation, vm.Config.CpuAllocation); err != nil { return err } } if spec.GuestId != "" { if err := validateGuestID(spec.GuestId); err != nil { return err } } if o := spec.BootOptions; o != nil { if isTrue(o.EfiSecureBootEnabled) && vm.Config.Firmware != string(types.GuestOsDescriptorFirmwareTypeEfi) { return &types.InvalidVmConfig{Property: "msg.hostd.configSpec.efi"} } } return vm.configureDevices(spec) } func getVMFileType(fileName string) types.VirtualMachineFileLayoutExFileType { var fileType types.VirtualMachineFileLayoutExFileType fileExt := path.Ext(fileName) fileNameNoExt := strings.TrimSuffix(fileName, fileExt) switch fileExt { case ".vmx": fileType = types.VirtualMachineFileLayoutExFileTypeConfig case ".core": fileType = types.VirtualMachineFileLayoutExFileTypeCore case ".vmdk": fileType = types.VirtualMachineFileLayoutExFileTypeDiskDescriptor if strings.HasSuffix(fileNameNoExt, "-digest") { fileType = types.VirtualMachineFileLayoutExFileTypeDigestDescriptor } extentSuffixes := []string{"-flat", "-delta", "-s", "-rdm", "-rdmp"} for _, suffix := range extentSuffixes { if strings.HasSuffix(fileNameNoExt, suffix) { fileType = types.VirtualMachineFileLayoutExFileTypeDiskExtent } else if strings.HasSuffix(fileNameNoExt, "-digest"+suffix) { fileType = types.VirtualMachineFileLayoutExFileTypeDigestExtent } } case ".psf": fileType = types.VirtualMachineFileLayoutExFileTypeDiskReplicationState case ".vmxf": fileType = types.VirtualMachineFileLayoutExFileTypeExtendedConfig case ".vmft": fileType = types.VirtualMachineFileLayoutExFileTypeFtMetadata case ".log": fileType = types.VirtualMachineFileLayoutExFileTypeLog case ".nvram": fileType = types.VirtualMachineFileLayoutExFileTypeNvram case ".png", ".bmp": fileType = types.VirtualMachineFileLayoutExFileTypeScreenshot case ".vmsn": fileType = types.VirtualMachineFileLayoutExFileTypeSnapshotData case ".vmsd": fileType = types.VirtualMachineFileLayoutExFileTypeSnapshotList case ".xml": if strings.HasSuffix(fileNameNoExt, "-aux") { fileType = types.VirtualMachineFileLayoutExFileTypeSnapshotManifestList } case ".stat": fileType = types.VirtualMachineFileLayoutExFileTypeStat case ".vmss": fileType = types.VirtualMachineFileLayoutExFileTypeSuspend case ".vmem": if strings.Contains(fileNameNoExt, "Snapshot") { fileType = types.VirtualMachineFileLayoutExFileTypeSnapshotMemory } else { fileType = types.VirtualMachineFileLayoutExFileTypeSuspendMemory } case ".vswp": if strings.HasPrefix(fileNameNoExt, "vmx-") { fileType = types.VirtualMachineFileLayoutExFileTypeUwswap } else { fileType = types.VirtualMachineFileLayoutExFileTypeSwap } case "": if strings.HasPrefix(fileNameNoExt, "imcf-") { fileType = types.VirtualMachineFileLayoutExFileTypeGuestCustomization } } return fileType } func (vm *VirtualMachine) addFileLayoutEx(datastorePath object.DatastorePath, fileSize int64) int32 { var newKey int32 for _, layoutFile := range vm.LayoutEx.File { if layoutFile.Name == datastorePath.String() { return layoutFile.Key } if layoutFile.Key >= 
newKey { newKey = layoutFile.Key + 1 } } fileType := getVMFileType(filepath.Base(datastorePath.Path)) switch fileType { case types.VirtualMachineFileLayoutExFileTypeNvram, types.VirtualMachineFileLayoutExFileTypeSnapshotList: vm.addConfigLayout(datastorePath.Path) case types.VirtualMachineFileLayoutExFileTypeLog: vm.addLogLayout(datastorePath.Path) case types.VirtualMachineFileLayoutExFileTypeSwap: vm.addSwapLayout(datastorePath.String()) } vm.LayoutEx.File = append(vm.LayoutEx.File, types.VirtualMachineFileLayoutExFileInfo{ Accessible: types.NewBool(true), BackingObjectId: "", Key: newKey, Name: datastorePath.String(), Size: fileSize, Type: string(fileType), UniqueSize: fileSize, }) vm.LayoutEx.Timestamp = time.Now() vm.updateStorage() return newKey } func (vm *VirtualMachine) addConfigLayout(name string) { for _, config := range vm.Layout.ConfigFile { if config == name { return } } vm.Layout.ConfigFile = append(vm.Layout.ConfigFile, name) vm.updateStorage() } func (vm *VirtualMachine) addLogLayout(name string) { for _, log := range vm.Layout.LogFile { if log == name { return } } vm.Layout.LogFile = append(vm.Layout.LogFile, name) vm.updateStorage() } func (vm *VirtualMachine) addSwapLayout(name string) { vm.Layout.SwapFile = name vm.updateStorage() } func (vm *VirtualMachine) addSnapshotLayout(snapshot types.ManagedObjectReference, dataKey int32) { for _, snapshotLayout := range vm.Layout.Snapshot { if snapshotLayout.Key == snapshot { return } } var snapshotFiles []string for _, file := range vm.LayoutEx.File { if file.Key == dataKey || file.Type == "diskDescriptor" { snapshotFiles = append(snapshotFiles, file.Name) } } vm.Layout.Snapshot = append(vm.Layout.Snapshot, types.VirtualMachineFileLayoutSnapshotLayout{ Key: snapshot, SnapshotFile: snapshotFiles, }) vm.updateStorage() } func (vm *VirtualMachine) addSnapshotLayoutEx(snapshot types.ManagedObjectReference, dataKey int32, memoryKey int32) { for _, snapshotLayoutEx := range vm.LayoutEx.Snapshot { if snapshotLayoutEx.Key == snapshot { return } } vm.LayoutEx.Snapshot = append(vm.LayoutEx.Snapshot, types.VirtualMachineFileLayoutExSnapshotLayout{ DataKey: dataKey, Disk: vm.LayoutEx.Disk, Key: snapshot, MemoryKey: memoryKey, }) vm.LayoutEx.Timestamp = time.Now() vm.updateStorage() } // Updates both vm.Layout.Disk and vm.LayoutEx.Disk func (vm *VirtualMachine) updateDiskLayouts() types.BaseMethodFault { var disksLayout []types.VirtualMachineFileLayoutDiskLayout var disksLayoutEx []types.VirtualMachineFileLayoutExDiskLayout disks := object.VirtualDeviceList(vm.Config.Hardware.Device).SelectByType((*types.VirtualDisk)(nil)) for _, disk := range disks { disk := disk.(*types.VirtualDisk) diskBacking := disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo) diskLayout := &types.VirtualMachineFileLayoutDiskLayout{Key: disk.Key} diskLayoutEx := &types.VirtualMachineFileLayoutExDiskLayout{Key: disk.Key} // Iterate through disk and its parents for { dFileName := diskBacking.GetVirtualDeviceFileBackingInfo().FileName var fileKeys []int32 // Add disk descriptor and extent files for _, diskName := range vdmNames(dFileName) { // get full path including datastore location p, fault := parseDatastorePath(diskName) if fault != nil { return fault } datastore := vm.useDatastore(p.Datastore) dFilePath := path.Join(datastore.Info.GetDatastoreInfo().Url, p.Path) var fileSize int64 // If file can not be opened - fileSize will be 0 if dFileInfo, err := os.Stat(dFilePath); err == nil { fileSize = dFileInfo.Size() } diskKey := vm.addFileLayoutEx(*p, fileSize) 
fileKeys = append(fileKeys, diskKey) } diskLayout.DiskFile = append(diskLayout.DiskFile, dFileName) diskLayoutEx.Chain = append(diskLayoutEx.Chain, types.VirtualMachineFileLayoutExDiskUnit{ FileKey: fileKeys, }) if parent := diskBacking.Parent; parent != nil { diskBacking = parent } else { break } } disksLayout = append(disksLayout, *diskLayout) disksLayoutEx = append(disksLayoutEx, *diskLayoutEx) } vm.Layout.Disk = disksLayout vm.LayoutEx.Disk = disksLayoutEx vm.LayoutEx.Timestamp = time.Now() vm.updateStorage() return nil } func (vm *VirtualMachine) updateStorage() types.BaseMethodFault { // Committed - sum of Size for each file in vm.LayoutEx.File // Unshared - sum of Size for each disk (.vmdk) in vm.LayoutEx.File // Uncommitted - disk capacity minus disk usage (only currently used disk) var datastoresUsage []types.VirtualMachineUsageOnDatastore disks := object.VirtualDeviceList(vm.Config.Hardware.Device).SelectByType((*types.VirtualDisk)(nil)) for _, file := range vm.LayoutEx.File { p, fault := parseDatastorePath(file.Name) if fault != nil { return fault } datastore := vm.useDatastore(p.Datastore) dsUsage := &types.VirtualMachineUsageOnDatastore{ Datastore: datastore.Self, } for idx, usage := range datastoresUsage { if usage.Datastore == datastore.Self { datastoresUsage = append(datastoresUsage[:idx], datastoresUsage[idx+1:]...) dsUsage = &usage break } } dsUsage.Committed = file.Size if path.Ext(file.Name) == ".vmdk" { dsUsage.Unshared = file.Size } for _, disk := range disks { disk := disk.(*types.VirtualDisk) backing := disk.Backing.(types.BaseVirtualDeviceFileBackingInfo).GetVirtualDeviceFileBackingInfo() if backing.FileName == file.Name { dsUsage.Uncommitted = disk.CapacityInBytes } } datastoresUsage = append(datastoresUsage, *dsUsage) } vm.Storage.PerDatastoreUsage = datastoresUsage vm.Storage.Timestamp = time.Now() storageSummary := &types.VirtualMachineStorageSummary{ Timestamp: time.Now(), } for _, usage := range datastoresUsage { storageSummary.Committed += usage.Committed storageSummary.Uncommitted += usage.Uncommitted storageSummary.Unshared += usage.Unshared } vm.Summary.Storage = storageSummary return nil } func (vm *VirtualMachine) RefreshStorageInfo(ctx *Context, req *types.RefreshStorageInfo) soap.HasFault { body := new(methods.RefreshStorageInfoBody) if vm.Runtime.Host == nil { // VM not fully created return body } // Validate that all files in vm.LayoutEx.File can still be found for idx := len(vm.LayoutEx.File) - 1; idx >= 0; idx-- { file := vm.LayoutEx.File[idx] p, fault := parseDatastorePath(file.Name) if fault != nil { body.Fault_ = Fault("", fault) return body } if _, err := os.Stat(p.String()); err != nil { vm.LayoutEx.File = append(vm.LayoutEx.File[:idx], vm.LayoutEx.File[idx+1:]...) } } // Directories will be used to locate VM files. // Does not include information about virtual disk file locations. 
locations := []string{ vm.Config.Files.VmPathName, vm.Config.Files.SnapshotDirectory, vm.Config.Files.LogDirectory, vm.Config.Files.SuspendDirectory, vm.Config.Files.FtMetadataDirectory, } for _, directory := range locations { if directory == "" { continue } p, fault := parseDatastorePath(directory) if fault != nil { body.Fault_ = Fault("", fault) return body } datastore := vm.useDatastore(p.Datastore) directory := path.Join(datastore.Info.GetDatastoreInfo().Url, p.Path) if path.Ext(p.Path) == ".vmx" { directory = path.Dir(directory) // vm.Config.Files.VmPathName can be a directory or full path to .vmx } if _, err := os.Stat(directory); err != nil { // Can not access the directory continue } files, err := ioutil.ReadDir(directory) if err != nil { body.Fault_ = soap.ToSoapFault(err) return body } for _, file := range files { datastorePath := object.DatastorePath{ Datastore: p.Datastore, Path: strings.TrimPrefix(file.Name(), datastore.Info.GetDatastoreInfo().Url), } vm.addFileLayoutEx(datastorePath, file.Size()) } } fault := vm.updateDiskLayouts() if fault != nil { body.Fault_ = Fault("", fault) return body } vm.LayoutEx.Timestamp = time.Now() body.Res = new(types.RefreshStorageInfoResponse) return body } func (vm *VirtualMachine) findDatastore(name string) *Datastore { host := Map.Get(*vm.Runtime.Host).(*HostSystem) return Map.FindByName(name, host.Datastore).(*Datastore) } func (vm *VirtualMachine) useDatastore(name string) *Datastore { ds := vm.findDatastore(name) if FindReference(vm.Datastore, ds.Self) == nil { vm.Datastore = append(vm.Datastore, ds.Self) } return ds } func (vm *VirtualMachine) vmx(spec *types.VirtualMachineConfigSpec) object.DatastorePath { var p object.DatastorePath vmx := vm.Config.Files.VmPathName if spec != nil { vmx = spec.Files.VmPathName } p.FromString(vmx) return p } func (vm *VirtualMachine) createFile(spec string, name string, register bool) (*os.File, types.BaseMethodFault) { p, fault := parseDatastorePath(spec) if fault != nil { return nil, fault } ds := vm.useDatastore(p.Datastore) nhost := len(ds.Host) if ds.Name == "vsanDatastore" && nhost < 3 { fault := new(types.CannotCreateFile) fault.FaultMessage = []types.LocalizableMessage{ { Key: "vob.vsanprovider.object.creation.failed", Message: "Failed to create object.", }, { Key: "vob.vsan.clomd.needMoreFaultDomains2", Message: fmt.Sprintf("There are currently %d usable fault domains. The operation requires %d more usable fault domains.", nhost, 3-nhost), }, } fault.File = p.Path return nil, fault } file := path.Join(ds.Info.GetDatastoreInfo().Url, p.Path) if name != "" { if path.Ext(p.Path) == ".vmx" { file = path.Dir(file) // vm.Config.Files.VmPathName can be a directory or full path to .vmx } file = path.Join(file, name) } if register { f, err := os.Open(filepath.Clean(file)) if err != nil { log.Printf("register %s: %s", vm.Reference(), err) if os.IsNotExist(err) { return nil, &types.NotFound{} } return nil, &types.InvalidArgument{} } return f, nil } _, err := os.Stat(file) if err == nil { fault := &types.FileAlreadyExists{FileFault: types.FileFault{File: file}} log.Printf("%T: %s", fault, file) return nil, fault } // Create parent directory if needed dir := path.Dir(file) _, err = os.Stat(dir) if err != nil { if os.IsNotExist(err) { _ = os.Mkdir(dir, 0700) } } f, err := os.Create(file) if err != nil { log.Printf("create(%s): %s", file, err) return nil, &types.FileFault{ File: file, } } return f, nil } // Rather than keep an fd open for each VM, open/close the log for each messages. 
// This is ok for now as we do not do any heavy VM logging. func (vm *VirtualMachine) logPrintf(format string, v ...interface{}) { f, err := os.OpenFile(vm.log, os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0) if err != nil { log.Println(err) return } log.New(f, "vmx ", log.Flags()).Printf(format, v...) _ = f.Close() } func (vm *VirtualMachine) create(spec *types.VirtualMachineConfigSpec, register bool) types.BaseMethodFault { vm.apply(spec) if spec.Version != "" { v := strings.TrimPrefix(spec.Version, "vmx-") _, err := strconv.Atoi(v) if err != nil { log.Printf("unsupported hardware version: %s", spec.Version) return new(types.NotSupported) } } files := []struct { spec string name string use *string }{ {vm.Config.Files.VmPathName, "", nil}, {vm.Config.Files.VmPathName, fmt.Sprintf("%s.nvram", vm.Name), nil}, {vm.Config.Files.LogDirectory, "vmware.log", &vm.log}, } for _, file := range files { f, err := vm.createFile(file.spec, file.name, register) if err != nil { return err } if file.use != nil { *file.use = f.Name() } _ = f.Close() } vm.logPrintf("created") return vm.configureDevices(spec) } var vmwOUI = net.HardwareAddr([]byte{0x0, 0xc, 0x29}) // From http://pubs.vmware.com/vsphere-60/index.jsp?topic=%2Fcom.vmware.vsphere.networking.doc%2FGUID-DC7478FF-DC44-4625-9AD7-38208C56A552.html // "The host generates generateMAC addresses that consists of the VMware OUI 00:0C:29 and the last three octets in hexadecimal // format of the virtual machine UUID. The virtual machine UUID is based on a hash calculated by using the UUID of the // ESXi physical machine and the path to the configuration file (.vmx) of the virtual machine." func (vm *VirtualMachine) generateMAC(unit int32) string { id := []byte(vm.Config.Uuid) offset := len(id) - len(vmwOUI) key := id[offset] + byte(unit) // add device unit number, giving each VM NIC a unique MAC id = append([]byte{key}, id[offset+1:]...) mac := append(vmwOUI, id...) 
return mac.String() } func numberToString(n int64, sep rune) string { buf := &bytes.Buffer{} if n < 0 { n = -n buf.WriteRune('-') } s := strconv.FormatInt(n, 10) pos := 3 - (len(s) % 3) for i := 0; i < len(s); i++ { if pos == 3 { if i != 0 { buf.WriteRune(sep) } pos = 0 } pos++ buf.WriteByte(s[i]) } return buf.String() } func getDiskSize(disk *types.VirtualDisk) int64 { if disk.CapacityInBytes == 0 { return disk.CapacityInKB * 1024 } return disk.CapacityInBytes } func (vm *VirtualMachine) validateSwitchMembers(id string) types.BaseMethodFault { var dswitch *DistributedVirtualSwitch var find func(types.ManagedObjectReference) find = func(child types.ManagedObjectReference) { s, ok := Map.Get(child).(*DistributedVirtualSwitch) if ok && s.Uuid == id { dswitch = s return } walk(Map.Get(child), find) } f := Map.getEntityDatacenter(vm).NetworkFolder walk(Map.Get(f), find) // search in NetworkFolder and any sub folders if dswitch == nil { log.Printf("DVS %s cannot be found", id) return new(types.NotFound) } h := Map.Get(*vm.Runtime.Host).(*HostSystem) c := hostParent(&h.HostSystem) isMember := func(val types.ManagedObjectReference) bool { for _, mem := range dswitch.Summary.HostMember { if mem == val { return true } } log.Printf("%s is not a member of VDS %s", h.Name, dswitch.Name) return false } for _, ref := range c.Host { if !isMember(ref) { return &types.InvalidArgument{InvalidProperty: "spec.deviceChange.device.port.switchUuid"} } } return nil } func (vm *VirtualMachine) configureDevice(devices object.VirtualDeviceList, spec *types.VirtualDeviceConfigSpec) types.BaseMethodFault { device := spec.Device d := device.GetVirtualDevice() var controller types.BaseVirtualController if d.Key <= 0 { // Keys can't be negative; Key 0 is reserved d.Key = devices.NewKey() d.Key *= -1 } // Choose a unique key for { if devices.FindByKey(d.Key) == nil { break } d.Key++ } label := devices.Name(device) summary := label dc := Map.getEntityDatacenter(Map.Get(*vm.Parent).(mo.Entity)) switch x := device.(type) { case types.BaseVirtualEthernetCard: controller = devices.PickController((*types.VirtualPCIController)(nil)) var net types.ManagedObjectReference var name string switch b := d.Backing.(type) { case *types.VirtualEthernetCardNetworkBackingInfo: name = b.DeviceName summary = name net = Map.FindByName(b.DeviceName, dc.Network).Reference() b.Network = &net case *types.VirtualEthernetCardDistributedVirtualPortBackingInfo: summary = fmt.Sprintf("DVSwitch: %s", b.Port.SwitchUuid) net.Type = "DistributedVirtualPortgroup" net.Value = b.Port.PortgroupKey if err := vm.validateSwitchMembers(b.Port.SwitchUuid); err != nil { return err } } Map.Update(vm, []types.PropertyChange{ {Name: "summary.config.numEthernetCards", Val: vm.Summary.Config.NumEthernetCards + 1}, {Name: "network", Val: append(vm.Network, net)}, }) c := x.GetVirtualEthernetCard() if c.MacAddress == "" { if c.UnitNumber == nil { devices.AssignController(device, controller) } c.MacAddress = vm.generateMAC(*c.UnitNumber - 7) // Note 7 == PCI offset } if spec.Operation == types.VirtualDeviceConfigSpecOperationAdd { vm.Guest.Net = append(vm.Guest.Net, types.GuestNicInfo{ Network: name, IpAddress: nil, MacAddress: c.MacAddress, Connected: true, DeviceConfigId: c.Key, }) } case *types.VirtualDisk: summary = fmt.Sprintf("%s KB", numberToString(x.CapacityInKB, ',')) switch b := d.Backing.(type) { case types.BaseVirtualDeviceFileBackingInfo: info := b.GetVirtualDeviceFileBackingInfo() var path object.DatastorePath path.FromString(info.FileName) if path.Path == "" 
{ filename, err := vm.genVmdkPath(path) if err != nil { return err } info.FileName = filename } err := vdmCreateVirtualDisk(spec.FileOperation, &types.CreateVirtualDisk_Task{ Datacenter: &dc.Self, Name: info.FileName, }) if err != nil { return err } Map.Update(vm, []types.PropertyChange{ {Name: "summary.config.numVirtualDisks", Val: vm.Summary.Config.NumVirtualDisks + 1}, }) p, _ := parseDatastorePath(info.FileName) ds := vm.findDatastore(p.Datastore) info.Datastore = &ds.Self // XXX: compare disk size and free space until windows stat is supported Map.WithLock(ds, func() { ds.Summary.FreeSpace -= getDiskSize(x) ds.Info.GetDatastoreInfo().FreeSpace = ds.Summary.FreeSpace }) vm.updateDiskLayouts() if disk, ok := b.(*types.VirtualDiskFlatVer2BackingInfo); ok { // These properties default to false props := []**bool{ &disk.EagerlyScrub, &disk.ThinProvisioned, &disk.WriteThrough, &disk.Split, &disk.DigestEnabled, } for _, prop := range props { if *prop == nil { *prop = types.NewBool(false) } } disk.Uuid = virtualDiskUUID(&dc.Self, info.FileName) } } case *types.VirtualCdrom: if b, ok := d.Backing.(types.BaseVirtualDeviceFileBackingInfo); ok { summary = "ISO " + b.GetVirtualDeviceFileBackingInfo().FileName } case *types.VirtualFloppy: if b, ok := d.Backing.(types.BaseVirtualDeviceFileBackingInfo); ok { summary = "Image " + b.GetVirtualDeviceFileBackingInfo().FileName } case *types.VirtualSerialPort: switch b := d.Backing.(type) { case types.BaseVirtualDeviceFileBackingInfo: summary = "File " + b.GetVirtualDeviceFileBackingInfo().FileName case *types.VirtualSerialPortURIBackingInfo: summary = "Remote " + b.ServiceURI } } if d.UnitNumber == nil && controller != nil { devices.AssignController(device, controller) } if d.DeviceInfo == nil { d.DeviceInfo = &types.Description{ Label: label, Summary: summary, } } else { info := d.DeviceInfo.GetDescription() if info.Label == "" { info.Label = label } if info.Summary == "" { info.Summary = summary } } switch device.(type) { case types.BaseVirtualEthernetCard, *types.VirtualCdrom, *types.VirtualFloppy, *types.VirtualUSB, *types.VirtualSerialPort: if d.Connectable == nil { d.Connectable = &types.VirtualDeviceConnectInfo{StartConnected: true, Connected: true} } } return nil } func (vm *VirtualMachine) removeDevice(devices object.VirtualDeviceList, spec *types.VirtualDeviceConfigSpec) object.VirtualDeviceList { key := spec.Device.GetVirtualDevice().Key for i, d := range devices { if d.GetVirtualDevice().Key != key { continue } devices = append(devices[:i], devices[i+1:]...) 
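// The device is now spliced out of the list. The type switch below
// releases whatever the device held: a destroyed disk returns its
// capacity to the datastore and deletes the backing file, while a NIC
// drops its network reference and decrements the summary counters.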
switch device := spec.Device.(type) { case *types.VirtualDisk: if spec.FileOperation == types.VirtualDeviceConfigSpecFileOperationDestroy { var file string switch b := device.Backing.(type) { case types.BaseVirtualDeviceFileBackingInfo: file = b.GetVirtualDeviceFileBackingInfo().FileName p, _ := parseDatastorePath(file) ds := vm.findDatastore(p.Datastore) Map.WithLock(ds, func() { ds.Summary.FreeSpace += getDiskSize(device) ds.Info.GetDatastoreInfo().FreeSpace = ds.Summary.FreeSpace }) } if file != "" { dc := Map.getEntityDatacenter(vm) dm := Map.VirtualDiskManager() if dc == nil { continue // parent was destroyed } dm.DeleteVirtualDiskTask(internalContext, &types.DeleteVirtualDisk_Task{ Name: file, Datacenter: &dc.Self, }) } } Map.Update(vm, []types.PropertyChange{ {Name: "summary.config.numVirtualDisks", Val: vm.Summary.Config.NumVirtualDisks - 1}, }) vm.updateDiskLayouts() case types.BaseVirtualEthernetCard: var net types.ManagedObjectReference switch b := device.GetVirtualEthernetCard().Backing.(type) { case *types.VirtualEthernetCardNetworkBackingInfo: net = *b.Network case *types.VirtualEthernetCardDistributedVirtualPortBackingInfo: net.Type = "DistributedVirtualPortgroup" net.Value = b.Port.PortgroupKey } networks := vm.Network RemoveReference(&networks, net) Map.Update(vm, []types.PropertyChange{ {Name: "summary.config.numEthernetCards", Val: vm.Summary.Config.NumEthernetCards - 1}, {Name: "network", Val: networks}, }) } break } return devices } func (vm *VirtualMachine) genVmdkPath(p object.DatastorePath) (string, types.BaseMethodFault) { if p.Datastore == "" { p.FromString(vm.Config.Files.VmPathName) } if p.Path == "" { p.Path = vm.Config.Name } else { p.Path = path.Dir(p.Path) } vmdir := p.String() index := 0 for { var filename string if index == 0 { filename = fmt.Sprintf("%s.vmdk", vm.Config.Name) } else { filename = fmt.Sprintf("%s_%d.vmdk", vm.Config.Name, index) } f, err := vm.createFile(vmdir, filename, false) if err != nil { switch err.(type) { case *types.FileAlreadyExists: index++ continue default: return "", err } } _ = f.Close() _ = os.Remove(f.Name()) return path.Join(vmdir, filename), nil } } func (vm *VirtualMachine) configureDevices(spec *types.VirtualMachineConfigSpec) types.BaseMethodFault { devices := object.VirtualDeviceList(vm.Config.Hardware.Device) for i, change := range spec.DeviceChange { dspec := change.GetVirtualDeviceConfigSpec() device := dspec.Device.GetVirtualDevice() invalid := &types.InvalidDeviceSpec{DeviceIndex: int32(i)} switch dspec.FileOperation { case types.VirtualDeviceConfigSpecFileOperationCreate: switch dspec.Device.(type) { case *types.VirtualDisk: if device.UnitNumber == nil { return invalid } } } switch dspec.Operation { case types.VirtualDeviceConfigSpecOperationAdd: if devices.FindByKey(device.Key) != nil && device.ControllerKey == 0 { // Note: real ESX does not allow adding base controllers (ControllerKey = 0) // after VM is created (returns success but device is not added). 
continue } else if device.UnitNumber != nil && devices.SelectByType(dspec.Device).Select(func(d types.BaseVirtualDevice) bool { base := d.GetVirtualDevice() if base.UnitNumber != nil { if base.ControllerKey != device.ControllerKey { return false } return *base.UnitNumber == *device.UnitNumber } return false }) != nil { // UnitNumber for this device type is taken return invalid } key := device.Key err := vm.configureDevice(devices, dspec) if err != nil { return err } devices = append(devices, dspec.Device) if key != device.Key { // Update ControllerKey refs for i := range spec.DeviceChange { ckey := &spec.DeviceChange[i].GetVirtualDeviceConfigSpec().Device.GetVirtualDevice().ControllerKey if *ckey == key { *ckey = device.Key } } } case types.VirtualDeviceConfigSpecOperationEdit: rspec := *dspec rspec.Device = devices.FindByKey(device.Key) if rspec.Device == nil { return invalid } devices = vm.removeDevice(devices, &rspec) device.DeviceInfo.GetDescription().Summary = "" // regenerate summary err := vm.configureDevice(devices, dspec) if err != nil { return err } devices = append(devices, dspec.Device) case types.VirtualDeviceConfigSpecOperationRemove: devices = vm.removeDevice(devices, dspec) } } Map.Update(vm, []types.PropertyChange{ {Name: "config.hardware.device", Val: []types.BaseVirtualDevice(devices)}, }) vm.updateDiskLayouts() vm.applyExtraConfig(spec) // Do this after device config, as some may apply to the devices themselves (e.g. ethernet -> guest.net) return nil } type powerVMTask struct { *VirtualMachine state types.VirtualMachinePowerState ctx *Context } func (c *powerVMTask) Run(task *Task) (types.AnyType, types.BaseMethodFault) { c.logPrintf("running power task: requesting %s, existing %s", c.state, c.VirtualMachine.Runtime.PowerState) if c.VirtualMachine.Runtime.PowerState == c.state { return nil, &types.InvalidPowerState{ RequestedState: c.state, ExistingState: c.VirtualMachine.Runtime.PowerState, } } var boot types.AnyType if c.state == types.VirtualMachinePowerStatePoweredOn { boot = time.Now() } event := c.event() switch c.state { case types.VirtualMachinePowerStatePoweredOn: c.run.start(c.VirtualMachine) c.ctx.postEvent( &types.VmStartingEvent{VmEvent: event}, &types.VmPoweredOnEvent{VmEvent: event}, ) c.customize(c.ctx) case types.VirtualMachinePowerStatePoweredOff: c.run.stop(c.VirtualMachine) c.ctx.postEvent( &types.VmStoppingEvent{VmEvent: event}, &types.VmPoweredOffEvent{VmEvent: event}, ) case types.VirtualMachinePowerStateSuspended: if c.VirtualMachine.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { return nil, &types.InvalidPowerState{ RequestedState: types.VirtualMachinePowerStatePoweredOn, ExistingState: c.VirtualMachine.Runtime.PowerState, } } c.run.pause(c.VirtualMachine) c.ctx.postEvent( &types.VmSuspendingEvent{VmEvent: event}, &types.VmSuspendedEvent{VmEvent: event}, ) } Map.Update(c.VirtualMachine, []types.PropertyChange{ {Name: "runtime.powerState", Val: c.state}, {Name: "summary.runtime.powerState", Val: c.state}, {Name: "summary.runtime.bootTime", Val: boot}, }) return nil, nil } func (vm *VirtualMachine) PowerOnVMTask(ctx *Context, c *types.PowerOnVM_Task) soap.HasFault { if vm.Config.Template { return &methods.PowerOnVM_TaskBody{ Fault_: Fault("cannot powerOn a template", &types.InvalidState{}), } } runner := &powerVMTask{vm, types.VirtualMachinePowerStatePoweredOn, ctx} task := CreateTask(runner.Reference(), "powerOn", runner.Run) return &methods.PowerOnVM_TaskBody{ Res: &types.PowerOnVM_TaskResponse{ Returnval: task.Run(), }, } } 
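// A minimal client-side sketch (illustrative, not part of this file)
// of how the power tasks above are typically driven through the
// govmomi object API. The function name, the inventory path (based on
// vcsim's default naming), and the imports (context,
// github.com/vmware/govmomi, github.com/vmware/govmomi/find) are
// assumptions for the example:
//
//	func powerCycle(ctx context.Context, c *govmomi.Client) error {
//		finder := find.NewFinder(c.Client)
//		vm, err := finder.VirtualMachine(ctx, "/DC0/vm/DC0_H0_VM0")
//		if err != nil {
//			return err
//		}
//		task, err := vm.PowerOff(ctx) // served by PowerOffVMTask above
//		if err != nil {
//			return err
//		}
//		if err = task.Wait(ctx); err != nil {
//			return err
//		}
//		task, err = vm.PowerOn(ctx) // PowerOnVMTask; fails for templates
//		if err != nil {
//			return err
//		}
//		return task.Wait(ctx)
//	}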
func (vm *VirtualMachine) PowerOffVMTask(ctx *Context, c *types.PowerOffVM_Task) soap.HasFault { runner := &powerVMTask{vm, types.VirtualMachinePowerStatePoweredOff, ctx} task := CreateTask(runner.Reference(), "powerOff", runner.Run) return &methods.PowerOffVM_TaskBody{ Res: &types.PowerOffVM_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) SuspendVMTask(ctx *Context, req *types.SuspendVM_Task) soap.HasFault { runner := &powerVMTask{vm, types.VirtualMachinePowerStateSuspended, ctx} task := CreateTask(runner.Reference(), "suspend", runner.Run) return &methods.SuspendVM_TaskBody{ Res: &types.SuspendVM_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) ResetVMTask(ctx *Context, req *types.ResetVM_Task) soap.HasFault { task := CreateTask(vm, "reset", func(task *Task) (types.AnyType, types.BaseMethodFault) { res := vm.PowerOffVMTask(ctx, &types.PowerOffVM_Task{This: vm.Self}) ctask := Map.Get(res.(*methods.PowerOffVM_TaskBody).Res.Returnval).(*Task) if ctask.Info.Error != nil { return nil, ctask.Info.Error.Fault } _ = vm.PowerOnVMTask(ctx, &types.PowerOnVM_Task{This: vm.Self}) return nil, nil }) return &methods.ResetVM_TaskBody{ Res: &types.ResetVM_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) RebootGuest(ctx *Context, req *types.RebootGuest) soap.HasFault { body := new(methods.RebootGuestBody) if vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOn { body.Fault_ = Fault("", &types.InvalidPowerState{ RequestedState: types.VirtualMachinePowerStatePoweredOn, ExistingState: vm.Runtime.PowerState, }) return body } body.Fault_ = Fault("", new(types.ToolsUnavailable)) return body } func (vm *VirtualMachine) ReconfigVMTask(ctx *Context, req *types.ReconfigVM_Task) soap.HasFault { task := CreateTask(vm, "reconfigVm", func(t *Task) (types.AnyType, types.BaseMethodFault) { ctx.postEvent(&types.VmReconfiguredEvent{ VmEvent: vm.event(), ConfigSpec: req.Spec, }) if vm.Config.Template { expect := types.VirtualMachineConfigSpec{ Name: req.Spec.Name, Annotation: req.Spec.Annotation, } if !reflect.DeepEqual(&req.Spec, &expect) { log.Printf("template reconfigure only allows name and annotation change") return nil, new(types.NotSupported) } } err := vm.configure(&req.Spec) return nil, err }) return &methods.ReconfigVM_TaskBody{ Res: &types.ReconfigVM_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) UpgradeVMTask(req *types.UpgradeVM_Task) soap.HasFault { body := &methods.UpgradeVM_TaskBody{} task := CreateTask(vm, "upgradeVm", func(t *Task) (types.AnyType, types.BaseMethodFault) { if vm.Config.Version != esx.HardwareVersion { Map.Update(vm, []types.PropertyChange{{ Name: "config.version", Val: esx.HardwareVersion, }}) } return nil, nil }) body.Res = &types.UpgradeVM_TaskResponse{ Returnval: task.Run(), } return body } func (vm *VirtualMachine) DestroyTask(ctx *Context, req *types.Destroy_Task) soap.HasFault { dc := ctx.Map.getEntityDatacenter(vm) task := CreateTask(vm, "destroy", func(t *Task) (types.AnyType, types.BaseMethodFault) { if dc == nil { return nil, &types.ManagedObjectNotFound{Obj: vm.Self} // If our Parent was destroyed, so were we. 
} r := vm.UnregisterVM(ctx, &types.UnregisterVM{ This: req.This, }) if r.Fault() != nil { return nil, r.Fault().VimFault().(types.BaseMethodFault) } // Remove all devices devices := object.VirtualDeviceList(vm.Config.Hardware.Device) spec, _ := devices.ConfigSpec(types.VirtualDeviceConfigSpecOperationRemove) vm.configureDevices(&types.VirtualMachineConfigSpec{DeviceChange: spec}) // Delete VM files from the datastore (ignoring result for now) m := Map.FileManager() _ = m.DeleteDatastoreFileTask(&types.DeleteDatastoreFile_Task{ This: m.Reference(), Name: vm.Config.Files.LogDirectory, Datacenter: &dc.Self, }) vm.run.remove(vm) return nil, nil }) return &methods.Destroy_TaskBody{ Res: &types.Destroy_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) SetCustomValue(ctx *Context, req *types.SetCustomValue) soap.HasFault { return SetCustomValue(ctx, req) } func (vm *VirtualMachine) UnregisterVM(ctx *Context, c *types.UnregisterVM) soap.HasFault { r := &methods.UnregisterVMBody{} if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn { r.Fault_ = Fault("", &types.InvalidPowerState{ RequestedState: types.VirtualMachinePowerStatePoweredOff, ExistingState: vm.Runtime.PowerState, }) return r } host := Map.Get(*vm.Runtime.Host).(*HostSystem) Map.RemoveReference(host, &host.Vm, vm.Self) if vm.ResourcePool != nil { switch pool := Map.Get(*vm.ResourcePool).(type) { case *ResourcePool: Map.RemoveReference(pool, &pool.Vm, vm.Self) case *VirtualApp: Map.RemoveReference(pool, &pool.Vm, vm.Self) } } for i := range vm.Datastore { ds := Map.Get(vm.Datastore[i]).(*Datastore) Map.RemoveReference(ds, &ds.Vm, vm.Self) } ctx.postEvent(&types.VmRemovedEvent{VmEvent: vm.event()}) if f, ok := asFolderMO(Map.getEntityParent(vm, "Folder")); ok { folderRemoveChild(ctx, f, c.This) } r.Res = new(types.UnregisterVMResponse) return r } type vmFolder interface { CreateVMTask(ctx *Context, c *types.CreateVM_Task) soap.HasFault } func (vm *VirtualMachine) CloneVMTask(ctx *Context, req *types.CloneVM_Task) soap.HasFault { pool := req.Spec.Location.Pool if pool == nil { if !vm.Config.Template { pool = vm.ResourcePool } } folder, _ := asFolderMO(Map.Get(req.Folder)) host := Map.Get(*vm.Runtime.Host).(*HostSystem) event := vm.event() ctx.postEvent(&types.VmBeingClonedEvent{ VmCloneEvent: types.VmCloneEvent{ VmEvent: event, }, DestFolder: folderEventArgument(folder), DestName: req.Name, DestHost: *host.eventArgument(), }) vmx := vm.vmx(nil) vmx.Path = req.Name if ref := req.Spec.Location.Datastore; ref != nil { ds := Map.Get(*ref).(*Datastore).Name vmx.Datastore = ds } task := CreateTask(vm, "cloneVm", func(t *Task) (types.AnyType, types.BaseMethodFault) { if pool == nil { return nil, &types.InvalidArgument{InvalidProperty: "spec.location.pool"} } config := types.VirtualMachineConfigSpec{ Name: req.Name, GuestId: vm.Config.GuestId, Files: &types.VirtualMachineFileInfo{ VmPathName: vmx.String(), }, } if req.Spec.Config != nil { config.ExtraConfig = req.Spec.Config.ExtraConfig config.InstanceUuid = req.Spec.Config.InstanceUuid } // Copying hardware properties config.NumCPUs = vm.Config.Hardware.NumCPU config.MemoryMB = int64(vm.Config.Hardware.MemoryMB) config.NumCoresPerSocket = vm.Config.Hardware.NumCoresPerSocket config.VirtualICH7MPresent = vm.Config.Hardware.VirtualICH7MPresent config.VirtualSMCPresent = vm.Config.Hardware.VirtualSMCPresent defaultDevices := object.VirtualDeviceList(esx.VirtualDevice) devices := vm.Config.Hardware.Device for _, device := range devices { var fop 
types.VirtualDeviceConfigSpecFileOperation if defaultDevices.Find(object.VirtualDeviceList(devices).Name(device)) != nil { // Default devices are added during CreateVMTask continue } switch disk := device.(type) { case *types.VirtualDisk: // TODO: consider VirtualMachineCloneSpec.DiskMoveType fop = types.VirtualDeviceConfigSpecFileOperationCreate // Leave FileName empty so CreateVM will just create a new one under VmPathName disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo).FileName = "" disk.Backing.(*types.VirtualDiskFlatVer2BackingInfo).Parent = nil } config.DeviceChange = append(config.DeviceChange, &types.VirtualDeviceConfigSpec{ Operation: types.VirtualDeviceConfigSpecOperationAdd, Device: device, FileOperation: fop, }) } res := Map.Get(req.Folder).(vmFolder).CreateVMTask(ctx, &types.CreateVM_Task{ This: folder.Self, Config: config, Pool: *pool, Host: vm.Runtime.Host, }) ctask := Map.Get(res.(*methods.CreateVM_TaskBody).Res.Returnval).(*Task) if ctask.Info.Error != nil { return nil, ctask.Info.Error.Fault } ref := ctask.Info.Result.(types.ManagedObjectReference) clone := Map.Get(ref).(*VirtualMachine) clone.configureDevices(&types.VirtualMachineConfigSpec{DeviceChange: req.Spec.Location.DeviceChange}) if req.Spec.Config != nil && req.Spec.Config.DeviceChange != nil { clone.configureDevices(&types.VirtualMachineConfigSpec{DeviceChange: req.Spec.Config.DeviceChange}) } if req.Spec.Template { _ = clone.MarkAsTemplate(&types.MarkAsTemplate{This: clone.Self}) } ctx.postEvent(&types.VmClonedEvent{ VmCloneEvent: types.VmCloneEvent{VmEvent: clone.event()}, SourceVm: *event.Vm, }) return ref, nil }) return &methods.CloneVM_TaskBody{ Res: &types.CloneVM_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) RelocateVMTask(ctx *Context, req *types.RelocateVM_Task) soap.HasFault { task := CreateTask(vm, "relocateVm", func(t *Task) (types.AnyType, types.BaseMethodFault) { var changes []types.PropertyChange if ref := req.Spec.Datastore; ref != nil { ds := Map.Get(*ref).(*Datastore) Map.RemoveReference(ds, &ds.Vm, *ref) // TODO: migrate vm.Config.Files, vm.Summary.Config.VmPathName, vm.Layout and vm.LayoutEx changes = append(changes, types.PropertyChange{Name: "datastore", Val: []types.ManagedObjectReference{*ref}}) } if ref := req.Spec.Pool; ref != nil { pool := Map.Get(*ref).(*ResourcePool) Map.RemoveReference(pool, &pool.Vm, *ref) changes = append(changes, types.PropertyChange{Name: "resourcePool", Val: ref}) } if ref := req.Spec.Host; ref != nil { host := Map.Get(*ref).(*HostSystem) Map.RemoveReference(host, &host.Vm, *ref) changes = append(changes, types.PropertyChange{Name: "runtime.host", Val: ref}, types.PropertyChange{Name: "summary.runtime.host", Val: ref}, ) } if ref := req.Spec.Folder; ref != nil { folder := Map.Get(*ref).(*Folder) folder.MoveIntoFolderTask(ctx, &types.MoveIntoFolder_Task{ List: []types.ManagedObjectReference{vm.Self}, }) } Map.Update(vm, changes) return nil, nil }) return &methods.RelocateVM_TaskBody{ Res: &types.RelocateVM_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) customize(ctx *Context) { if vm.imc == nil { return } event := types.CustomizationEvent{VmEvent: vm.event()} ctx.postEvent(&types.CustomizationStartedEvent{CustomizationEvent: event}) changes := []types.PropertyChange{ {Name: "config.tools.pendingCustomization", Val: ""}, } hostname := "" address := "" switch c := vm.imc.Identity.(type) { case *types.CustomizationLinuxPrep: hostname = customizeName(vm, c.HostName) case *types.CustomizationSysprep: hostname = 
customizeName(vm, c.UserData.ComputerName) } for i, s := range vm.imc.NicSettingMap { nic := &vm.Guest.Net[i] if s.MacAddress != "" { nic.MacAddress = s.MacAddress } if nic.DnsConfig == nil { nic.DnsConfig = new(types.NetDnsConfigInfo) } if s.Adapter.DnsDomain != "" { nic.DnsConfig.DomainName = s.Adapter.DnsDomain } if len(s.Adapter.DnsServerList) != 0 { nic.DnsConfig.IpAddress = s.Adapter.DnsServerList } if hostname != "" { nic.DnsConfig.HostName = hostname } if len(vm.imc.GlobalIPSettings.DnsSuffixList) != 0 { nic.DnsConfig.SearchDomain = vm.imc.GlobalIPSettings.DnsSuffixList } if nic.IpConfig == nil { nic.IpConfig = new(types.NetIpConfigInfo) } switch ip := s.Adapter.Ip.(type) { case *types.CustomizationCustomIpGenerator: case *types.CustomizationDhcpIpGenerator: case *types.CustomizationFixedIp: if address == "" { address = ip.IpAddress } nic.IpAddress = []string{ip.IpAddress} nic.IpConfig.IpAddress = []types.NetIpConfigInfoIpAddress{{ IpAddress: ip.IpAddress, }} case *types.CustomizationUnknownIpGenerator: } } if len(vm.imc.NicSettingMap) != 0 { changes = append(changes, types.PropertyChange{Name: "guest.net", Val: vm.Guest.Net}) } if hostname != "" { changes = append(changes, types.PropertyChange{Name: "guest.hostName", Val: hostname}) } if address != "" { changes = append(changes, types.PropertyChange{Name: "guest.ipAddress", Val: address}) } vm.imc = nil Map.Update(vm, changes) ctx.postEvent(&types.CustomizationSucceeded{CustomizationEvent: event}) } func (vm *VirtualMachine) CustomizeVMTask(req *types.CustomizeVM_Task) soap.HasFault { task := CreateTask(vm, "customizeVm", func(t *Task) (types.AnyType, types.BaseMethodFault) { if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOn { return nil, &types.InvalidPowerState{ RequestedState: types.VirtualMachinePowerStatePoweredOff, ExistingState: vm.Runtime.PowerState, } } if vm.Config.Tools.PendingCustomization != "" { return nil, new(types.CustomizationPending) } if len(vm.Guest.Net) != len(req.Spec.NicSettingMap) { return nil, &types.NicSettingMismatch{ NumberOfNicsInSpec: int32(len(req.Spec.NicSettingMap)), NumberOfNicsInVM: int32(len(vm.Guest.Net)), } } vm.imc = &req.Spec vm.Config.Tools.PendingCustomization = uuid.New().String() return nil, nil }) return &methods.CustomizeVM_TaskBody{ Res: &types.CustomizeVM_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) CreateSnapshotTask(req *types.CreateSnapshot_Task) soap.HasFault { task := CreateTask(vm, "createSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { var changes []types.PropertyChange if vm.Snapshot == nil { vm.Snapshot = &types.VirtualMachineSnapshotInfo{} } snapshot := &VirtualMachineSnapshot{} snapshot.Vm = vm.Reference() snapshot.Config = *vm.Config Map.Put(snapshot) treeItem := types.VirtualMachineSnapshotTree{ Snapshot: snapshot.Self, Vm: snapshot.Vm, Name: req.Name, Description: req.Description, Id: atomic.AddInt32(&vm.sid, 1), CreateTime: time.Now(), State: vm.Runtime.PowerState, Quiesced: req.Quiesce, BackupManifest: "", ReplaySupported: types.NewBool(false), } cur := vm.Snapshot.CurrentSnapshot if cur != nil { parent := Map.Get(*cur).(*VirtualMachineSnapshot) parent.ChildSnapshot = append(parent.ChildSnapshot, snapshot.Self) ss := findSnapshotInTree(vm.Snapshot.RootSnapshotList, *cur) ss.ChildSnapshotList = append(ss.ChildSnapshotList, treeItem) } else { changes = append(changes, types.PropertyChange{ Name: "snapshot.rootSnapshotList", Val: append(vm.Snapshot.RootSnapshotList, treeItem), }) } 
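// At this point the tree bookkeeping is done: the new snapshot is
// either chained under the current snapshot or queued as a new root
// entry. What remains is to create its backing files and advance
// snapshot.currentSnapshot to the new reference.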
snapshot.createSnapshotFiles() changes = append(changes, types.PropertyChange{Name: "snapshot.currentSnapshot", Val: snapshot.Self}) Map.Update(vm, changes) return snapshot.Self, nil }) return &methods.CreateSnapshot_TaskBody{ Res: &types.CreateSnapshot_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) RevertToCurrentSnapshotTask(req *types.RevertToCurrentSnapshot_Task) soap.HasFault { body := &methods.RevertToCurrentSnapshot_TaskBody{} if vm.Snapshot == nil || vm.Snapshot.CurrentSnapshot == nil { body.Fault_ = Fault("snapshot not found", &types.NotFound{}) return body } task := CreateTask(vm, "revertSnapshot", func(t *Task) (types.AnyType, types.BaseMethodFault) { return nil, nil }) body.Res = &types.RevertToCurrentSnapshot_TaskResponse{ Returnval: task.Run(), } return body } func (vm *VirtualMachine) RemoveAllSnapshotsTask(ctx *Context, req *types.RemoveAllSnapshots_Task) soap.HasFault { task := CreateTask(vm, "RemoveAllSnapshots", func(t *Task) (types.AnyType, types.BaseMethodFault) { if vm.Snapshot == nil { return nil, nil } refs := allSnapshotsInTree(vm.Snapshot.RootSnapshotList) Map.Update(vm, []types.PropertyChange{ {Name: "snapshot", Val: nil}, }) for _, ref := range refs { Map.Get(ref).(*VirtualMachineSnapshot).removeSnapshotFiles(ctx) Map.Remove(ref) } return nil, nil }) return &methods.RemoveAllSnapshots_TaskBody{ Res: &types.RemoveAllSnapshots_TaskResponse{ Returnval: task.Run(), }, } } func (vm *VirtualMachine) ShutdownGuest(ctx *Context, c *types.ShutdownGuest) soap.HasFault { r := &methods.ShutdownGuestBody{} // should be poweron if vm.Runtime.PowerState == types.VirtualMachinePowerStatePoweredOff { r.Fault_ = Fault("", &types.InvalidPowerState{ RequestedState: types.VirtualMachinePowerStatePoweredOn, ExistingState: vm.Runtime.PowerState, }) return r } // change state vm.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff vm.Summary.Runtime.PowerState = types.VirtualMachinePowerStatePoweredOff event := vm.event() ctx.postEvent( &types.VmGuestShutdownEvent{VmEvent: event}, &types.VmPoweredOffEvent{VmEvent: event}, ) vm.run.stop(vm) Map.Update(vm, []types.PropertyChange{ {Name: "runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff}, {Name: "summary.runtime.powerState", Val: types.VirtualMachinePowerStatePoweredOff}, }) r.Res = new(types.ShutdownGuestResponse) return r } func (vm *VirtualMachine) MarkAsTemplate(req *types.MarkAsTemplate) soap.HasFault { r := &methods.MarkAsTemplateBody{} if vm.Config.Template { r.Fault_ = Fault("", new(types.NotSupported)) return r } if vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOff { r.Fault_ = Fault("", &types.InvalidPowerState{ RequestedState: types.VirtualMachinePowerStatePoweredOff, ExistingState: vm.Runtime.PowerState, }) return r } vm.Config.Template = true vm.Summary.Config.Template = true vm.ResourcePool = nil r.Res = new(types.MarkAsTemplateResponse) return r } func (vm *VirtualMachine) MarkAsVirtualMachine(req *types.MarkAsVirtualMachine) soap.HasFault { r := &methods.MarkAsVirtualMachineBody{} if !vm.Config.Template { r.Fault_ = Fault("", new(types.NotSupported)) return r } if vm.Runtime.PowerState != types.VirtualMachinePowerStatePoweredOff { r.Fault_ = Fault("", &types.InvalidPowerState{ RequestedState: types.VirtualMachinePowerStatePoweredOff, ExistingState: vm.Runtime.PowerState, }) return r } vm.Config.Template = false vm.Summary.Config.Template = false vm.ResourcePool = &req.Pool if req.Host != nil { vm.Runtime.Host = req.Host } r.Res = 
new(types.MarkAsVirtualMachineResponse) return r } func findSnapshotInTree(tree []types.VirtualMachineSnapshotTree, ref types.ManagedObjectReference) *types.VirtualMachineSnapshotTree { if tree == nil { return nil } for i, ss := range tree { if ss.Snapshot == ref { return &tree[i] } target := findSnapshotInTree(ss.ChildSnapshotList, ref) if target != nil { return target } } return nil } func findParentSnapshot(tree types.VirtualMachineSnapshotTree, ref types.ManagedObjectReference) *types.ManagedObjectReference { for _, ss := range tree.ChildSnapshotList { if ss.Snapshot == ref { return &tree.Snapshot } res := findParentSnapshot(ss, ref) if res != nil { return res } } return nil } func findParentSnapshotInTree(tree []types.VirtualMachineSnapshotTree, ref types.ManagedObjectReference) *types.ManagedObjectReference { if tree == nil { return nil } for _, ss := range tree { res := findParentSnapshot(ss, ref) if res != nil { return res } } return nil } func removeSnapshotInTree(tree []types.VirtualMachineSnapshotTree, ref types.ManagedObjectReference, removeChildren bool) []types.VirtualMachineSnapshotTree { if tree == nil { return tree } var result []types.VirtualMachineSnapshotTree for _, ss := range tree { if ss.Snapshot == ref { if !removeChildren { result = append(result, ss.ChildSnapshotList...) } } else { ss.ChildSnapshotList = removeSnapshotInTree(ss.ChildSnapshotList, ref, removeChildren) result = append(result, ss) } } return result } func allSnapshotsInTree(tree []types.VirtualMachineSnapshotTree) []types.ManagedObjectReference { var result []types.ManagedObjectReference if tree == nil { return result } for _, ss := range tree { result = append(result, ss.Snapshot) result = append(result, allSnapshotsInTree(ss.ChildSnapshotList)...) } return result }
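// The snapshot-tree helpers above are pure functions over
// types.VirtualMachineSnapshotTree, so they can be exercised without a
// simulator instance. A hypothetical in-package test sketch (they are
// unexported; the test name and values are made up for illustration):
//
//	func TestSnapshotTreeHelpers(t *testing.T) {
//		root := types.ManagedObjectReference{Type: "VirtualMachineSnapshot", Value: "snap-1"}
//		child := types.ManagedObjectReference{Type: "VirtualMachineSnapshot", Value: "snap-2"}
//		tree := []types.VirtualMachineSnapshotTree{{
//			Snapshot:          root,
//			ChildSnapshotList: []types.VirtualMachineSnapshotTree{{Snapshot: child}},
//		}}
//		if findSnapshotInTree(tree, child) == nil {
//			t.Fatal("child not found")
//		}
//		if p := findParentSnapshotInTree(tree, child); p == nil || *p != root {
//			t.Fatal("wrong parent")
//		}
//		if n := len(allSnapshotsInTree(tree)); n != 2 {
//			t.Fatalf("want 2 snapshots, got %d", n)
//		}
//		// Removing the root without removing children promotes the child.
//		if rest := removeSnapshotInTree(tree, root, false); len(rest) != 1 || rest[0].Snapshot != child {
//			t.Fatal("child was not promoted to root")
//		}
//	}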
validateGuestID
cereal.pb.go
// Code generated by protoc-gen-go. DO NOT EDIT. // versions: // protoc-gen-go v1.25.0 // protoc v3.12.3 // source: interservice/cereal/cereal.proto package cereal import ( context "context" proto "github.com/golang/protobuf/proto" timestamp "github.com/golang/protobuf/ptypes/timestamp" wrappers "github.com/golang/protobuf/ptypes/wrappers" grpc "google.golang.org/grpc" codes "google.golang.org/grpc/codes" status "google.golang.org/grpc/status" protoreflect "google.golang.org/protobuf/reflect/protoreflect" protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" ) const ( // Verify that this generated code is sufficiently up-to-date. _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) // Verify that runtime/protoimpl is sufficiently up-to-date. _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) ) // This is a compile-time assertion that a sufficiently up-to-date version // of the legacy proto package is being used. const _ = proto.ProtoPackageIsVersion4 type Task struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty" toml:"name,omitempty" mapstructure:"name,omitempty"` Parameters []byte `protobuf:"bytes,2,opt,name=parameters,proto3" json:"parameters,omitempty" toml:"parameters,omitempty" mapstructure:"parameters,omitempty"` StartAfter *timestamp.Timestamp `protobuf:"bytes,3,opt,name=start_after,json=startAfter,proto3" json:"start_after,omitempty" toml:"start_after,omitempty" mapstructure:"start_after,omitempty"` Metadata *TaskMetadata `protobuf:"bytes,4,opt,name=metadata,proto3" json:"metadata,omitempty" toml:"metadata,omitempty" mapstructure:"metadata,omitempty"` } func (x *Task) Reset() { *x = Task{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[0] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Task) String() string { return protoimpl.X.MessageStringOf(x) } func (*Task) ProtoMessage() {} func (x *Task) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[0] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Task.ProtoReflect.Descriptor instead. 
func (*Task) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{0} } func (x *Task) GetName() string { if x != nil { return x.Name } return "" } func (x *Task) GetParameters() []byte { if x != nil { return x.Parameters } return nil } func (x *Task) GetStartAfter() *timestamp.Timestamp { if x != nil { return x.StartAfter } return nil } func (x *Task) GetMetadata() *TaskMetadata { if x != nil { return x.Metadata } return nil } type TaskMetadata struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields EnqueuedAt *timestamp.Timestamp `protobuf:"bytes,1,opt,name=enqueued_at,json=enqueuedAt,proto3" json:"enqueued_at,omitempty" toml:"enqueued_at,omitempty" mapstructure:"enqueued_at,omitempty"` } func (x *TaskMetadata) Reset() { *x = TaskMetadata{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[1] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *TaskMetadata) String() string { return protoimpl.X.MessageStringOf(x) } func (*TaskMetadata) ProtoMessage() {} func (x *TaskMetadata) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[1] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use TaskMetadata.ProtoReflect.Descriptor instead. func (*TaskMetadata) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{1} } func (x *TaskMetadata) GetEnqueuedAt() *timestamp.Timestamp { if x != nil { return x.EnqueuedAt } return nil } type WorkflowInstance struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` Status string `protobuf:"bytes,4,opt,name=status,proto3" json:"status,omitempty" toml:"status,omitempty" mapstructure:"status,omitempty"` Parameters []byte `protobuf:"bytes,5,opt,name=parameters,proto3" json:"parameters,omitempty" toml:"parameters,omitempty" mapstructure:"parameters,omitempty"` Payload []byte `protobuf:"bytes,6,opt,name=payload,proto3" json:"payload,omitempty" toml:"payload,omitempty" mapstructure:"payload,omitempty"` Result []byte `protobuf:"bytes,7,opt,name=result,proto3" json:"result,omitempty" toml:"result,omitempty" mapstructure:"result,omitempty"` Err string `protobuf:"bytes,8,opt,name=err,proto3" json:"err,omitempty" toml:"err,omitempty" mapstructure:"err,omitempty"` } func (x *WorkflowInstance) Reset() { *x = WorkflowInstance{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[2] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *WorkflowInstance) String() string { return protoimpl.X.MessageStringOf(x) } func (*WorkflowInstance) ProtoMessage() {} func (x *WorkflowInstance) ProtoReflect() protoreflect.Message { mi := 
&file_interservice_cereal_cereal_proto_msgTypes[2] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use WorkflowInstance.ProtoReflect.Descriptor instead. func (*WorkflowInstance) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{2} } func (x *WorkflowInstance) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *WorkflowInstance) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *WorkflowInstance) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } func (x *WorkflowInstance) GetStatus() string { if x != nil { return x.Status } return "" } func (x *WorkflowInstance) GetParameters() []byte { if x != nil { return x.Parameters } return nil } func (x *WorkflowInstance) GetPayload() []byte { if x != nil { return x.Payload } return nil } func (x *WorkflowInstance) GetResult() []byte { if x != nil { return x.Result } return nil } func (x *WorkflowInstance) GetErr() string { if x != nil { return x.Err } return "" } type TaskResult struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields TaskName string `protobuf:"bytes,1,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty" toml:"task_name,omitempty" mapstructure:"task_name,omitempty"` Parameters []byte `protobuf:"bytes,2,opt,name=parameters,proto3" json:"parameters,omitempty" toml:"parameters,omitempty" mapstructure:"parameters,omitempty"` Status string `protobuf:"bytes,3,opt,name=status,proto3" json:"status,omitempty" toml:"status,omitempty" mapstructure:"status,omitempty"` ErrorText string `protobuf:"bytes,4,opt,name=error_text,json=errorText,proto3" json:"error_text,omitempty" toml:"error_text,omitempty" mapstructure:"error_text,omitempty"` Result []byte `protobuf:"bytes,5,opt,name=result,proto3" json:"result,omitempty" toml:"result,omitempty" mapstructure:"result,omitempty"` } func (x *TaskResult) Reset() { *x = TaskResult{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[3] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *TaskResult) String() string { return protoimpl.X.MessageStringOf(x) } func (*TaskResult) ProtoMessage() {} func (x *TaskResult) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[3] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use TaskResult.ProtoReflect.Descriptor instead. 
func (*TaskResult) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{3} } func (x *TaskResult) GetTaskName() string { if x != nil { return x.TaskName } return "" } func (x *TaskResult) GetParameters() []byte { if x != nil { return x.Parameters } return nil } func (x *TaskResult) GetStatus() string { if x != nil { return x.Status } return "" } func (x *TaskResult) GetErrorText() string { if x != nil { return x.ErrorText } return "" } func (x *TaskResult) GetResult() []byte { if x != nil { return x.Result } return nil } type WorkflowEvent struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty" toml:"type,omitempty" mapstructure:"type,omitempty"` EnqueuedTaskCount int64 `protobuf:"varint,2,opt,name=enqueued_task_count,json=enqueuedTaskCount,proto3" json:"enqueued_task_count,omitempty" toml:"enqueued_task_count,omitempty" mapstructure:"enqueued_task_count,omitempty"` CompletedTaskCount int64 `protobuf:"varint,3,opt,name=completed_task_count,json=completedTaskCount,proto3" json:"completed_task_count,omitempty" toml:"completed_task_count,omitempty" mapstructure:"completed_task_count,omitempty"` TaskResult *TaskResult `protobuf:"bytes,4,opt,name=task_result,json=taskResult,proto3" json:"task_result,omitempty" toml:"task_result,omitempty" mapstructure:"task_result,omitempty"` EnqueuedAt *timestamp.Timestamp `protobuf:"bytes,5,opt,name=enqueued_at,json=enqueuedAt,proto3" json:"enqueued_at,omitempty" toml:"enqueued_at,omitempty" mapstructure:"enqueued_at,omitempty"` } func (x *WorkflowEvent) Reset() { *x = WorkflowEvent{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[4] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *WorkflowEvent) String() string { return protoimpl.X.MessageStringOf(x) } func (*WorkflowEvent) ProtoMessage() {} func (x *WorkflowEvent) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[4] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use WorkflowEvent.ProtoReflect.Descriptor instead. 
func (*WorkflowEvent) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{4} } func (x *WorkflowEvent) GetType() string { if x != nil { return x.Type } return "" } func (x *WorkflowEvent) GetEnqueuedTaskCount() int64 { if x != nil { return x.EnqueuedTaskCount } return 0 } func (x *WorkflowEvent) GetCompletedTaskCount() int64 { if x != nil { return x.CompletedTaskCount } return 0 } func (x *WorkflowEvent) GetTaskResult() *TaskResult { if x != nil { return x.TaskResult } return nil } func (x *WorkflowEvent) GetEnqueuedAt() *timestamp.Timestamp { if x != nil { return x.EnqueuedAt } return nil } type DequeueWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to Cmd: // *DequeueWorkflowRequest_Dequeue_ // *DequeueWorkflowRequest_Continue_ // *DequeueWorkflowRequest_Done_ // *DequeueWorkflowRequest_Fail_ Cmd isDequeueWorkflowRequest_Cmd `protobuf_oneof:"cmd"` } func (x *DequeueWorkflowRequest) Reset() { *x = DequeueWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[5] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowRequest) ProtoMessage() {} func (x *DequeueWorkflowRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[5] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowRequest.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{5} } func (m *DequeueWorkflowRequest) GetCmd() isDequeueWorkflowRequest_Cmd { if m != nil { return m.Cmd } return nil } func (x *DequeueWorkflowRequest) GetDequeue() *DequeueWorkflowRequest_Dequeue { if x, ok := x.GetCmd().(*DequeueWorkflowRequest_Dequeue_); ok { return x.Dequeue } return nil } func (x *DequeueWorkflowRequest) GetContinue() *DequeueWorkflowRequest_Continue { if x, ok := x.GetCmd().(*DequeueWorkflowRequest_Continue_); ok { return x.Continue } return nil } func (x *DequeueWorkflowRequest) GetDone() *DequeueWorkflowRequest_Done { if x, ok := x.GetCmd().(*DequeueWorkflowRequest_Done_); ok { return x.Done } return nil } func (x *DequeueWorkflowRequest) GetFail() *DequeueWorkflowRequest_Fail { if x, ok := x.GetCmd().(*DequeueWorkflowRequest_Fail_); ok { return x.Fail } return nil } type isDequeueWorkflowRequest_Cmd interface { isDequeueWorkflowRequest_Cmd() } type DequeueWorkflowRequest_Dequeue_ struct { Dequeue *DequeueWorkflowRequest_Dequeue `protobuf:"bytes,1,opt,name=dequeue,proto3,oneof"` } type DequeueWorkflowRequest_Continue_ struct { Continue *DequeueWorkflowRequest_Continue `protobuf:"bytes,2,opt,name=continue,proto3,oneof"` } type DequeueWorkflowRequest_Done_ struct { Done *DequeueWorkflowRequest_Done `protobuf:"bytes,3,opt,name=done,proto3,oneof"` } type DequeueWorkflowRequest_Fail_ struct { Fail *DequeueWorkflowRequest_Fail `protobuf:"bytes,4,opt,name=fail,proto3,oneof"` } func (*DequeueWorkflowRequest_Dequeue_) isDequeueWorkflowRequest_Cmd() {} func (*DequeueWorkflowRequest_Continue_) isDequeueWorkflowRequest_Cmd() {} func (*DequeueWorkflowRequest_Done_) isDequeueWorkflowRequest_Cmd() {} func (*DequeueWorkflowRequest_Fail_) isDequeueWorkflowRequest_Cmd() {} type DequeueWorkflowChunkRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to Cmd: // *DequeueWorkflowChunkRequest_Dequeue_ // *DequeueWorkflowChunkRequest_Continue_ // *DequeueWorkflowChunkRequest_Done_ // *DequeueWorkflowChunkRequest_Fail_ Cmd isDequeueWorkflowChunkRequest_Cmd `protobuf_oneof:"cmd"` } func (x *DequeueWorkflowChunkRequest) Reset() { *x = DequeueWorkflowChunkRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[6] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowChunkRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowChunkRequest) ProtoMessage() {} func (x *DequeueWorkflowChunkRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[6] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowChunkRequest.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowChunkRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{6} } func (m *DequeueWorkflowChunkRequest) GetCmd() isDequeueWorkflowChunkRequest_Cmd { if m != nil { return m.Cmd } return nil } func (x *DequeueWorkflowChunkRequest) GetDequeue() *DequeueWorkflowChunkRequest_Dequeue { if x, ok := x.GetCmd().(*DequeueWorkflowChunkRequest_Dequeue_); ok { return x.Dequeue } return nil } func (x *DequeueWorkflowChunkRequest) GetContinue() *DequeueWorkflowChunkRequest_Continue { if x, ok := x.GetCmd().(*DequeueWorkflowChunkRequest_Continue_); ok { return x.Continue } return nil } func (x *DequeueWorkflowChunkRequest) GetDone() *DequeueWorkflowChunkRequest_Done { if x, ok := x.GetCmd().(*DequeueWorkflowChunkRequest_Done_); ok { return x.Done } return nil } func (x *DequeueWorkflowChunkRequest) GetFail() *DequeueWorkflowChunkRequest_Fail { if x, ok := x.GetCmd().(*DequeueWorkflowChunkRequest_Fail_); ok { return x.Fail } return nil } type isDequeueWorkflowChunkRequest_Cmd interface { isDequeueWorkflowChunkRequest_Cmd() } type DequeueWorkflowChunkRequest_Dequeue_ struct { Dequeue *DequeueWorkflowChunkRequest_Dequeue `protobuf:"bytes,1,opt,name=dequeue,proto3,oneof"` } type DequeueWorkflowChunkRequest_Continue_ struct { Continue *DequeueWorkflowChunkRequest_Continue `protobuf:"bytes,2,opt,name=continue,proto3,oneof"` } type DequeueWorkflowChunkRequest_Done_ struct { Done *DequeueWorkflowChunkRequest_Done `protobuf:"bytes,3,opt,name=done,proto3,oneof"` } type DequeueWorkflowChunkRequest_Fail_ struct { Fail *DequeueWorkflowChunkRequest_Fail `protobuf:"bytes,4,opt,name=fail,proto3,oneof"` } func (*DequeueWorkflowChunkRequest_Dequeue_) isDequeueWorkflowChunkRequest_Cmd() {} func (*DequeueWorkflowChunkRequest_Continue_) isDequeueWorkflowChunkRequest_Cmd() {} func (*DequeueWorkflowChunkRequest_Done_) isDequeueWorkflowChunkRequest_Cmd() {} func (*DequeueWorkflowChunkRequest_Fail_) isDequeueWorkflowChunkRequest_Cmd() {} type DequeueWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to Cmd: // *DequeueWorkflowResponse_Dequeue_ // *DequeueWorkflowResponse_Status_ // *DequeueWorkflowResponse_Committed_ Cmd isDequeueWorkflowResponse_Cmd `protobuf_oneof:"cmd"` } func (x *DequeueWorkflowResponse) Reset() { *x = DequeueWorkflowResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[7] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowResponse) ProtoMessage() {} func (x *DequeueWorkflowResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[7] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowResponse.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{7} } func (m *DequeueWorkflowResponse) GetCmd() isDequeueWorkflowResponse_Cmd { if m != nil { return m.Cmd } return nil } func (x *DequeueWorkflowResponse) GetDequeue() *DequeueWorkflowResponse_Dequeue { if x, ok := x.GetCmd().(*DequeueWorkflowResponse_Dequeue_); ok { return x.Dequeue } return nil } func (x *DequeueWorkflowResponse) GetStatus() *DequeueWorkflowResponse_Status { if x, ok := x.GetCmd().(*DequeueWorkflowResponse_Status_); ok { return x.Status } return nil } func (x *DequeueWorkflowResponse) GetCommitted() *DequeueWorkflowResponse_Committed { if x, ok := x.GetCmd().(*DequeueWorkflowResponse_Committed_); ok { return x.Committed } return nil } type isDequeueWorkflowResponse_Cmd interface { isDequeueWorkflowResponse_Cmd() } type DequeueWorkflowResponse_Dequeue_ struct { Dequeue *DequeueWorkflowResponse_Dequeue `protobuf:"bytes,1,opt,name=dequeue,proto3,oneof"` } type DequeueWorkflowResponse_Status_ struct { Status *DequeueWorkflowResponse_Status `protobuf:"bytes,2,opt,name=status,proto3,oneof"` } type DequeueWorkflowResponse_Committed_ struct { Committed *DequeueWorkflowResponse_Committed `protobuf:"bytes,3,opt,name=committed,proto3,oneof"` } func (*DequeueWorkflowResponse_Dequeue_) isDequeueWorkflowResponse_Cmd() {} func (*DequeueWorkflowResponse_Status_) isDequeueWorkflowResponse_Cmd() {} func (*DequeueWorkflowResponse_Committed_) isDequeueWorkflowResponse_Cmd() {} type DequeueWorkflowChunkResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Chunk []byte `protobuf:"bytes,1,opt,name=chunk,proto3" json:"chunk,omitempty" toml:"chunk,omitempty" mapstructure:"chunk,omitempty"` } func (x *DequeueWorkflowChunkResponse) Reset() { *x = DequeueWorkflowChunkResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[8] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowChunkResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowChunkResponse) ProtoMessage() {} func (x *DequeueWorkflowChunkResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[8] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowChunkResponse.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowChunkResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{8} } func (x *DequeueWorkflowChunkResponse) GetChunk() []byte { if x != nil { return x.Chunk } return nil } type EnqueueWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` Parameters []byte `protobuf:"bytes,4,opt,name=parameters,proto3" json:"parameters,omitempty" toml:"parameters,omitempty" mapstructure:"parameters,omitempty"` } func (x *EnqueueWorkflowRequest) Reset() { *x = EnqueueWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[9] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *EnqueueWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*EnqueueWorkflowRequest) ProtoMessage() {} func (x *EnqueueWorkflowRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[9] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use EnqueueWorkflowRequest.ProtoReflect.Descriptor instead. func (*EnqueueWorkflowRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{9} } func (x *EnqueueWorkflowRequest) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *EnqueueWorkflowRequest) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *EnqueueWorkflowRequest) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } func (x *EnqueueWorkflowRequest) GetParameters() []byte { if x != nil { return x.Parameters } return nil } type EnqueueWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *EnqueueWorkflowResponse) Reset() { *x = EnqueueWorkflowResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[10] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *EnqueueWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*EnqueueWorkflowResponse) ProtoMessage() {} func (x *EnqueueWorkflowResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[10] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use EnqueueWorkflowResponse.ProtoReflect.Descriptor instead. 
func (*EnqueueWorkflowResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{10} } type CancelWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` } func (x *CancelWorkflowRequest) Reset() { *x = CancelWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[11] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CancelWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*CancelWorkflowRequest) ProtoMessage() {} func (x *CancelWorkflowRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[11] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CancelWorkflowRequest.ProtoReflect.Descriptor instead. func (*CancelWorkflowRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{11} } func (x *CancelWorkflowRequest) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *CancelWorkflowRequest) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *CancelWorkflowRequest) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } type CancelWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *CancelWorkflowResponse) Reset() { *x = CancelWorkflowResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[12] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CancelWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*CancelWorkflowResponse) ProtoMessage() {} func (x *CancelWorkflowResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[12] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CancelWorkflowResponse.ProtoReflect.Descriptor instead. 
func (*CancelWorkflowResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{12} } type KillWorkflowRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` } func (x *KillWorkflowRequest) Reset() { *x = KillWorkflowRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[13] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *KillWorkflowRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*KillWorkflowRequest) ProtoMessage() {} func (x *KillWorkflowRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[13] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use KillWorkflowRequest.ProtoReflect.Descriptor instead. func (*KillWorkflowRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{13} } func (x *KillWorkflowRequest) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *KillWorkflowRequest) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *KillWorkflowRequest) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } type KillWorkflowResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *KillWorkflowResponse) Reset() { *x = KillWorkflowResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[14] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *KillWorkflowResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*KillWorkflowResponse) ProtoMessage() {} func (x *KillWorkflowResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[14] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use KillWorkflowResponse.ProtoReflect.Descriptor instead. 
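
// Illustrative sketch (not generated code): CancelWorkflowRequest and
// KillWorkflowRequest carry the same domain/instance/workflow addressing
// triple; the difference is presumably graceful cancellation versus forced
// termination on the server side. Values are hypothetical.
func exampleStopRequests() (*CancelWorkflowRequest, *KillWorkflowRequest) {
	cancel := &CancelWorkflowRequest{
		Domain:       "compliance",
		InstanceName: "nightly-scan",
		WorkflowName: "scan-workflow",
	}
	kill := &KillWorkflowRequest{
		Domain:       "compliance",
		InstanceName: "nightly-scan",
		WorkflowName: "scan-workflow",
	}
	return cancel, kill
}
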
func (*KillWorkflowResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{14} } type DequeueTaskRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to Cmd: // *DequeueTaskRequest_Dequeue_ // *DequeueTaskRequest_Succeed_ // *DequeueTaskRequest_Fail_ Cmd isDequeueTaskRequest_Cmd `protobuf_oneof:"cmd"` } func (x *DequeueTaskRequest) Reset() { *x = DequeueTaskRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[15] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueTaskRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueTaskRequest) ProtoMessage() {} func (x *DequeueTaskRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[15] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueTaskRequest.ProtoReflect.Descriptor instead. func (*DequeueTaskRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{15} } func (m *DequeueTaskRequest) GetCmd() isDequeueTaskRequest_Cmd { if m != nil { return m.Cmd } return nil } func (x *DequeueTaskRequest) GetDequeue() *DequeueTaskRequest_Dequeue { if x, ok := x.GetCmd().(*DequeueTaskRequest_Dequeue_); ok { return x.Dequeue } return nil } func (x *DequeueTaskRequest) GetSucceed() *DequeueTaskRequest_Succeed { if x, ok := x.GetCmd().(*DequeueTaskRequest_Succeed_); ok { return x.Succeed } return nil } func (x *DequeueTaskRequest) GetFail() *DequeueTaskRequest_Fail { if x, ok := x.GetCmd().(*DequeueTaskRequest_Fail_); ok { return x.Fail } return nil } type isDequeueTaskRequest_Cmd interface { isDequeueTaskRequest_Cmd() } type DequeueTaskRequest_Dequeue_ struct { Dequeue *DequeueTaskRequest_Dequeue `protobuf:"bytes,1,opt,name=dequeue,proto3,oneof"` } type DequeueTaskRequest_Succeed_ struct { Succeed *DequeueTaskRequest_Succeed `protobuf:"bytes,2,opt,name=succeed,proto3,oneof"` } type DequeueTaskRequest_Fail_ struct { Fail *DequeueTaskRequest_Fail `protobuf:"bytes,3,opt,name=fail,proto3,oneof"` } func (*DequeueTaskRequest_Dequeue_) isDequeueTaskRequest_Cmd() {} func (*DequeueTaskRequest_Succeed_) isDequeueTaskRequest_Cmd() {} func (*DequeueTaskRequest_Fail_) isDequeueTaskRequest_Cmd() {} type DequeueTaskResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields // Types that are assignable to Cmd: // *DequeueTaskResponse_Dequeue_ // *DequeueTaskResponse_Cancel_ // *DequeueTaskResponse_Committed_ Cmd isDequeueTaskResponse_Cmd `protobuf_oneof:"cmd"` } func (x *DequeueTaskResponse) Reset() { *x = DequeueTaskResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[16] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueTaskResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueTaskResponse) ProtoMessage() {} func (x *DequeueTaskResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[16] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } 
return mi.MessageOf(x) } // Deprecated: Use DequeueTaskResponse.ProtoReflect.Descriptor instead. func (*DequeueTaskResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{16} } func (m *DequeueTaskResponse) GetCmd() isDequeueTaskResponse_Cmd { if m != nil { return m.Cmd } return nil } func (x *DequeueTaskResponse) GetDequeue() *DequeueTaskResponse_Dequeue { if x, ok := x.GetCmd().(*DequeueTaskResponse_Dequeue_); ok { return x.Dequeue } return nil } func (x *DequeueTaskResponse) GetCancel() *DequeueTaskResponse_Cancel { if x, ok := x.GetCmd().(*DequeueTaskResponse_Cancel_); ok { return x.Cancel } return nil } func (x *DequeueTaskResponse) GetCommitted() *DequeueTaskResponse_Committed { if x, ok := x.GetCmd().(*DequeueTaskResponse_Committed_); ok { return x.Committed } return nil } type isDequeueTaskResponse_Cmd interface { isDequeueTaskResponse_Cmd() } type DequeueTaskResponse_Dequeue_ struct { Dequeue *DequeueTaskResponse_Dequeue `protobuf:"bytes,1,opt,name=dequeue,proto3,oneof"` } type DequeueTaskResponse_Cancel_ struct { Cancel *DequeueTaskResponse_Cancel `protobuf:"bytes,2,opt,name=cancel,proto3,oneof"` } type DequeueTaskResponse_Committed_ struct { Committed *DequeueTaskResponse_Committed `protobuf:"bytes,3,opt,name=committed,proto3,oneof"` } func (*DequeueTaskResponse_Dequeue_) isDequeueTaskResponse_Cmd() {} func (*DequeueTaskResponse_Cancel_) isDequeueTaskResponse_Cmd() {} func (*DequeueTaskResponse_Committed_) isDequeueTaskResponse_Cmd() {} type CreateWorkflowScheduleRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` Parameters []byte `protobuf:"bytes,4,opt,name=parameters,proto3" json:"parameters,omitempty" toml:"parameters,omitempty" mapstructure:"parameters,omitempty"` Enabled bool `protobuf:"varint,5,opt,name=enabled,proto3" json:"enabled,omitempty" toml:"enabled,omitempty" mapstructure:"enabled,omitempty"` Recurrence string `protobuf:"bytes,6,opt,name=recurrence,proto3" json:"recurrence,omitempty" toml:"recurrence,omitempty" mapstructure:"recurrence,omitempty"` Payload []byte `protobuf:"bytes,7,opt,name=payload,proto3" json:"payload,omitempty" toml:"payload,omitempty" mapstructure:"payload,omitempty"` NextRunAt *timestamp.Timestamp `protobuf:"bytes,8,opt,name=next_run_at,json=nextRunAt,proto3" json:"next_run_at,omitempty" toml:"next_run_at,omitempty" mapstructure:"next_run_at,omitempty"` } func (x *CreateWorkflowScheduleRequest) Reset() { *x = CreateWorkflowScheduleRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[17] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CreateWorkflowScheduleRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*CreateWorkflowScheduleRequest) ProtoMessage() {} func (x *CreateWorkflowScheduleRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[17] if protoimpl.UnsafeEnabled && x 
!= nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CreateWorkflowScheduleRequest.ProtoReflect.Descriptor instead. func (*CreateWorkflowScheduleRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{17} } func (x *CreateWorkflowScheduleRequest) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *CreateWorkflowScheduleRequest) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *CreateWorkflowScheduleRequest) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } func (x *CreateWorkflowScheduleRequest) GetParameters() []byte { if x != nil { return x.Parameters } return nil } func (x *CreateWorkflowScheduleRequest) GetEnabled() bool { if x != nil { return x.Enabled } return false } func (x *CreateWorkflowScheduleRequest) GetRecurrence() string { if x != nil { return x.Recurrence } return "" } func (x *CreateWorkflowScheduleRequest) GetPayload() []byte { if x != nil { return x.Payload } return nil } func (x *CreateWorkflowScheduleRequest) GetNextRunAt() *timestamp.Timestamp { if x != nil { return x.NextRunAt } return nil } type CreateWorkflowScheduleResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *CreateWorkflowScheduleResponse) Reset() { *x = CreateWorkflowScheduleResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[18] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *CreateWorkflowScheduleResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*CreateWorkflowScheduleResponse) ProtoMessage() {} func (x *CreateWorkflowScheduleResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[18] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use CreateWorkflowScheduleResponse.ProtoReflect.Descriptor instead. func (*CreateWorkflowScheduleResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{18} } type ListWorkflowSchedulesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` } func (x *ListWorkflowSchedulesRequest) Reset() { *x = ListWorkflowSchedulesRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[19] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListWorkflowSchedulesRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListWorkflowSchedulesRequest) ProtoMessage() {} func (x *ListWorkflowSchedulesRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[19] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListWorkflowSchedulesRequest.ProtoReflect.Descriptor instead. 
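
// Illustrative sketch (not generated code): a CreateWorkflowScheduleRequest for
// a recurring workflow. The RRULE-style Recurrence string and the other values
// are assumptions for illustration; NextRunAt reuses the imported
// google.protobuf.Timestamp type that the generated struct expects.
func exampleCreateScheduleRequest(firstRun *timestamp.Timestamp) *CreateWorkflowScheduleRequest {
	return &CreateWorkflowScheduleRequest{
		Domain:       "compliance",
		InstanceName: "nightly-scan",
		WorkflowName: "scan-workflow",
		Parameters:   []byte(`{"profiles":["linux-baseline"]}`),
		Enabled:      true,
		Recurrence:   "FREQ=DAILY;INTERVAL=1", // hypothetical recurrence rule
		NextRunAt:    firstRun,
	}
}
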
func (*ListWorkflowSchedulesRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{19} } func (x *ListWorkflowSchedulesRequest) GetDomain() string { if x != nil { return x.Domain } return "" } type Schedule struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` Parameters []byte `protobuf:"bytes,4,opt,name=parameters,proto3" json:"parameters,omitempty" toml:"parameters,omitempty" mapstructure:"parameters,omitempty"` Recurrence string `protobuf:"bytes,5,opt,name=recurrence,proto3" json:"recurrence,omitempty" toml:"recurrence,omitempty" mapstructure:"recurrence,omitempty"` NextDueAt *timestamp.Timestamp `protobuf:"bytes,6,opt,name=next_due_at,json=nextDueAt,proto3" json:"next_due_at,omitempty" toml:"next_due_at,omitempty" mapstructure:"next_due_at,omitempty"` LastEnqueuedAt *timestamp.Timestamp `protobuf:"bytes,7,opt,name=last_enqueued_at,json=lastEnqueuedAt,proto3" json:"last_enqueued_at,omitempty" toml:"last_enqueued_at,omitempty" mapstructure:"last_enqueued_at,omitempty"` Enabled bool `protobuf:"varint,8,opt,name=enabled,proto3" json:"enabled,omitempty" toml:"enabled,omitempty" mapstructure:"enabled,omitempty"` LastStart *timestamp.Timestamp `protobuf:"bytes,9,opt,name=last_start,json=lastStart,proto3" json:"last_start,omitempty" toml:"last_start,omitempty" mapstructure:"last_start,omitempty"` LastEnd *timestamp.Timestamp `protobuf:"bytes,10,opt,name=last_end,json=lastEnd,proto3" json:"last_end,omitempty" toml:"last_end,omitempty" mapstructure:"last_end,omitempty"` } func (x *Schedule) Reset() { *x = Schedule{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[20] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *Schedule) String() string { return protoimpl.X.MessageStringOf(x) } func (*Schedule) ProtoMessage() {} func (x *Schedule) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[20] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use Schedule.ProtoReflect.Descriptor instead. 
func (*Schedule) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{20} } func (x *Schedule) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *Schedule) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *Schedule) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } func (x *Schedule) GetParameters() []byte { if x != nil { return x.Parameters } return nil } func (x *Schedule) GetRecurrence() string { if x != nil { return x.Recurrence } return "" } func (x *Schedule) GetNextDueAt() *timestamp.Timestamp { if x != nil { return x.NextDueAt } return nil } func (x *Schedule) GetLastEnqueuedAt() *timestamp.Timestamp { if x != nil { return x.LastEnqueuedAt } return nil } func (x *Schedule) GetEnabled() bool { if x != nil { return x.Enabled } return false } func (x *Schedule) GetLastStart() *timestamp.Timestamp { if x != nil { return x.LastStart } return nil } func (x *Schedule) GetLastEnd() *timestamp.Timestamp { if x != nil { return x.LastEnd } return nil } type ListWorkflowSchedulesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields NumSchedules int32 `protobuf:"varint,1,opt,name=num_schedules,json=numSchedules,proto3" json:"num_schedules,omitempty" toml:"num_schedules,omitempty" mapstructure:"num_schedules,omitempty"` Schedule *Schedule `protobuf:"bytes,2,opt,name=schedule,proto3" json:"schedule,omitempty" toml:"schedule,omitempty" mapstructure:"schedule,omitempty"` } func (x *ListWorkflowSchedulesResponse) Reset() { *x = ListWorkflowSchedulesResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[21] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListWorkflowSchedulesResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListWorkflowSchedulesResponse) ProtoMessage() {} func (x *ListWorkflowSchedulesResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[21] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListWorkflowSchedulesResponse.ProtoReflect.Descriptor instead. 
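
// Illustrative sketch (not generated code): ListWorkflowSchedulesResponse pairs
// a NumSchedules count with a single Schedule, which suggests the server
// streams one message per schedule; that interpretation is an assumption here.
func exampleScheduleSummary(resp *ListWorkflowSchedulesResponse) string {
	s := resp.GetSchedule()
	if s == nil {
		return "empty response message"
	}
	// Generated getters are nil-safe, so chained access is fine even on
	// partially populated messages.
	return s.GetDomain() + "/" + s.GetWorkflowName() + "/" + s.GetInstanceName()
}
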
func (*ListWorkflowSchedulesResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{21} } func (x *ListWorkflowSchedulesResponse) GetNumSchedules() int32 { if x != nil { return x.NumSchedules } return 0 } func (x *ListWorkflowSchedulesResponse) GetSchedule() *Schedule { if x != nil { return x.Schedule } return nil } type GetWorkflowScheduleByNameRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` } func (x *GetWorkflowScheduleByNameRequest) Reset() { *x = GetWorkflowScheduleByNameRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[22] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetWorkflowScheduleByNameRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetWorkflowScheduleByNameRequest) ProtoMessage() {} func (x *GetWorkflowScheduleByNameRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[22] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetWorkflowScheduleByNameRequest.ProtoReflect.Descriptor instead. func (*GetWorkflowScheduleByNameRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{22} } func (x *GetWorkflowScheduleByNameRequest) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *GetWorkflowScheduleByNameRequest) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *GetWorkflowScheduleByNameRequest) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } type GetWorkflowScheduleByNameResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Schedule *Schedule `protobuf:"bytes,1,opt,name=schedule,proto3" json:"schedule,omitempty" toml:"schedule,omitempty" mapstructure:"schedule,omitempty"` } func (x *GetWorkflowScheduleByNameResponse) Reset() { *x = GetWorkflowScheduleByNameResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[23] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetWorkflowScheduleByNameResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetWorkflowScheduleByNameResponse) ProtoMessage() {} func (x *GetWorkflowScheduleByNameResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[23] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetWorkflowScheduleByNameResponse.ProtoReflect.Descriptor instead. 
func (*GetWorkflowScheduleByNameResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{23} } func (x *GetWorkflowScheduleByNameResponse) GetSchedule() *Schedule { if x != nil { return x.Schedule } return nil } type UpdateWorkflowScheduleByNameRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` Enabled *wrappers.BoolValue `protobuf:"bytes,4,opt,name=enabled,proto3" json:"enabled,omitempty" toml:"enabled,omitempty" mapstructure:"enabled,omitempty"` Parameters *wrappers.BytesValue `protobuf:"bytes,5,opt,name=parameters,proto3" json:"parameters,omitempty" toml:"parameters,omitempty" mapstructure:"parameters,omitempty"` Recurrence *wrappers.StringValue `protobuf:"bytes,6,opt,name=recurrence,proto3" json:"recurrence,omitempty" toml:"recurrence,omitempty" mapstructure:"recurrence,omitempty"` NextRunAt *timestamp.Timestamp `protobuf:"bytes,7,opt,name=next_run_at,json=nextRunAt,proto3" json:"next_run_at,omitempty" toml:"next_run_at,omitempty" mapstructure:"next_run_at,omitempty"` } func (x *UpdateWorkflowScheduleByNameRequest) Reset() { *x = UpdateWorkflowScheduleByNameRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[24] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *UpdateWorkflowScheduleByNameRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*UpdateWorkflowScheduleByNameRequest) ProtoMessage() {} func (x *UpdateWorkflowScheduleByNameRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[24] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use UpdateWorkflowScheduleByNameRequest.ProtoReflect.Descriptor instead. 
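
// Illustrative sketch (not generated code): UpdateWorkflowScheduleByNameRequest
// uses wrapper types (BoolValue, BytesValue, StringValue) instead of plain
// scalars, presumably so the server can tell "not provided" (nil) apart from a
// zero value and apply a partial update. Values are hypothetical.
func exampleDisableSchedule() *UpdateWorkflowScheduleByNameRequest {
	return &UpdateWorkflowScheduleByNameRequest{
		Domain:       "compliance",
		InstanceName: "nightly-scan",
		WorkflowName: "scan-workflow",
		Enabled:      &wrappers.BoolValue{Value: false},
		// Parameters, Recurrence, and NextRunAt are left nil: untouched.
	}
}
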
func (*UpdateWorkflowScheduleByNameRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{24} } func (x *UpdateWorkflowScheduleByNameRequest) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *UpdateWorkflowScheduleByNameRequest) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *UpdateWorkflowScheduleByNameRequest) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } func (x *UpdateWorkflowScheduleByNameRequest) GetEnabled() *wrappers.BoolValue { if x != nil { return x.Enabled } return nil } func (x *UpdateWorkflowScheduleByNameRequest) GetParameters() *wrappers.BytesValue { if x != nil { return x.Parameters } return nil } func (x *UpdateWorkflowScheduleByNameRequest) GetRecurrence() *wrappers.StringValue { if x != nil { return x.Recurrence } return nil } func (x *UpdateWorkflowScheduleByNameRequest) GetNextRunAt() *timestamp.Timestamp { if x != nil { return x.NextRunAt } return nil } type UpdateWorkflowScheduleByNameResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *UpdateWorkflowScheduleByNameResponse) Reset() { *x = UpdateWorkflowScheduleByNameResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[25] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *UpdateWorkflowScheduleByNameResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*UpdateWorkflowScheduleByNameResponse) ProtoMessage() {} func (x *UpdateWorkflowScheduleByNameResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[25] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use UpdateWorkflowScheduleByNameResponse.ProtoReflect.Descriptor instead. 
func (*UpdateWorkflowScheduleByNameResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{25} } type GetWorkflowInstanceByNameRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` InstanceName string `protobuf:"bytes,2,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` WorkflowName string `protobuf:"bytes,3,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` } func (x *GetWorkflowInstanceByNameRequest) Reset() { *x = GetWorkflowInstanceByNameRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[26] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetWorkflowInstanceByNameRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetWorkflowInstanceByNameRequest) ProtoMessage() {} func (x *GetWorkflowInstanceByNameRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[26] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetWorkflowInstanceByNameRequest.ProtoReflect.Descriptor instead. func (*GetWorkflowInstanceByNameRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{26} } func (x *GetWorkflowInstanceByNameRequest) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *GetWorkflowInstanceByNameRequest) GetInstanceName() string { if x != nil { return x.InstanceName } return "" } func (x *GetWorkflowInstanceByNameRequest) GetWorkflowName() string { if x != nil { return x.WorkflowName } return "" } type GetWorkflowInstanceByNameResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields WorkflowInstance *WorkflowInstance `protobuf:"bytes,1,opt,name=workflow_instance,json=workflowInstance,proto3" json:"workflow_instance,omitempty" toml:"workflow_instance,omitempty" mapstructure:"workflow_instance,omitempty"` } func (x *GetWorkflowInstanceByNameResponse) Reset() { *x = GetWorkflowInstanceByNameResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[27] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *GetWorkflowInstanceByNameResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*GetWorkflowInstanceByNameResponse) ProtoMessage() {} func (x *GetWorkflowInstanceByNameResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[27] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use GetWorkflowInstanceByNameResponse.ProtoReflect.Descriptor instead. 
func (*GetWorkflowInstanceByNameResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{27} } func (x *GetWorkflowInstanceByNameResponse) GetWorkflowInstance() *WorkflowInstance { if x != nil { return x.WorkflowInstance } return nil } type ListWorkflowInstancesRequest struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` WorkflowName *wrappers.StringValue `protobuf:"bytes,2,opt,name=workflow_name,json=workflowName,proto3" json:"workflow_name,omitempty" toml:"workflow_name,omitempty" mapstructure:"workflow_name,omitempty"` InstanceName *wrappers.StringValue `protobuf:"bytes,3,opt,name=instance_name,json=instanceName,proto3" json:"instance_name,omitempty" toml:"instance_name,omitempty" mapstructure:"instance_name,omitempty"` IsRunning *wrappers.BoolValue `protobuf:"bytes,4,opt,name=is_running,json=isRunning,proto3" json:"is_running,omitempty" toml:"is_running,omitempty" mapstructure:"is_running,omitempty"` } func (x *ListWorkflowInstancesRequest) Reset() { *x = ListWorkflowInstancesRequest{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[28] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListWorkflowInstancesRequest) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListWorkflowInstancesRequest) ProtoMessage() {} func (x *ListWorkflowInstancesRequest) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[28] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListWorkflowInstancesRequest.ProtoReflect.Descriptor instead. 
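
// Illustrative sketch (not generated code): ListWorkflowInstancesRequest uses
// nullable wrappers for its filters, so a nil field plausibly means "do not
// filter on this dimension". Values are hypothetical.
func exampleListRunningInstances() *ListWorkflowInstancesRequest {
	return &ListWorkflowInstancesRequest{
		Domain:    "compliance",
		IsRunning: &wrappers.BoolValue{Value: true},
		// WorkflowName and InstanceName stay nil: match any workflow/instance.
	}
}
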
func (*ListWorkflowInstancesRequest) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{28} } func (x *ListWorkflowInstancesRequest) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *ListWorkflowInstancesRequest) GetWorkflowName() *wrappers.StringValue { if x != nil { return x.WorkflowName } return nil } func (x *ListWorkflowInstancesRequest) GetInstanceName() *wrappers.StringValue { if x != nil { return x.InstanceName } return nil } func (x *ListWorkflowInstancesRequest) GetIsRunning() *wrappers.BoolValue { if x != nil { return x.IsRunning } return nil } type ListWorkflowInstancesResponse struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields WorkflowInstance *WorkflowInstance `protobuf:"bytes,1,opt,name=workflow_instance,json=workflowInstance,proto3" json:"workflow_instance,omitempty" toml:"workflow_instance,omitempty" mapstructure:"workflow_instance,omitempty"` } func (x *ListWorkflowInstancesResponse) Reset() { *x = ListWorkflowInstancesResponse{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[29] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *ListWorkflowInstancesResponse) String() string { return protoimpl.X.MessageStringOf(x) } func (*ListWorkflowInstancesResponse) ProtoMessage() {} func (x *ListWorkflowInstancesResponse) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[29] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use ListWorkflowInstancesResponse.ProtoReflect.Descriptor instead. func (*ListWorkflowInstancesResponse) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{29} } func (x *ListWorkflowInstancesResponse) GetWorkflowInstance() *WorkflowInstance { if x != nil { return x.WorkflowInstance } return nil } type DequeueWorkflowRequest_Dequeue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` WorkflowNames []string `protobuf:"bytes,2,rep,name=workflow_names,json=workflowNames,proto3" json:"workflow_names,omitempty" toml:"workflow_names,omitempty" mapstructure:"workflow_names,omitempty"` } func (x *DequeueWorkflowRequest_Dequeue) Reset() { *x = DequeueWorkflowRequest_Dequeue{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[30] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowRequest_Dequeue) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowRequest_Dequeue) ProtoMessage() {} func (x *DequeueWorkflowRequest_Dequeue) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[30] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowRequest_Dequeue.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowRequest_Dequeue) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{5, 0} } func (x *DequeueWorkflowRequest_Dequeue) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *DequeueWorkflowRequest_Dequeue) GetWorkflowNames() []string { if x != nil { return x.WorkflowNames } return nil } type DequeueWorkflowRequest_Continue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty" toml:"payload,omitempty" mapstructure:"payload,omitempty"` Tasks []*Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty" toml:"tasks,omitempty" mapstructure:"tasks,omitempty"` } func (x *DequeueWorkflowRequest_Continue) Reset() { *x = DequeueWorkflowRequest_Continue{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[31] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowRequest_Continue) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowRequest_Continue) ProtoMessage() {} func (x *DequeueWorkflowRequest_Continue) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[31] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowRequest_Continue.ProtoReflect.Descriptor instead. func (*DequeueWorkflowRequest_Continue) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{5, 1} } func (x *DequeueWorkflowRequest_Continue) GetPayload() []byte { if x != nil { return x.Payload } return nil } func (x *DequeueWorkflowRequest_Continue) GetTasks() []*Task { if x != nil { return x.Tasks } return nil } type DequeueWorkflowRequest_Fail struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty" toml:"err,omitempty" mapstructure:"err,omitempty"` } func (x *DequeueWorkflowRequest_Fail) Reset() { *x = DequeueWorkflowRequest_Fail{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[32] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowRequest_Fail) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowRequest_Fail) ProtoMessage() {} func (x *DequeueWorkflowRequest_Fail) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[32] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowRequest_Fail.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowRequest_Fail) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{5, 2} } func (x *DequeueWorkflowRequest_Fail) GetErr() string { if x != nil { return x.Err } return "" } type DequeueWorkflowRequest_Done struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Result []byte `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty" toml:"result,omitempty" mapstructure:"result,omitempty"` } func (x *DequeueWorkflowRequest_Done) Reset() { *x = DequeueWorkflowRequest_Done{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[33] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowRequest_Done) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowRequest_Done) ProtoMessage() {} func (x *DequeueWorkflowRequest_Done) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[33] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowRequest_Done.ProtoReflect.Descriptor instead. func (*DequeueWorkflowRequest_Done) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{5, 3} } func (x *DequeueWorkflowRequest_Done) GetResult() []byte { if x != nil { return x.Result } return nil } type DequeueWorkflowChunkRequest_Dequeue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` WorkflowNames []string `protobuf:"bytes,2,rep,name=workflow_names,json=workflowNames,proto3" json:"workflow_names,omitempty" toml:"workflow_names,omitempty" mapstructure:"workflow_names,omitempty"` } func (x *DequeueWorkflowChunkRequest_Dequeue) Reset() { *x = DequeueWorkflowChunkRequest_Dequeue{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[34] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowChunkRequest_Dequeue) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowChunkRequest_Dequeue) ProtoMessage() {} func (x *DequeueWorkflowChunkRequest_Dequeue) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[34] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowChunkRequest_Dequeue.ProtoReflect.Descriptor instead. 
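
// Illustrative sketch (not generated code): the nested Dequeue, Continue, Fail,
// and Done messages are the per-arm payloads of the DequeueWorkflow
// conversation. A worker would plausibly open with Dequeue and later answer
// with Continue, Fail, or Done; the values below are hypothetical.
func exampleWorkflowDequeueArms() (*DequeueWorkflowRequest_Dequeue, *DequeueWorkflowRequest_Done) {
	open := &DequeueWorkflowRequest_Dequeue{
		Domain:        "compliance",
		WorkflowNames: []string{"scan-workflow"},
	}
	finish := &DequeueWorkflowRequest_Done{
		Result: []byte(`{"status":"ok"}`),
	}
	return open, finish
}
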
func (*DequeueWorkflowChunkRequest_Dequeue) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{6, 0} } func (x *DequeueWorkflowChunkRequest_Dequeue) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *DequeueWorkflowChunkRequest_Dequeue) GetWorkflowNames() []string { if x != nil { return x.WorkflowNames } return nil } type DequeueWorkflowChunkRequest_Continue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Payload []byte `protobuf:"bytes,1,opt,name=payload,proto3" json:"payload,omitempty" toml:"payload,omitempty" mapstructure:"payload,omitempty"` Tasks []*Task `protobuf:"bytes,2,rep,name=tasks,proto3" json:"tasks,omitempty" toml:"tasks,omitempty" mapstructure:"tasks,omitempty"` } func (x *DequeueWorkflowChunkRequest_Continue) Reset() { *x = DequeueWorkflowChunkRequest_Continue{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[35] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowChunkRequest_Continue) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowChunkRequest_Continue) ProtoMessage() {} func (x *DequeueWorkflowChunkRequest_Continue) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[35] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowChunkRequest_Continue.ProtoReflect.Descriptor instead. func (*DequeueWorkflowChunkRequest_Continue) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{6, 1} } func (x *DequeueWorkflowChunkRequest_Continue) GetPayload() []byte { if x != nil { return x.Payload } return nil } func (x *DequeueWorkflowChunkRequest_Continue) GetTasks() []*Task { if x != nil { return x.Tasks } return nil } type DequeueWorkflowChunkRequest_Fail struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty" toml:"err,omitempty" mapstructure:"err,omitempty"` } func (x *DequeueWorkflowChunkRequest_Fail) Reset() { *x = DequeueWorkflowChunkRequest_Fail{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[36] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowChunkRequest_Fail) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowChunkRequest_Fail) ProtoMessage() {} func (x *DequeueWorkflowChunkRequest_Fail) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[36] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowChunkRequest_Fail.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowChunkRequest_Fail) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{6, 2} } func (x *DequeueWorkflowChunkRequest_Fail) GetErr() string { if x != nil { return x.Err } return "" } type DequeueWorkflowChunkRequest_Done struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Result []byte `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty" toml:"result,omitempty" mapstructure:"result,omitempty"` } func (x *DequeueWorkflowChunkRequest_Done) Reset() { *x = DequeueWorkflowChunkRequest_Done{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[37] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowChunkRequest_Done) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowChunkRequest_Done) ProtoMessage() {} func (x *DequeueWorkflowChunkRequest_Done) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[37] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowChunkRequest_Done.ProtoReflect.Descriptor instead. func (*DequeueWorkflowChunkRequest_Done) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{6, 3} } func (x *DequeueWorkflowChunkRequest_Done) GetResult() []byte { if x != nil { return x.Result } return nil } type DequeueWorkflowResponse_Dequeue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Instance *WorkflowInstance `protobuf:"bytes,1,opt,name=instance,proto3" json:"instance,omitempty" toml:"instance,omitempty" mapstructure:"instance,omitempty"` Event *WorkflowEvent `protobuf:"bytes,2,opt,name=event,proto3" json:"event,omitempty" toml:"event,omitempty" mapstructure:"event,omitempty"` } func (x *DequeueWorkflowResponse_Dequeue) Reset() { *x = DequeueWorkflowResponse_Dequeue{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[38] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowResponse_Dequeue) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowResponse_Dequeue) ProtoMessage() {} func (x *DequeueWorkflowResponse_Dequeue) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[38] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowResponse_Dequeue.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowResponse_Dequeue) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{7, 0} } func (x *DequeueWorkflowResponse_Dequeue) GetInstance() *WorkflowInstance { if x != nil { return x.Instance } return nil } func (x *DequeueWorkflowResponse_Dequeue) GetEvent() *WorkflowEvent { if x != nil { return x.Event } return nil } type DequeueWorkflowResponse_Status struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty" toml:"err,omitempty" mapstructure:"err,omitempty"` } func (x *DequeueWorkflowResponse_Status) Reset() { *x = DequeueWorkflowResponse_Status{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[39] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowResponse_Status) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowResponse_Status) ProtoMessage() {} func (x *DequeueWorkflowResponse_Status) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[39] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowResponse_Status.ProtoReflect.Descriptor instead. func (*DequeueWorkflowResponse_Status) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{7, 1} } func (x *DequeueWorkflowResponse_Status) GetErr() string { if x != nil { return x.Err } return "" } type DequeueWorkflowResponse_Committed struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *DequeueWorkflowResponse_Committed) Reset() { *x = DequeueWorkflowResponse_Committed{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[40] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueWorkflowResponse_Committed) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueWorkflowResponse_Committed) ProtoMessage() {} func (x *DequeueWorkflowResponse_Committed) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[40] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueWorkflowResponse_Committed.ProtoReflect.Descriptor instead. 
func (*DequeueWorkflowResponse_Committed) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{7, 2} } type DequeueTaskRequest_Dequeue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Domain string `protobuf:"bytes,1,opt,name=domain,proto3" json:"domain,omitempty" toml:"domain,omitempty" mapstructure:"domain,omitempty"` TaskName string `protobuf:"bytes,2,opt,name=task_name,json=taskName,proto3" json:"task_name,omitempty" toml:"task_name,omitempty" mapstructure:"task_name,omitempty"` } func (x *DequeueTaskRequest_Dequeue) Reset() { *x = DequeueTaskRequest_Dequeue{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[41] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueTaskRequest_Dequeue) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueTaskRequest_Dequeue) ProtoMessage() {} func (x *DequeueTaskRequest_Dequeue) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[41] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueTaskRequest_Dequeue.ProtoReflect.Descriptor instead. func (*DequeueTaskRequest_Dequeue) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{15, 0} } func (x *DequeueTaskRequest_Dequeue) GetDomain() string { if x != nil { return x.Domain } return "" } func (x *DequeueTaskRequest_Dequeue) GetTaskName() string { if x != nil { return x.TaskName } return "" } type DequeueTaskRequest_Fail struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty" toml:"error,omitempty" mapstructure:"error,omitempty"` } func (x *DequeueTaskRequest_Fail) Reset() { *x = DequeueTaskRequest_Fail{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[42] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueTaskRequest_Fail) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueTaskRequest_Fail) ProtoMessage() {} func (x *DequeueTaskRequest_Fail) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[42] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueTaskRequest_Fail.ProtoReflect.Descriptor instead. 
func (*DequeueTaskRequest_Fail) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{15, 1} } func (x *DequeueTaskRequest_Fail) GetError() string { if x != nil { return x.Error } return "" } type DequeueTaskRequest_Succeed struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Result []byte `protobuf:"bytes,1,opt,name=result,proto3" json:"result,omitempty" toml:"result,omitempty" mapstructure:"result,omitempty"` } func (x *DequeueTaskRequest_Succeed) Reset() { *x = DequeueTaskRequest_Succeed{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[43] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueTaskRequest_Succeed) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueTaskRequest_Succeed) ProtoMessage() {} func (x *DequeueTaskRequest_Succeed) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[43] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueTaskRequest_Succeed.ProtoReflect.Descriptor instead. func (*DequeueTaskRequest_Succeed) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{15, 2} } func (x *DequeueTaskRequest_Succeed) GetResult() []byte { if x != nil { return x.Result } return nil } type DequeueTaskResponse_Dequeue struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Task *Task `protobuf:"bytes,1,opt,name=task,proto3" json:"task,omitempty" toml:"task,omitempty" mapstructure:"task,omitempty"` } func (x *DequeueTaskResponse_Dequeue) Reset() { *x = DequeueTaskResponse_Dequeue{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[44] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueTaskResponse_Dequeue) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueTaskResponse_Dequeue) ProtoMessage() {} func (x *DequeueTaskResponse_Dequeue) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[44] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueTaskResponse_Dequeue.ProtoReflect.Descriptor instead. 
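
// Illustrative sketch (not generated code): each DequeueTaskRequest carries
// exactly one command via the Cmd oneof; the generated wrapper structs
// (DequeueTaskRequest_Dequeue_ and friends) select the arm. Values are
// hypothetical.
func exampleTaskCommands() []*DequeueTaskRequest {
	return []*DequeueTaskRequest{
		{Cmd: &DequeueTaskRequest_Dequeue_{Dequeue: &DequeueTaskRequest_Dequeue{
			Domain:   "compliance",
			TaskName: "run-scan",
		}}},
		{Cmd: &DequeueTaskRequest_Succeed_{Succeed: &DequeueTaskRequest_Succeed{
			Result: []byte(`{"nodes_scanned":12}`),
		}}},
		{Cmd: &DequeueTaskRequest_Fail_{Fail: &DequeueTaskRequest_Fail{
			Error: "node unreachable",
		}}},
	}
}
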
func (*DequeueTaskResponse_Dequeue) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{16, 0} } func (x *DequeueTaskResponse_Dequeue) GetTask() *Task { if x != nil { return x.Task } return nil } type DequeueTaskResponse_Cancel struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields Err string `protobuf:"bytes,1,opt,name=err,proto3" json:"err,omitempty" toml:"err,omitempty" mapstructure:"err,omitempty"` } func (x *DequeueTaskResponse_Cancel) Reset() { *x = DequeueTaskResponse_Cancel{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[45] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueTaskResponse_Cancel) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueTaskResponse_Cancel) ProtoMessage() {} func (x *DequeueTaskResponse_Cancel) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[45] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueTaskResponse_Cancel.ProtoReflect.Descriptor instead. func (*DequeueTaskResponse_Cancel) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{16, 1} } func (x *DequeueTaskResponse_Cancel) GetErr() string { if x != nil { return x.Err } return "" } type DequeueTaskResponse_Committed struct { state protoimpl.MessageState sizeCache protoimpl.SizeCache unknownFields protoimpl.UnknownFields } func (x *DequeueTaskResponse_Committed) Reset() { *x = DequeueTaskResponse_Committed{} if protoimpl.UnsafeEnabled { mi := &file_interservice_cereal_cereal_proto_msgTypes[46] ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) ms.StoreMessageInfo(mi) } } func (x *DequeueTaskResponse_Committed) String() string { return protoimpl.X.MessageStringOf(x) } func (*DequeueTaskResponse_Committed) ProtoMessage() {} func (x *DequeueTaskResponse_Committed) ProtoReflect() protoreflect.Message { mi := &file_interservice_cereal_cereal_proto_msgTypes[46] if protoimpl.UnsafeEnabled && x != nil { ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) if ms.LoadMessageInfo() == nil { ms.StoreMessageInfo(mi) } return ms } return mi.MessageOf(x) } // Deprecated: Use DequeueTaskResponse_Committed.ProtoReflect.Descriptor instead. 
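
// Illustrative sketch (not generated code): consuming the response side of the
// task-dequeue oneof with a type switch over GetCmd(). The human-readable
// strings are for illustration only; GetTask().GetName() assumes the Task
// message's generated getters defined earlier in this file.
func exampleDescribeTaskResponse(resp *DequeueTaskResponse) string {
	switch cmd := resp.GetCmd().(type) {
	case *DequeueTaskResponse_Dequeue_:
		return "dequeued: " + cmd.Dequeue.GetTask().GetName()
	case *DequeueTaskResponse_Cancel_:
		return "canceled: " + cmd.Cancel.GetErr()
	case *DequeueTaskResponse_Committed_:
		return "committed"
	default:
		return "no command set"
	}
}
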
func (*DequeueTaskResponse_Committed) Descriptor() ([]byte, []int) { return file_interservice_cereal_cereal_proto_rawDescGZIP(), []int{16, 2} } var File_interservice_cereal_cereal_proto protoreflect.FileDescriptor var file_interservice_cereal_cereal_proto_rawDesc = []byte{ 0x0a, 0x20, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2f, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1a, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x1a, 0x1f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x1e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x77, 0x72, 0x61, 0x70, 0x70, 0x65, 0x72, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0xbd, 0x01, 0x0a, 0x04, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3b, 0x0a, 0x0b, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x61, 0x66, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x73, 0x74, 0x61, 0x72, 0x74, 0x41, 0x66, 0x74, 0x65, 0x72, 0x12, 0x44, 0x0a, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x28, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x52, 0x08, 0x6d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x22, 0x4b, 0x0a, 0x0c, 0x54, 0x61, 0x73, 0x6b, 0x4d, 0x65, 0x74, 0x61, 0x64, 0x61, 0x74, 0x61, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x41, 0x74, 0x22, 0xf0, 0x01, 0x0a, 0x10, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 
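	// The bytes in this slice are the wire-format encoding of the
	// google.protobuf.FileDescriptorProto for interservice/cereal/cereal.proto.
	// file_interservice_cereal_cereal_proto_rawDescGZIP below gzip-compresses
	// them once, on first use, guarded by a sync.Once.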
0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x08, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x22, 0x98, 0x01, 0x0a, 0x0a, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x5f, 0x74, 0x65, 0x78, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x54, 0x65, 0x78, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x22, 0x8b, 0x02, 0x0a, 0x0d, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x12, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x2e, 0x0a, 0x13, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x03, 0x52, 0x11, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x30, 0x0a, 0x14, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x03, 0x20, 0x01, 0x28, 0x03, 0x52, 0x12, 0x63, 0x6f, 0x6d, 0x70, 0x6c, 0x65, 0x74, 0x65, 0x64, 0x54, 0x61, 0x73, 0x6b, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x47, 0x0a, 0x0b, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x26, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x52, 0x0a, 0x74, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x12, 0x3b, 0x0a, 0x0b, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0a, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x41, 0x74, 0x22, 0xd2, 0x04, 0x0a, 0x16, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x56, 0x0a, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 
0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x48, 0x00, 0x52, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x59, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x6f, 0x6e, 0x65, 0x48, 0x00, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x4d, 0x0a, 0x04, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x48, 0x00, 0x52, 0x04, 0x66, 0x61, 0x69, 0x6c, 0x1a, 0x48, 0x0a, 0x07, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x5c, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x1a, 0x18, 0x0a, 0x04, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x1a, 0x1e, 0x0a, 0x04, 0x44, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x05, 0x0a, 0x03, 0x63, 0x6d, 0x64, 0x22, 0xeb, 0x04, 0x0a, 0x1b, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x5b, 0x0a, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3f, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 
0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x48, 0x00, 0x52, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x5e, 0x0a, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x40, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x48, 0x00, 0x52, 0x08, 0x63, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x6f, 0x6e, 0x65, 0x48, 0x00, 0x52, 0x04, 0x64, 0x6f, 0x6e, 0x65, 0x12, 0x52, 0x0a, 0x04, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3c, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x48, 0x00, 0x52, 0x04, 0x66, 0x61, 0x69, 0x6c, 0x1a, 0x48, 0x0a, 0x07, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x09, 0x52, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x73, 0x1a, 0x5c, 0x0a, 0x08, 0x43, 0x6f, 0x6e, 0x74, 0x69, 0x6e, 0x75, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x36, 0x0a, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x05, 0x74, 0x61, 0x73, 0x6b, 0x73, 0x1a, 0x18, 0x0a, 0x04, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x1a, 0x1e, 0x0a, 0x04, 0x44, 0x6f, 0x6e, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x05, 0x0a, 0x03, 0x63, 0x6d, 0x64, 0x22, 0xee, 0x03, 0x0a, 0x17, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x57, 0x0a, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 
0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3b, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x48, 0x00, 0x52, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x54, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3a, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x48, 0x00, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x5d, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x3d, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x1a, 0x94, 0x01, 0x0a, 0x07, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x48, 0x0a, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x08, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x12, 0x3f, 0x0a, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x05, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x1a, 0x1a, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x1a, 0x0b, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x42, 0x05, 0x0a, 0x03, 0x63, 0x6d, 0x64, 0x22, 0x34, 0x0a, 0x1c, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x05, 0x63, 0x68, 0x75, 0x6e, 0x6b, 0x22, 0x9a, 0x01, 0x0a, 0x16, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 
0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x22, 0x19, 0x0a, 0x17, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x79, 0x0a, 0x15, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x18, 0x0a, 0x16, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x77, 0x0a, 0x13, 0x4b, 0x69, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x16, 0x0a, 0x14, 0x4b, 0x69, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x8f, 0x03, 0x0a, 0x12, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x48, 0x00, 0x52, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x52, 0x0a, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x48, 0x00, 0x52, 0x07, 0x73, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x12, 0x49, 0x0a, 
0x04, 0x66, 0x61, 0x69, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x33, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x2e, 0x46, 0x61, 0x69, 0x6c, 0x48, 0x00, 0x52, 0x04, 0x66, 0x61, 0x69, 0x6c, 0x1a, 0x3e, 0x0a, 0x07, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x1b, 0x0a, 0x09, 0x74, 0x61, 0x73, 0x6b, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x08, 0x74, 0x61, 0x73, 0x6b, 0x4e, 0x61, 0x6d, 0x65, 0x1a, 0x1c, 0x0a, 0x04, 0x46, 0x61, 0x69, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x65, 0x72, 0x72, 0x6f, 0x72, 0x1a, 0x21, 0x0a, 0x07, 0x53, 0x75, 0x63, 0x63, 0x65, 0x65, 0x64, 0x12, 0x16, 0x0a, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x72, 0x65, 0x73, 0x75, 0x6c, 0x74, 0x42, 0x05, 0x0a, 0x03, 0x63, 0x6d, 0x64, 0x22, 0x88, 0x03, 0x0a, 0x13, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x53, 0x0a, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x37, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x48, 0x00, 0x52, 0x07, 0x64, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x50, 0x0a, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x36, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x48, 0x00, 0x52, 0x06, 0x63, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x59, 0x0a, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x2e, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x48, 0x00, 0x52, 0x09, 0x63, 0x6f, 0x6d, 0x6d, 0x69, 0x74, 0x74, 0x65, 0x64, 0x1a, 0x3f, 0x0a, 0x07, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x04, 0x74, 0x61, 0x73, 0x6b, 0x1a, 0x1a, 0x0a, 0x06, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x12, 0x10, 0x0a, 0x03, 0x65, 0x72, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x65, 0x72, 0x72, 0x1a, 0x0b, 0x0a, 0x09, 0x43, 0x6f, 0x6d, 0x6d, 0x69, 
0x74, 0x74, 0x65, 0x64, 0x42, 0x05, 0x0a, 0x03, 0x63, 0x6d, 0x64, 0x22, 0xb1, 0x02, 0x0a, 0x1d, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x07, 0x70, 0x61, 0x79, 0x6c, 0x6f, 0x61, 0x64, 0x12, 0x3a, 0x0a, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x52, 0x75, 0x6e, 0x41, 0x74, 0x22, 0x20, 0x0a, 0x1e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x36, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x22, 0xba, 0x03, 0x0a, 0x08, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x1e, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x1e, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x3a, 0x0a, 0x0b, 0x6e, 0x65, 0x78, 0x74, 
0x5f, 0x64, 0x75, 0x65, 0x5f, 0x61, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x44, 0x75, 0x65, 0x41, 0x74, 0x12, 0x44, 0x0a, 0x10, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x65, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x0e, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x64, 0x41, 0x74, 0x12, 0x18, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x08, 0x20, 0x01, 0x28, 0x08, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x39, 0x0a, 0x0a, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x73, 0x74, 0x61, 0x72, 0x74, 0x18, 0x09, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x6c, 0x61, 0x73, 0x74, 0x53, 0x74, 0x61, 0x72, 0x74, 0x12, 0x35, 0x0a, 0x08, 0x6c, 0x61, 0x73, 0x74, 0x5f, 0x65, 0x6e, 0x64, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x07, 0x6c, 0x61, 0x73, 0x74, 0x45, 0x6e, 0x64, 0x22, 0x86, 0x01, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x6e, 0x75, 0x6d, 0x5f, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x05, 0x52, 0x0c, 0x6e, 0x75, 0x6d, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x40, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0x84, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x22, 0x65, 0x0a, 0x21, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x40, 0x0a, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 
0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x08, 0x73, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x22, 0xf4, 0x02, 0x0a, 0x23, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x34, 0x0a, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x07, 0x65, 0x6e, 0x61, 0x62, 0x6c, 0x65, 0x64, 0x12, 0x3b, 0x0a, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x79, 0x74, 0x65, 0x73, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x70, 0x61, 0x72, 0x61, 0x6d, 0x65, 0x74, 0x65, 0x72, 0x73, 0x12, 0x3c, 0x0a, 0x0a, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x72, 0x65, 0x63, 0x75, 0x72, 0x72, 0x65, 0x6e, 0x63, 0x65, 0x12, 0x3a, 0x0a, 0x0b, 0x6e, 0x65, 0x78, 0x74, 0x5f, 0x72, 0x75, 0x6e, 0x5f, 0x61, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x6e, 0x65, 0x78, 0x74, 0x52, 0x75, 0x6e, 0x41, 0x74, 0x22, 0x26, 0x0a, 0x24, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x22, 0x84, 0x01, 0x0a, 0x20, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x23, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x23, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 
0x61, 0x6d, 0x65, 0x22, 0x7e, 0x0a, 0x21, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x22, 0xf7, 0x01, 0x0a, 0x1c, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x06, 0x64, 0x6f, 0x6d, 0x61, 0x69, 0x6e, 0x12, 0x41, 0x0a, 0x0d, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x41, 0x0a, 0x0d, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x5f, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x53, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0c, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x39, 0x0a, 0x0a, 0x69, 0x73, 0x5f, 0x72, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x42, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x09, 0x69, 0x73, 0x52, 0x75, 0x6e, 0x6e, 0x69, 0x6e, 0x67, 0x22, 0x7a, 0x0a, 0x1d, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x59, 0x0a, 0x11, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x5f, 0x69, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2c, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x52, 0x10, 0x77, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x32, 0x89, 0x0d, 0x0a, 0x0d, 0x43, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x12, 0x7a, 0x0a, 0x0f, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x32, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 
0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x45, 0x6e, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x7e, 0x0a, 0x0f, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x32, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x33, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x8d, 0x01, 0x0a, 0x14, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x12, 0x37, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x38, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x43, 0x68, 0x75, 0x6e, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x77, 0x0a, 0x0e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x31, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x32, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x43, 0x61, 0x6e, 0x63, 0x65, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x71, 0x0a, 0x0c, 0x4b, 0x69, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x12, 0x2f, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x4b, 0x69, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x30, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x4b, 0x69, 0x6c, 0x6c, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x72, 0x0a, 0x0b, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x12, 0x2e, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 
0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x2f, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x44, 0x65, 0x71, 0x75, 0x65, 0x75, 0x65, 0x54, 0x61, 0x73, 0x6b, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x28, 0x01, 0x30, 0x01, 0x12, 0x8f, 0x01, 0x0a, 0x16, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x12, 0x39, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3a, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x43, 0x72, 0x65, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x12, 0x38, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x12, 0x98, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0xa1, 0x01, 0x0a, 0x1c, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3f, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 
0x65, 0x61, 0x6c, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x40, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x55, 0x70, 0x64, 0x61, 0x74, 0x65, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x53, 0x63, 0x68, 0x65, 0x64, 0x75, 0x6c, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x98, 0x01, 0x0a, 0x19, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x12, 0x3c, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x3d, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x47, 0x65, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x42, 0x79, 0x4e, 0x61, 0x6d, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x12, 0x8e, 0x01, 0x0a, 0x15, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x12, 0x38, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x39, 0x2e, 0x63, 0x68, 0x65, 0x66, 0x2e, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2e, 0x69, 0x6e, 0x66, 0x72, 0x61, 0x2e, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x2e, 0x4c, 0x69, 0x73, 0x74, 0x57, 0x6f, 0x72, 0x6b, 0x66, 0x6c, 0x6f, 0x77, 0x49, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x63, 0x65, 0x73, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x30, 0x01, 0x42, 0x32, 0x5a, 0x30, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x63, 0x68, 0x65, 0x66, 0x2f, 0x61, 0x75, 0x74, 0x6f, 0x6d, 0x61, 0x74, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x69, 0x6e, 0x74, 0x65, 0x72, 0x73, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2f, 0x63, 0x65, 0x72, 0x65, 0x61, 0x6c, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, } var ( file_interservice_cereal_cereal_proto_rawDescOnce sync.Once file_interservice_cereal_cereal_proto_rawDescData = file_interservice_cereal_cereal_proto_rawDesc ) func file_interservice_cereal_cereal_proto_rawDescGZIP() []byte { file_interservice_cereal_cereal_proto_rawDescOnce.Do(func() { file_interservice_cereal_cereal_proto_rawDescData = protoimpl.X.CompressGZIP(file_interservice_cereal_cereal_proto_rawDescData) }) return file_interservice_cereal_cereal_proto_rawDescData } var file_interservice_cereal_cereal_proto_msgTypes = make([]protoimpl.MessageInfo, 47) var file_interservice_cereal_cereal_proto_goTypes = []interface{}{ (*Task)(nil), // 0: chef.automate.infra.cereal.Task (*TaskMetadata)(nil), // 1: chef.automate.infra.cereal.TaskMetadata 
(*WorkflowInstance)(nil), // 2: chef.automate.infra.cereal.WorkflowInstance (*TaskResult)(nil), // 3: chef.automate.infra.cereal.TaskResult (*WorkflowEvent)(nil), // 4: chef.automate.infra.cereal.WorkflowEvent (*DequeueWorkflowRequest)(nil), // 5: chef.automate.infra.cereal.DequeueWorkflowRequest (*DequeueWorkflowChunkRequest)(nil), // 6: chef.automate.infra.cereal.DequeueWorkflowChunkRequest (*DequeueWorkflowResponse)(nil), // 7: chef.automate.infra.cereal.DequeueWorkflowResponse (*DequeueWorkflowChunkResponse)(nil), // 8: chef.automate.infra.cereal.DequeueWorkflowChunkResponse (*EnqueueWorkflowRequest)(nil), // 9: chef.automate.infra.cereal.EnqueueWorkflowRequest (*EnqueueWorkflowResponse)(nil), // 10: chef.automate.infra.cereal.EnqueueWorkflowResponse (*CancelWorkflowRequest)(nil), // 11: chef.automate.infra.cereal.CancelWorkflowRequest (*CancelWorkflowResponse)(nil), // 12: chef.automate.infra.cereal.CancelWorkflowResponse (*KillWorkflowRequest)(nil), // 13: chef.automate.infra.cereal.KillWorkflowRequest (*KillWorkflowResponse)(nil), // 14: chef.automate.infra.cereal.KillWorkflowResponse (*DequeueTaskRequest)(nil), // 15: chef.automate.infra.cereal.DequeueTaskRequest (*DequeueTaskResponse)(nil), // 16: chef.automate.infra.cereal.DequeueTaskResponse (*CreateWorkflowScheduleRequest)(nil), // 17: chef.automate.infra.cereal.CreateWorkflowScheduleRequest (*CreateWorkflowScheduleResponse)(nil), // 18: chef.automate.infra.cereal.CreateWorkflowScheduleResponse (*ListWorkflowSchedulesRequest)(nil), // 19: chef.automate.infra.cereal.ListWorkflowSchedulesRequest (*Schedule)(nil), // 20: chef.automate.infra.cereal.Schedule (*ListWorkflowSchedulesResponse)(nil), // 21: chef.automate.infra.cereal.ListWorkflowSchedulesResponse (*GetWorkflowScheduleByNameRequest)(nil), // 22: chef.automate.infra.cereal.GetWorkflowScheduleByNameRequest (*GetWorkflowScheduleByNameResponse)(nil), // 23: chef.automate.infra.cereal.GetWorkflowScheduleByNameResponse (*UpdateWorkflowScheduleByNameRequest)(nil), // 24: chef.automate.infra.cereal.UpdateWorkflowScheduleByNameRequest (*UpdateWorkflowScheduleByNameResponse)(nil), // 25: chef.automate.infra.cereal.UpdateWorkflowScheduleByNameResponse (*GetWorkflowInstanceByNameRequest)(nil), // 26: chef.automate.infra.cereal.GetWorkflowInstanceByNameRequest (*GetWorkflowInstanceByNameResponse)(nil), // 27: chef.automate.infra.cereal.GetWorkflowInstanceByNameResponse (*ListWorkflowInstancesRequest)(nil), // 28: chef.automate.infra.cereal.ListWorkflowInstancesRequest (*ListWorkflowInstancesResponse)(nil), // 29: chef.automate.infra.cereal.ListWorkflowInstancesResponse (*DequeueWorkflowRequest_Dequeue)(nil), // 30: chef.automate.infra.cereal.DequeueWorkflowRequest.Dequeue (*DequeueWorkflowRequest_Continue)(nil), // 31: chef.automate.infra.cereal.DequeueWorkflowRequest.Continue (*DequeueWorkflowRequest_Fail)(nil), // 32: chef.automate.infra.cereal.DequeueWorkflowRequest.Fail (*DequeueWorkflowRequest_Done)(nil), // 33: chef.automate.infra.cereal.DequeueWorkflowRequest.Done (*DequeueWorkflowChunkRequest_Dequeue)(nil), // 34: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Dequeue (*DequeueWorkflowChunkRequest_Continue)(nil), // 35: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Continue (*DequeueWorkflowChunkRequest_Fail)(nil), // 36: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Fail (*DequeueWorkflowChunkRequest_Done)(nil), // 37: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Done (*DequeueWorkflowResponse_Dequeue)(nil), // 38: 
chef.automate.infra.cereal.DequeueWorkflowResponse.Dequeue (*DequeueWorkflowResponse_Status)(nil), // 39: chef.automate.infra.cereal.DequeueWorkflowResponse.Status (*DequeueWorkflowResponse_Committed)(nil), // 40: chef.automate.infra.cereal.DequeueWorkflowResponse.Committed (*DequeueTaskRequest_Dequeue)(nil), // 41: chef.automate.infra.cereal.DequeueTaskRequest.Dequeue (*DequeueTaskRequest_Fail)(nil), // 42: chef.automate.infra.cereal.DequeueTaskRequest.Fail (*DequeueTaskRequest_Succeed)(nil), // 43: chef.automate.infra.cereal.DequeueTaskRequest.Succeed (*DequeueTaskResponse_Dequeue)(nil), // 44: chef.automate.infra.cereal.DequeueTaskResponse.Dequeue (*DequeueTaskResponse_Cancel)(nil), // 45: chef.automate.infra.cereal.DequeueTaskResponse.Cancel (*DequeueTaskResponse_Committed)(nil), // 46: chef.automate.infra.cereal.DequeueTaskResponse.Committed (*timestamp.Timestamp)(nil), // 47: google.protobuf.Timestamp (*wrappers.BoolValue)(nil), // 48: google.protobuf.BoolValue (*wrappers.BytesValue)(nil), // 49: google.protobuf.BytesValue (*wrappers.StringValue)(nil), // 50: google.protobuf.StringValue } var file_interservice_cereal_cereal_proto_depIdxs = []int32{ 47, // 0: chef.automate.infra.cereal.Task.start_after:type_name -> google.protobuf.Timestamp 1, // 1: chef.automate.infra.cereal.Task.metadata:type_name -> chef.automate.infra.cereal.TaskMetadata 47, // 2: chef.automate.infra.cereal.TaskMetadata.enqueued_at:type_name -> google.protobuf.Timestamp 3, // 3: chef.automate.infra.cereal.WorkflowEvent.task_result:type_name -> chef.automate.infra.cereal.TaskResult 47, // 4: chef.automate.infra.cereal.WorkflowEvent.enqueued_at:type_name -> google.protobuf.Timestamp 30, // 5: chef.automate.infra.cereal.DequeueWorkflowRequest.dequeue:type_name -> chef.automate.infra.cereal.DequeueWorkflowRequest.Dequeue 31, // 6: chef.automate.infra.cereal.DequeueWorkflowRequest.continue:type_name -> chef.automate.infra.cereal.DequeueWorkflowRequest.Continue 33, // 7: chef.automate.infra.cereal.DequeueWorkflowRequest.done:type_name -> chef.automate.infra.cereal.DequeueWorkflowRequest.Done 32, // 8: chef.automate.infra.cereal.DequeueWorkflowRequest.fail:type_name -> chef.automate.infra.cereal.DequeueWorkflowRequest.Fail 34, // 9: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.dequeue:type_name -> chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Dequeue 35, // 10: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.continue:type_name -> chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Continue 37, // 11: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.done:type_name -> chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Done 36, // 12: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.fail:type_name -> chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Fail 38, // 13: chef.automate.infra.cereal.DequeueWorkflowResponse.dequeue:type_name -> chef.automate.infra.cereal.DequeueWorkflowResponse.Dequeue 39, // 14: chef.automate.infra.cereal.DequeueWorkflowResponse.status:type_name -> chef.automate.infra.cereal.DequeueWorkflowResponse.Status 40, // 15: chef.automate.infra.cereal.DequeueWorkflowResponse.committed:type_name -> chef.automate.infra.cereal.DequeueWorkflowResponse.Committed 41, // 16: chef.automate.infra.cereal.DequeueTaskRequest.dequeue:type_name -> chef.automate.infra.cereal.DequeueTaskRequest.Dequeue 43, // 17: chef.automate.infra.cereal.DequeueTaskRequest.succeed:type_name -> chef.automate.infra.cereal.DequeueTaskRequest.Succeed 42, // 18: 
chef.automate.infra.cereal.DequeueTaskRequest.fail:type_name -> chef.automate.infra.cereal.DequeueTaskRequest.Fail 44, // 19: chef.automate.infra.cereal.DequeueTaskResponse.dequeue:type_name -> chef.automate.infra.cereal.DequeueTaskResponse.Dequeue 45, // 20: chef.automate.infra.cereal.DequeueTaskResponse.cancel:type_name -> chef.automate.infra.cereal.DequeueTaskResponse.Cancel 46, // 21: chef.automate.infra.cereal.DequeueTaskResponse.committed:type_name -> chef.automate.infra.cereal.DequeueTaskResponse.Committed 47, // 22: chef.automate.infra.cereal.CreateWorkflowScheduleRequest.next_run_at:type_name -> google.protobuf.Timestamp 47, // 23: chef.automate.infra.cereal.Schedule.next_due_at:type_name -> google.protobuf.Timestamp 47, // 24: chef.automate.infra.cereal.Schedule.last_enqueued_at:type_name -> google.protobuf.Timestamp 47, // 25: chef.automate.infra.cereal.Schedule.last_start:type_name -> google.protobuf.Timestamp 47, // 26: chef.automate.infra.cereal.Schedule.last_end:type_name -> google.protobuf.Timestamp 20, // 27: chef.automate.infra.cereal.ListWorkflowSchedulesResponse.schedule:type_name -> chef.automate.infra.cereal.Schedule 20, // 28: chef.automate.infra.cereal.GetWorkflowScheduleByNameResponse.schedule:type_name -> chef.automate.infra.cereal.Schedule 48, // 29: chef.automate.infra.cereal.UpdateWorkflowScheduleByNameRequest.enabled:type_name -> google.protobuf.BoolValue 49, // 30: chef.automate.infra.cereal.UpdateWorkflowScheduleByNameRequest.parameters:type_name -> google.protobuf.BytesValue 50, // 31: chef.automate.infra.cereal.UpdateWorkflowScheduleByNameRequest.recurrence:type_name -> google.protobuf.StringValue 47, // 32: chef.automate.infra.cereal.UpdateWorkflowScheduleByNameRequest.next_run_at:type_name -> google.protobuf.Timestamp 2, // 33: chef.automate.infra.cereal.GetWorkflowInstanceByNameResponse.workflow_instance:type_name -> chef.automate.infra.cereal.WorkflowInstance 50, // 34: chef.automate.infra.cereal.ListWorkflowInstancesRequest.workflow_name:type_name -> google.protobuf.StringValue 50, // 35: chef.automate.infra.cereal.ListWorkflowInstancesRequest.instance_name:type_name -> google.protobuf.StringValue 48, // 36: chef.automate.infra.cereal.ListWorkflowInstancesRequest.is_running:type_name -> google.protobuf.BoolValue 2, // 37: chef.automate.infra.cereal.ListWorkflowInstancesResponse.workflow_instance:type_name -> chef.automate.infra.cereal.WorkflowInstance 0, // 38: chef.automate.infra.cereal.DequeueWorkflowRequest.Continue.tasks:type_name -> chef.automate.infra.cereal.Task 0, // 39: chef.automate.infra.cereal.DequeueWorkflowChunkRequest.Continue.tasks:type_name -> chef.automate.infra.cereal.Task 2, // 40: chef.automate.infra.cereal.DequeueWorkflowResponse.Dequeue.instance:type_name -> chef.automate.infra.cereal.WorkflowInstance 4, // 41: chef.automate.infra.cereal.DequeueWorkflowResponse.Dequeue.event:type_name -> chef.automate.infra.cereal.WorkflowEvent 0, // 42: chef.automate.infra.cereal.DequeueTaskResponse.Dequeue.task:type_name -> chef.automate.infra.cereal.Task 9, // 43: chef.automate.infra.cereal.CerealService.EnqueueWorkflow:input_type -> chef.automate.infra.cereal.EnqueueWorkflowRequest 5, // 44: chef.automate.infra.cereal.CerealService.DequeueWorkflow:input_type -> chef.automate.infra.cereal.DequeueWorkflowRequest 6, // 45: chef.automate.infra.cereal.CerealService.DequeueWorkflowChunk:input_type -> chef.automate.infra.cereal.DequeueWorkflowChunkRequest 11, // 46: chef.automate.infra.cereal.CerealService.CancelWorkflow:input_type -> 
chef.automate.infra.cereal.CancelWorkflowRequest 13, // 47: chef.automate.infra.cereal.CerealService.KillWorkflow:input_type -> chef.automate.infra.cereal.KillWorkflowRequest 15, // 48: chef.automate.infra.cereal.CerealService.DequeueTask:input_type -> chef.automate.infra.cereal.DequeueTaskRequest 17, // 49: chef.automate.infra.cereal.CerealService.CreateWorkflowSchedule:input_type -> chef.automate.infra.cereal.CreateWorkflowScheduleRequest 19, // 50: chef.automate.infra.cereal.CerealService.ListWorkflowSchedules:input_type -> chef.automate.infra.cereal.ListWorkflowSchedulesRequest 22, // 51: chef.automate.infra.cereal.CerealService.GetWorkflowScheduleByName:input_type -> chef.automate.infra.cereal.GetWorkflowScheduleByNameRequest 24, // 52: chef.automate.infra.cereal.CerealService.UpdateWorkflowScheduleByName:input_type -> chef.automate.infra.cereal.UpdateWorkflowScheduleByNameRequest 26, // 53: chef.automate.infra.cereal.CerealService.GetWorkflowInstanceByName:input_type -> chef.automate.infra.cereal.GetWorkflowInstanceByNameRequest 28, // 54: chef.automate.infra.cereal.CerealService.ListWorkflowInstances:input_type -> chef.automate.infra.cereal.ListWorkflowInstancesRequest 10, // 55: chef.automate.infra.cereal.CerealService.EnqueueWorkflow:output_type -> chef.automate.infra.cereal.EnqueueWorkflowResponse 7, // 56: chef.automate.infra.cereal.CerealService.DequeueWorkflow:output_type -> chef.automate.infra.cereal.DequeueWorkflowResponse 8, // 57: chef.automate.infra.cereal.CerealService.DequeueWorkflowChunk:output_type -> chef.automate.infra.cereal.DequeueWorkflowChunkResponse 12, // 58: chef.automate.infra.cereal.CerealService.CancelWorkflow:output_type -> chef.automate.infra.cereal.CancelWorkflowResponse 14, // 59: chef.automate.infra.cereal.CerealService.KillWorkflow:output_type -> chef.automate.infra.cereal.KillWorkflowResponse 16, // 60: chef.automate.infra.cereal.CerealService.DequeueTask:output_type -> chef.automate.infra.cereal.DequeueTaskResponse 18, // 61: chef.automate.infra.cereal.CerealService.CreateWorkflowSchedule:output_type -> chef.automate.infra.cereal.CreateWorkflowScheduleResponse 21, // 62: chef.automate.infra.cereal.CerealService.ListWorkflowSchedules:output_type -> chef.automate.infra.cereal.ListWorkflowSchedulesResponse 23, // 63: chef.automate.infra.cereal.CerealService.GetWorkflowScheduleByName:output_type -> chef.automate.infra.cereal.GetWorkflowScheduleByNameResponse 25, // 64: chef.automate.infra.cereal.CerealService.UpdateWorkflowScheduleByName:output_type -> chef.automate.infra.cereal.UpdateWorkflowScheduleByNameResponse 27, // 65: chef.automate.infra.cereal.CerealService.GetWorkflowInstanceByName:output_type -> chef.automate.infra.cereal.GetWorkflowInstanceByNameResponse 29, // 66: chef.automate.infra.cereal.CerealService.ListWorkflowInstances:output_type -> chef.automate.infra.cereal.ListWorkflowInstancesResponse 55, // [55:67] is the sub-list for method output_type 43, // [43:55] is the sub-list for method input_type 43, // [43:43] is the sub-list for extension type_name 43, // [43:43] is the sub-list for extension extendee 0, // [0:43] is the sub-list for field type_name } func init() { file_interservice_cereal_cereal_proto_init() } func file_interservice_cereal_cereal_proto_init() { if File_interservice_cereal_cereal_proto != nil { return } if !protoimpl.UnsafeEnabled { file_interservice_cereal_cereal_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Task); i { case 0: return &v.state case 1: return &v.sizeCache case 
2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TaskMetadata); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowInstance); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*TaskResult); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*WorkflowEvent); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowChunkRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[7].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[8].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowChunkResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[9].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnqueueWorkflowRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[10].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*EnqueueWorkflowResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[11].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CancelWorkflowRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[12].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CancelWorkflowResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[13].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*KillWorkflowRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[14].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*KillWorkflowResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return 
&v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[15].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueTaskRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[16].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueTaskResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[17].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateWorkflowScheduleRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[18].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*CreateWorkflowScheduleResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[19].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListWorkflowSchedulesRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[20].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*Schedule); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[21].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListWorkflowSchedulesResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[22].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowScheduleByNameRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[23].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowScheduleByNameResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[24].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpdateWorkflowScheduleByNameRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[25].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*UpdateWorkflowScheduleByNameResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[26].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowInstanceByNameRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[27].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*GetWorkflowInstanceByNameResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[28].Exporter = func(v interface{}, i int) interface{} { switch 
v := v.(*ListWorkflowInstancesRequest); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[29].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*ListWorkflowInstancesResponse); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[30].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowRequest_Dequeue); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[31].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowRequest_Continue); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[32].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowRequest_Fail); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[33].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowRequest_Done); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[34].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowChunkRequest_Dequeue); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[35].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowChunkRequest_Continue); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[36].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowChunkRequest_Fail); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[37].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowChunkRequest_Done); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[38].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowResponse_Dequeue); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[39].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowResponse_Status); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[40].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueWorkflowResponse_Committed); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[41].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueTaskRequest_Dequeue); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return 
&v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[42].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueTaskRequest_Fail); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[43].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueTaskRequest_Succeed); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[44].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueTaskResponse_Dequeue); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[45].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueTaskResponse_Cancel); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } file_interservice_cereal_cereal_proto_msgTypes[46].Exporter = func(v interface{}, i int) interface{} { switch v := v.(*DequeueTaskResponse_Committed); i { case 0: return &v.state case 1: return &v.sizeCache case 2: return &v.unknownFields default: return nil } } } file_interservice_cereal_cereal_proto_msgTypes[5].OneofWrappers = []interface{}{ (*DequeueWorkflowRequest_Dequeue_)(nil), (*DequeueWorkflowRequest_Continue_)(nil), (*DequeueWorkflowRequest_Done_)(nil), (*DequeueWorkflowRequest_Fail_)(nil), } file_interservice_cereal_cereal_proto_msgTypes[6].OneofWrappers = []interface{}{ (*DequeueWorkflowChunkRequest_Dequeue_)(nil), (*DequeueWorkflowChunkRequest_Continue_)(nil), (*DequeueWorkflowChunkRequest_Done_)(nil), (*DequeueWorkflowChunkRequest_Fail_)(nil), } file_interservice_cereal_cereal_proto_msgTypes[7].OneofWrappers = []interface{}{ (*DequeueWorkflowResponse_Dequeue_)(nil), (*DequeueWorkflowResponse_Status_)(nil), (*DequeueWorkflowResponse_Committed_)(nil), } file_interservice_cereal_cereal_proto_msgTypes[15].OneofWrappers = []interface{}{ (*DequeueTaskRequest_Dequeue_)(nil), (*DequeueTaskRequest_Succeed_)(nil), (*DequeueTaskRequest_Fail_)(nil), } file_interservice_cereal_cereal_proto_msgTypes[16].OneofWrappers = []interface{}{ (*DequeueTaskResponse_Dequeue_)(nil), (*DequeueTaskResponse_Cancel_)(nil), (*DequeueTaskResponse_Committed_)(nil), } type x struct{} out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), RawDescriptor: file_interservice_cereal_cereal_proto_rawDesc, NumEnums: 0, NumMessages: 47, NumExtensions: 0, NumServices: 1, }, GoTypes: file_interservice_cereal_cereal_proto_goTypes, DependencyIndexes: file_interservice_cereal_cereal_proto_depIdxs, MessageInfos: file_interservice_cereal_cereal_proto_msgTypes, }.Build() File_interservice_cereal_cereal_proto = out.File file_interservice_cereal_cereal_proto_rawDesc = nil file_interservice_cereal_cereal_proto_goTypes = nil file_interservice_cereal_cereal_proto_depIdxs = nil } // Reference imports to suppress errors if they are not otherwise used. var _ context.Context var _ grpc.ClientConnInterface // This is a compile-time assertion to ensure that this generated file // is compatible with the grpc package it is being compiled against. const _ = grpc.SupportPackageIsVersion6 // CerealServiceClient is the client API for CerealService service. 
// // For semantics around ctx use and closing/ending streaming RPCs, please refer to https://godoc.org/google.golang.org/grpc#ClientConn.NewStream. type CerealServiceClient interface { EnqueueWorkflow(ctx context.Context, in *EnqueueWorkflowRequest, opts ...grpc.CallOption) (*EnqueueWorkflowResponse, error) DequeueWorkflow(ctx context.Context, opts ...grpc.CallOption) (CerealService_DequeueWorkflowClient, error) DequeueWorkflowChunk(ctx context.Context, opts ...grpc.CallOption) (CerealService_DequeueWorkflowChunkClient, error) CancelWorkflow(ctx context.Context, in *CancelWorkflowRequest, opts ...grpc.CallOption) (*CancelWorkflowResponse, error) KillWorkflow(ctx context.Context, in *KillWorkflowRequest, opts ...grpc.CallOption) (*KillWorkflowResponse, error) DequeueTask(ctx context.Context, opts ...grpc.CallOption) (CerealService_DequeueTaskClient, error) CreateWorkflowSchedule(ctx context.Context, in *CreateWorkflowScheduleRequest, opts ...grpc.CallOption) (*CreateWorkflowScheduleResponse, error) ListWorkflowSchedules(ctx context.Context, in *ListWorkflowSchedulesRequest, opts ...grpc.CallOption) (CerealService_ListWorkflowSchedulesClient, error) GetWorkflowScheduleByName(ctx context.Context, in *GetWorkflowScheduleByNameRequest, opts ...grpc.CallOption) (*GetWorkflowScheduleByNameResponse, error) UpdateWorkflowScheduleByName(ctx context.Context, in *UpdateWorkflowScheduleByNameRequest, opts ...grpc.CallOption) (*UpdateWorkflowScheduleByNameResponse, error) GetWorkflowInstanceByName(ctx context.Context, in *GetWorkflowInstanceByNameRequest, opts ...grpc.CallOption) (*GetWorkflowInstanceByNameResponse, error) ListWorkflowInstances(ctx context.Context, in *ListWorkflowInstancesRequest, opts ...grpc.CallOption) (CerealService_ListWorkflowInstancesClient, error) } type cerealServiceClient struct { cc grpc.ClientConnInterface } func NewCerealServiceClient(cc grpc.ClientConnInterface) CerealServiceClient { return &cerealServiceClient{cc} } func (c *cerealServiceClient) EnqueueWorkflow(ctx context.Context, in *EnqueueWorkflowRequest, opts ...grpc.CallOption) (*EnqueueWorkflowResponse, error) { out := new(EnqueueWorkflowResponse) err := c.cc.Invoke(ctx, "/chef.automate.infra.cereal.CerealService/EnqueueWorkflow", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *cerealServiceClient) DequeueWorkflow(ctx context.Context, opts ...grpc.CallOption) (CerealService_DequeueWorkflowClient, error) { stream, err := c.cc.NewStream(ctx, &_CerealService_serviceDesc.Streams[0], "/chef.automate.infra.cereal.CerealService/DequeueWorkflow", opts...) if err != nil { return nil, err } x := &cerealServiceDequeueWorkflowClient{stream} return x, nil } type CerealService_DequeueWorkflowClient interface { Send(*DequeueWorkflowRequest) error Recv() (*DequeueWorkflowResponse, error) grpc.ClientStream } type cerealServiceDequeueWorkflowClient struct { grpc.ClientStream } func (x *cerealServiceDequeueWorkflowClient) Send(m *DequeueWorkflowRequest) error { return x.ClientStream.SendMsg(m) } func (x *cerealServiceDequeueWorkflowClient) Recv() (*DequeueWorkflowResponse, error) { m := new(DequeueWorkflowResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *cerealServiceClient) DequeueWorkflowChunk(ctx context.Context, opts ...grpc.CallOption) (CerealService_DequeueWorkflowChunkClient, error) { stream, err := c.cc.NewStream(ctx, &_CerealService_serviceDesc.Streams[1], "/chef.automate.infra.cereal.CerealService/DequeueWorkflowChunk", opts...) 
if err != nil { return nil, err } x := &cerealServiceDequeueWorkflowChunkClient{stream} return x, nil } type CerealService_DequeueWorkflowChunkClient interface { Send(*DequeueWorkflowChunkRequest) error Recv() (*DequeueWorkflowChunkResponse, error) grpc.ClientStream } type cerealServiceDequeueWorkflowChunkClient struct { grpc.ClientStream } func (x *cerealServiceDequeueWorkflowChunkClient) Send(m *DequeueWorkflowChunkRequest) error { return x.ClientStream.SendMsg(m) } func (x *cerealServiceDequeueWorkflowChunkClient) Recv() (*DequeueWorkflowChunkResponse, error) { m := new(DequeueWorkflowChunkResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *cerealServiceClient) CancelWorkflow(ctx context.Context, in *CancelWorkflowRequest, opts ...grpc.CallOption) (*CancelWorkflowResponse, error) { out := new(CancelWorkflowResponse) err := c.cc.Invoke(ctx, "/chef.automate.infra.cereal.CerealService/CancelWorkflow", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *cerealServiceClient) KillWorkflow(ctx context.Context, in *KillWorkflowRequest, opts ...grpc.CallOption) (*KillWorkflowResponse, error) { out := new(KillWorkflowResponse) err := c.cc.Invoke(ctx, "/chef.automate.infra.cereal.CerealService/KillWorkflow", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *cerealServiceClient) DequeueTask(ctx context.Context, opts ...grpc.CallOption) (CerealService_DequeueTaskClient, error) { stream, err := c.cc.NewStream(ctx, &_CerealService_serviceDesc.Streams[2], "/chef.automate.infra.cereal.CerealService/DequeueTask", opts...) if err != nil { return nil, err } x := &cerealServiceDequeueTaskClient{stream} return x, nil } type CerealService_DequeueTaskClient interface { Send(*DequeueTaskRequest) error Recv() (*DequeueTaskResponse, error) grpc.ClientStream } type cerealServiceDequeueTaskClient struct { grpc.ClientStream } func (x *cerealServiceDequeueTaskClient) Send(m *DequeueTaskRequest) error { return x.ClientStream.SendMsg(m) } func (x *cerealServiceDequeueTaskClient) Recv() (*DequeueTaskResponse, error) { m := new(DequeueTaskResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *cerealServiceClient) CreateWorkflowSchedule(ctx context.Context, in *CreateWorkflowScheduleRequest, opts ...grpc.CallOption) (*CreateWorkflowScheduleResponse, error) { out := new(CreateWorkflowScheduleResponse) err := c.cc.Invoke(ctx, "/chef.automate.infra.cereal.CerealService/CreateWorkflowSchedule", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *cerealServiceClient) ListWorkflowSchedules(ctx context.Context, in *ListWorkflowSchedulesRequest, opts ...grpc.CallOption) (CerealService_ListWorkflowSchedulesClient, error) { stream, err := c.cc.NewStream(ctx, &_CerealService_serviceDesc.Streams[3], "/chef.automate.infra.cereal.CerealService/ListWorkflowSchedules", opts...) 
if err != nil { return nil, err } x := &cerealServiceListWorkflowSchedulesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CerealService_ListWorkflowSchedulesClient interface { Recv() (*ListWorkflowSchedulesResponse, error) grpc.ClientStream } type cerealServiceListWorkflowSchedulesClient struct { grpc.ClientStream } func (x *cerealServiceListWorkflowSchedulesClient) Recv() (*ListWorkflowSchedulesResponse, error) { m := new(ListWorkflowSchedulesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func (c *cerealServiceClient) GetWorkflowScheduleByName(ctx context.Context, in *GetWorkflowScheduleByNameRequest, opts ...grpc.CallOption) (*GetWorkflowScheduleByNameResponse, error) { out := new(GetWorkflowScheduleByNameResponse) err := c.cc.Invoke(ctx, "/chef.automate.infra.cereal.CerealService/GetWorkflowScheduleByName", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *cerealServiceClient) UpdateWorkflowScheduleByName(ctx context.Context, in *UpdateWorkflowScheduleByNameRequest, opts ...grpc.CallOption) (*UpdateWorkflowScheduleByNameResponse, error) { out := new(UpdateWorkflowScheduleByNameResponse) err := c.cc.Invoke(ctx, "/chef.automate.infra.cereal.CerealService/UpdateWorkflowScheduleByName", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *cerealServiceClient) GetWorkflowInstanceByName(ctx context.Context, in *GetWorkflowInstanceByNameRequest, opts ...grpc.CallOption) (*GetWorkflowInstanceByNameResponse, error) { out := new(GetWorkflowInstanceByNameResponse) err := c.cc.Invoke(ctx, "/chef.automate.infra.cereal.CerealService/GetWorkflowInstanceByName", in, out, opts...) if err != nil { return nil, err } return out, nil } func (c *cerealServiceClient) ListWorkflowInstances(ctx context.Context, in *ListWorkflowInstancesRequest, opts ...grpc.CallOption) (CerealService_ListWorkflowInstancesClient, error) { stream, err := c.cc.NewStream(ctx, &_CerealService_serviceDesc.Streams[4], "/chef.automate.infra.cereal.CerealService/ListWorkflowInstances", opts...) if err != nil { return nil, err } x := &cerealServiceListWorkflowInstancesClient{stream} if err := x.ClientStream.SendMsg(in); err != nil { return nil, err } if err := x.ClientStream.CloseSend(); err != nil { return nil, err } return x, nil } type CerealService_ListWorkflowInstancesClient interface { Recv() (*ListWorkflowInstancesResponse, error) grpc.ClientStream } type cerealServiceListWorkflowInstancesClient struct { grpc.ClientStream } func (x *cerealServiceListWorkflowInstancesClient) Recv() (*ListWorkflowInstancesResponse, error) { m := new(ListWorkflowInstancesResponse) if err := x.ClientStream.RecvMsg(m); err != nil { return nil, err } return m, nil } // CerealServiceServer is the server API for CerealService service. 
type CerealServiceServer interface { EnqueueWorkflow(context.Context, *EnqueueWorkflowRequest) (*EnqueueWorkflowResponse, error) DequeueWorkflow(CerealService_DequeueWorkflowServer) error DequeueWorkflowChunk(CerealService_DequeueWorkflowChunkServer) error CancelWorkflow(context.Context, *CancelWorkflowRequest) (*CancelWorkflowResponse, error) KillWorkflow(context.Context, *KillWorkflowRequest) (*KillWorkflowResponse, error) DequeueTask(CerealService_DequeueTaskServer) error CreateWorkflowSchedule(context.Context, *CreateWorkflowScheduleRequest) (*CreateWorkflowScheduleResponse, error) ListWorkflowSchedules(*ListWorkflowSchedulesRequest, CerealService_ListWorkflowSchedulesServer) error GetWorkflowScheduleByName(context.Context, *GetWorkflowScheduleByNameRequest) (*GetWorkflowScheduleByNameResponse, error) UpdateWorkflowScheduleByName(context.Context, *UpdateWorkflowScheduleByNameRequest) (*UpdateWorkflowScheduleByNameResponse, error) GetWorkflowInstanceByName(context.Context, *GetWorkflowInstanceByNameRequest) (*GetWorkflowInstanceByNameResponse, error) ListWorkflowInstances(*ListWorkflowInstancesRequest, CerealService_ListWorkflowInstancesServer) error } // UnimplementedCerealServiceServer can be embedded to have forward compatible implementations. type UnimplementedCerealServiceServer struct { } func (*UnimplementedCerealServiceServer) EnqueueWorkflow(context.Context, *EnqueueWorkflowRequest) (*EnqueueWorkflowResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method EnqueueWorkflow not implemented") } func (*UnimplementedCerealServiceServer) DequeueWorkflow(CerealService_DequeueWorkflowServer) error { return status.Errorf(codes.Unimplemented, "method DequeueWorkflow not implemented") } func (*UnimplementedCerealServiceServer) DequeueWorkflowChunk(CerealService_DequeueWorkflowChunkServer) error { return status.Errorf(codes.Unimplemented, "method DequeueWorkflowChunk not implemented") } func (*UnimplementedCerealServiceServer) CancelWorkflow(context.Context, *CancelWorkflowRequest) (*CancelWorkflowResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CancelWorkflow not implemented") } func (*UnimplementedCerealServiceServer) KillWorkflow(context.Context, *KillWorkflowRequest) (*KillWorkflowResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method KillWorkflow not implemented") } func (*UnimplementedCerealServiceServer) DequeueTask(CerealService_DequeueTaskServer) error { return status.Errorf(codes.Unimplemented, "method DequeueTask not implemented") } func (*UnimplementedCerealServiceServer) CreateWorkflowSchedule(context.Context, *CreateWorkflowScheduleRequest) (*CreateWorkflowScheduleResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method CreateWorkflowSchedule not implemented") } func (*UnimplementedCerealServiceServer) ListWorkflowSchedules(*ListWorkflowSchedulesRequest, CerealService_ListWorkflowSchedulesServer) error { return status.Errorf(codes.Unimplemented, "method ListWorkflowSchedules not implemented") } func (*UnimplementedCerealServiceServer) GetWorkflowScheduleByName(context.Context, *GetWorkflowScheduleByNameRequest) (*GetWorkflowScheduleByNameResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetWorkflowScheduleByName not implemented") } func (*UnimplementedCerealServiceServer) UpdateWorkflowScheduleByName(context.Context, *UpdateWorkflowScheduleByNameRequest) (*UpdateWorkflowScheduleByNameResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method 
UpdateWorkflowScheduleByName not implemented") } func (*UnimplementedCerealServiceServer) GetWorkflowInstanceByName(context.Context, *GetWorkflowInstanceByNameRequest) (*GetWorkflowInstanceByNameResponse, error) { return nil, status.Errorf(codes.Unimplemented, "method GetWorkflowInstanceByName not implemented") } func (*UnimplementedCerealServiceServer) ListWorkflowInstances(*ListWorkflowInstancesRequest, CerealService_ListWorkflowInstancesServer) error { return status.Errorf(codes.Unimplemented, "method ListWorkflowInstances not implemented") } func RegisterCerealServiceServer(s *grpc.Server, srv CerealServiceServer) { s.RegisterService(&_CerealService_serviceDesc, srv) } func _CerealService_EnqueueWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(EnqueueWorkflowRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CerealServiceServer).EnqueueWorkflow(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/chef.automate.infra.cereal.CerealService/EnqueueWorkflow", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CerealServiceServer).EnqueueWorkflow(ctx, req.(*EnqueueWorkflowRequest)) } return interceptor(ctx, in, info, handler) } func _CerealService_DequeueWorkflow_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(CerealServiceServer).DequeueWorkflow(&cerealServiceDequeueWorkflowServer{stream}) } type CerealService_DequeueWorkflowServer interface { Send(*DequeueWorkflowResponse) error Recv() (*DequeueWorkflowRequest, error) grpc.ServerStream } type cerealServiceDequeueWorkflowServer struct { grpc.ServerStream } func (x *cerealServiceDequeueWorkflowServer) Send(m *DequeueWorkflowResponse) error { return x.ServerStream.SendMsg(m) } func (x *cerealServiceDequeueWorkflowServer) Recv() (*DequeueWorkflowRequest, error) { m := new(DequeueWorkflowRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _CerealService_DequeueWorkflowChunk_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(CerealServiceServer).DequeueWorkflowChunk(&cerealServiceDequeueWorkflowChunkServer{stream}) } type CerealService_DequeueWorkflowChunkServer interface { Send(*DequeueWorkflowChunkResponse) error Recv() (*DequeueWorkflowChunkRequest, error) grpc.ServerStream } type cerealServiceDequeueWorkflowChunkServer struct { grpc.ServerStream } func (x *cerealServiceDequeueWorkflowChunkServer) Send(m *DequeueWorkflowChunkResponse) error { return x.ServerStream.SendMsg(m) } func (x *cerealServiceDequeueWorkflowChunkServer) Recv() (*DequeueWorkflowChunkRequest, error) { m := new(DequeueWorkflowChunkRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _CerealService_CancelWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CancelWorkflowRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CerealServiceServer).CancelWorkflow(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/chef.automate.infra.cereal.CerealService/CancelWorkflow", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CerealServiceServer).CancelWorkflow(ctx, req.(*CancelWorkflowRequest)) } return interceptor(ctx, in, info, handler) } func 
_CerealService_KillWorkflow_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(KillWorkflowRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CerealServiceServer).KillWorkflow(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/chef.automate.infra.cereal.CerealService/KillWorkflow", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CerealServiceServer).KillWorkflow(ctx, req.(*KillWorkflowRequest)) } return interceptor(ctx, in, info, handler) } func _CerealService_DequeueTask_Handler(srv interface{}, stream grpc.ServerStream) error { return srv.(CerealServiceServer).DequeueTask(&cerealServiceDequeueTaskServer{stream}) } type CerealService_DequeueTaskServer interface { Send(*DequeueTaskResponse) error Recv() (*DequeueTaskRequest, error) grpc.ServerStream } type cerealServiceDequeueTaskServer struct { grpc.ServerStream } func (x *cerealServiceDequeueTaskServer) Send(m *DequeueTaskResponse) error { return x.ServerStream.SendMsg(m) } func (x *cerealServiceDequeueTaskServer) Recv() (*DequeueTaskRequest, error) { m := new(DequeueTaskRequest) if err := x.ServerStream.RecvMsg(m); err != nil { return nil, err } return m, nil } func _CerealService_CreateWorkflowSchedule_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CreateWorkflowScheduleRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CerealServiceServer).CreateWorkflowSchedule(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/chef.automate.infra.cereal.CerealService/CreateWorkflowSchedule", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CerealServiceServer).CreateWorkflowSchedule(ctx, req.(*CreateWorkflowScheduleRequest)) } return interceptor(ctx, in, info, handler) } func _CerealService_ListWorkflowSchedules_Handler(srv interface{}, stream grpc.ServerStream) error { m := new(ListWorkflowSchedulesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CerealServiceServer).ListWorkflowSchedules(m, &cerealServiceListWorkflowSchedulesServer{stream}) } type CerealService_ListWorkflowSchedulesServer interface { Send(*ListWorkflowSchedulesResponse) error grpc.ServerStream } type cerealServiceListWorkflowSchedulesServer struct { grpc.ServerStream } func (x *cerealServiceListWorkflowSchedulesServer) Send(m *ListWorkflowSchedulesResponse) error { return x.ServerStream.SendMsg(m) } func _CerealService_GetWorkflowScheduleByName_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetWorkflowScheduleByNameRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CerealServiceServer).GetWorkflowScheduleByName(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/chef.automate.infra.cereal.CerealService/GetWorkflowScheduleByName", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CerealServiceServer).GetWorkflowScheduleByName(ctx, req.(*GetWorkflowScheduleByNameRequest)) } return interceptor(ctx, in, info, handler) } func _CerealService_UpdateWorkflowScheduleByName_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { in := new(UpdateWorkflowScheduleByNameRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CerealServiceServer).UpdateWorkflowScheduleByName(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/chef.automate.infra.cereal.CerealService/UpdateWorkflowScheduleByName", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CerealServiceServer).UpdateWorkflowScheduleByName(ctx, req.(*UpdateWorkflowScheduleByNameRequest)) } return interceptor(ctx, in, info, handler) } func _CerealService_GetWorkflowInstanceByName_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(GetWorkflowInstanceByNameRequest) if err := dec(in); err != nil { return nil, err } if interceptor == nil { return srv.(CerealServiceServer).GetWorkflowInstanceByName(ctx, in) } info := &grpc.UnaryServerInfo{ Server: srv, FullMethod: "/chef.automate.infra.cereal.CerealService/GetWorkflowInstanceByName", } handler := func(ctx context.Context, req interface{}) (interface{}, error) { return srv.(CerealServiceServer).GetWorkflowInstanceByName(ctx, req.(*GetWorkflowInstanceByNameRequest)) } return interceptor(ctx, in, info, handler) } func
(srv interface{}, stream grpc.ServerStream) error { m := new(ListWorkflowInstancesRequest) if err := stream.RecvMsg(m); err != nil { return err } return srv.(CerealServiceServer).ListWorkflowInstances(m, &cerealServiceListWorkflowInstancesServer{stream}) } type CerealService_ListWorkflowInstancesServer interface { Send(*ListWorkflowInstancesResponse) error grpc.ServerStream } type cerealServiceListWorkflowInstancesServer struct { grpc.ServerStream } func (x *cerealServiceListWorkflowInstancesServer) Send(m *ListWorkflowInstancesResponse) error { return x.ServerStream.SendMsg(m) } var _CerealService_serviceDesc = grpc.ServiceDesc{ ServiceName: "chef.automate.infra.cereal.CerealService", HandlerType: (*CerealServiceServer)(nil), Methods: []grpc.MethodDesc{ { MethodName: "EnqueueWorkflow", Handler: _CerealService_EnqueueWorkflow_Handler, }, { MethodName: "CancelWorkflow", Handler: _CerealService_CancelWorkflow_Handler, }, { MethodName: "KillWorkflow", Handler: _CerealService_KillWorkflow_Handler, }, { MethodName: "CreateWorkflowSchedule", Handler: _CerealService_CreateWorkflowSchedule_Handler, }, { MethodName: "GetWorkflowScheduleByName", Handler: _CerealService_GetWorkflowScheduleByName_Handler, }, { MethodName: "UpdateWorkflowScheduleByName", Handler: _CerealService_UpdateWorkflowScheduleByName_Handler, }, { MethodName: "GetWorkflowInstanceByName", Handler: _CerealService_GetWorkflowInstanceByName_Handler, }, }, Streams: []grpc.StreamDesc{ { StreamName: "DequeueWorkflow", Handler: _CerealService_DequeueWorkflow_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "DequeueWorkflowChunk", Handler: _CerealService_DequeueWorkflowChunk_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "DequeueTask", Handler: _CerealService_DequeueTask_Handler, ServerStreams: true, ClientStreams: true, }, { StreamName: "ListWorkflowSchedules", Handler: _CerealService_ListWorkflowSchedules_Handler, ServerStreams: true, }, { StreamName: "ListWorkflowInstances", Handler: _CerealService_ListWorkflowInstances_Handler, ServerStreams: true, }, }, Metadata: "interservice/cereal/cereal.proto", }
_CerealService_ListWorkflowInstances_Handler
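A minimal sketch of how a caller might use the generated bindings above. The import path, server address, and dial options are assumptions for illustration, not anything taken from the generated file:

package main

import (
	"context"
	"log"

	"google.golang.org/grpc"

	cereal "github.com/chef/automate/api/interservice/cereal" // import path assumed
)

func main() {
	// Address and credentials are placeholders for illustration only.
	conn, err := grpc.Dial("localhost:10101", grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	client := cereal.NewCerealServiceClient(conn)
	// A real caller would populate the request with workflow/instance names
	// and serialized parameters; the sketch leaves it empty.
	if _, err := client.EnqueueWorkflow(context.Background(), &cereal.EnqueueWorkflowRequest{}); err != nil {
		log.Fatal(err)
	}
}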
backupentry.go
/*
Copyright (c) 2021 SAP SE or an SAP affiliate company. All rights reserved. This file is licensed under the Apache Software License, v. 2 except as noted otherwise in the LICENSE file

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by lister-gen. DO NOT EDIT.

package internalversion

import (
	core "github.com/gardener/gardener/pkg/apis/core"
	"k8s.io/apimachinery/pkg/api/errors"
	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"
)

// BackupEntryLister helps list BackupEntries.
type BackupEntryLister interface {
	// List lists all BackupEntries in the indexer.
	List(selector labels.Selector) (ret []*core.BackupEntry, err error)
	// BackupEntries returns an object that can list and get BackupEntries.
	BackupEntries(namespace string) BackupEntryNamespaceLister
	BackupEntryListerExpansion
}

// backupEntryLister implements the BackupEntryLister interface.
type backupEntryLister struct {
	indexer cache.Indexer
}

// NewBackupEntryLister returns a new BackupEntryLister.
func NewBackupEntryLister(indexer cache.Indexer) BackupEntryLister {
	return &backupEntryLister{indexer: indexer}
}

// List lists all BackupEntries in the indexer.
func (s *backupEntryLister) List(selector labels.Selector) (ret []*core.BackupEntry, err error) {
	err = cache.ListAll(s.indexer, selector, func(m interface{}) {
		ret = append(ret, m.(*core.BackupEntry))
	})
	return ret, err
}

// BackupEntries returns an object that can list and get BackupEntries.
func (s *backupEntryLister) BackupEntries(namespace string) BackupEntryNamespaceLister {
	return backupEntryNamespaceLister{indexer: s.indexer, namespace: namespace}
}

// BackupEntryNamespaceLister helps list and get BackupEntries.
type BackupEntryNamespaceLister interface {
	// List lists all BackupEntries in the indexer for a given namespace.
	List(selector labels.Selector) (ret []*core.BackupEntry, err error)
	// Get retrieves the BackupEntry from the indexer for a given namespace and name.
	Get(name string) (*core.BackupEntry, error)
	BackupEntryNamespaceListerExpansion
}

// backupEntryNamespaceLister implements the BackupEntryNamespaceLister
// interface.
type backupEntryNamespaceLister struct {
	indexer   cache.Indexer
	namespace string
}

// List lists all BackupEntries in the indexer for a given namespace.
func (s backupEntryNamespaceLister) List(selector labels.Selector) (ret []*core.BackupEntry, err error) {
	err = cache.ListAllByNamespace(s.indexer, s.namespace, selector, func(m interface{}) {
		ret = append(ret, m.(*core.BackupEntry))
	})
	return ret, err
}

// Get retrieves the BackupEntry from the indexer for a given namespace and name.
func (s backupEntryNamespaceLister) Get(name string) (*core.BackupEntry, error) {
	obj, exists, err := s.indexer.GetByKey(s.namespace + "/" + name)
	if err != nil {
		return nil, err
	}
	if !exists {
		return nil, errors.NewNotFound(core.Resource("backupentry"), name)
	}
	return obj.(*core.BackupEntry), nil
}
distributed under the License is distributed on an "AS IS" BASIS,
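A usage sketch for the lister above. The import path, namespace, and object name are assumptions; in practice the indexer is kept in sync by an informer rather than constructed empty:

package main

import (
	"fmt"

	"k8s.io/apimachinery/pkg/labels"
	"k8s.io/client-go/tools/cache"

	// Assumed import path for the generated listers package above.
	internalversion "github.com/gardener/gardener/pkg/client/core/listers/core/internalversion"
)

func main() {
	// An empty indexer is enough to show the lister API shape.
	indexer := cache.NewIndexer(cache.MetaNamespaceKeyFunc, cache.Indexers{})
	lister := internalversion.NewBackupEntryLister(indexer)

	// List every BackupEntry the indexer knows about.
	entries, err := lister.List(labels.Everything())
	fmt.Println(len(entries), err)

	// Scope to a namespace and fetch a single object by name.
	entry, err := lister.BackupEntries("garden-dev").Get("my-backup-entry")
	fmt.Println(entry, err) // NotFound error here, since the indexer is empty
}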
nrelastic.go
// Copyright 2020 New Relic Corporation. All rights reserved.
// SPDX-License-Identifier: Apache-2.0

// Package nrelasticsearch instruments https://github.com/elastic/go-elasticsearch.
//
// Use this package to instrument your elasticsearch v7 calls without having to
// manually create DatastoreSegments.
package nrelasticsearch

import (
	"net/http"
	"strings"

	"github.com/Easypay/go-agent/v3/internal"
	newrelic "github.com/Easypay/go-agent/v3/newrelic"
)

func init() { internal.TrackUsage("integration", "datastore", "elasticsearch") }

func parseRequest(r *http.Request) (segment newrelic.DatastoreSegment) {
	segment.StartTime = newrelic.FromContext(r.Context()).StartSegmentNow()
	segment.Product = newrelic.DatastoreElasticsearch

	path := strings.TrimPrefix(r.URL.Path, "/")
	method := r.Method

	if "" == path {
		switch method {
		case "GET":
			segment.Operation = "info"
		case "HEAD":
			segment.Operation = "ping"
		}
		return
	}
	segments := strings.Split(path, "/")
	for idx, s := range segments {
		switch s {
		case "_alias", "_aliases", "_analyze", "_bulk", "_cache", "_cat",
			"_clone", "_close", "_cluster", "_count", "_create",
			"_delete_by_query", "_explain", "_field_caps", "_flush",
			"_forcemerge", "_ingest", "_mapping", "_mappings", "_mget",
			"_msearch", "_mtermvectors", "_nodes", "_open", "_rank_eval",
			"_recovery", "_refresh", "_reindex", "_remote", "_render",
			"_rollover", "_scripts", "_search_shards", "_segments",
			"_settings", "_shard_stores", "_shrink", "_snapshot", "_source",
			"_split", "_stats", "_tasks", "_template", "_termvectors",
			"_update", "_update_by_query", "_upgrade", "_validate":
			segment.Operation = strings.TrimPrefix(s, "_")
			if idx > 0 {
				segment.Collection = segments[0]
			}
			return
		case "_doc":
			switch method {
			case "DELETE":
				segment.Operation = "delete"
			case "HEAD":
				segment.Operation = "exists"
			case "GET":
				segment.Operation = "get"
			case "PUT":
				segment.Operation = "update"
			case "POST":
				segment.Operation = "create"
			}
			if idx > 0 {
				segment.Collection = segments[0]
			}
			return
		case "_search":
			// clear_scroll.json DELETE /_search/scroll
			// clear_scroll.json DELETE /_search/scroll/{scroll_id}
			// scroll.json GET /_search/scroll
			// scroll.json GET /_search/scroll/{scroll_id}
			// scroll.json POST /_search/scroll
			// scroll.json POST /_search/scroll/{scroll_id}
			// search.json GET /_search
			// search.json GET /{index}/_search
			// search.json GET /{index}/{type}/_search
			// search.json POST /_search
			// search.json POST /{index}/_search
			// search.json POST /{index}/{type}/_search
			// search_template.json GET /_search/template
			// search_template.json GET /{index}/_search/template
			// search_template.json GET /{index}/{type}/_search/template
			// search_template.json POST /_search/template
			// search_template.json POST /{index}/_search/template
			// search_template.json POST /{index}/{type}/_search/template
			if method == "DELETE" {
				segment.Operation = "clear_scroll"
				return
			}
			if idx == len(segments)-1 {
				segment.Operation = "search"
				if idx > 0 {
					segment.Collection = segments[0]
				}
				return
			}
			next := segments[idx+1]
			if next == "scroll" {
				segment.Operation = "scroll"
				return
			}
			if next == "template" {
				segment.Operation = "search_template"
				if idx > 0 {
					segment.Collection = segments[0]
				}
				return
			}
			return
		}
	}
	return
}

type roundtripper struct{ original http.RoundTripper }

func (t roundtripper) RoundTrip(r *http.Request) (*http.Response, error) {
	segment := parseRequest(r)
	defer segment.End()
	return t.original.RoundTrip(r)
}

// NewRoundTripper creates a new http.RoundTripper to instrument elasticsearch
// calls. If an http.RoundTripper parameter is not provided, then the returned
// http.RoundTripper will delegate to http.DefaultTransport.
func
(original http.RoundTripper) http.RoundTripper {
	if nil == original {
		original = http.DefaultTransport
	}
	return roundtripper{original: original}
}
NewRoundTripper
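A sketch of wiring the round tripper above into a go-elasticsearch v7 client; the integration import path is an assumption. Note that segments are only recorded when the request context carries a New Relic transaction (e.g. attached via newrelic.NewContext before the call):

package main

import (
	"log"

	elasticsearch "github.com/elastic/go-elasticsearch/v7"

	// Assumed import path for the integration package above.
	nrelasticsearch "github.com/Easypay/go-agent/v3/integrations/nrelasticsearch"
)

func main() {
	// Passing nil makes the instrumented round tripper delegate to
	// http.DefaultTransport, per the NewRoundTripper documentation.
	client, err := elasticsearch.NewClient(elasticsearch.Config{
		Transport: nrelasticsearch.NewRoundTripper(nil),
	})
	if err != nil {
		log.Fatal(err)
	}
	_ = client // requests issued through client now produce DatastoreSegments
}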
fields.go
// Licensed to Elasticsearch B.V. under one or more contributor
// license agreements. See the NOTICE file distributed with
// this work for additional information regarding copyright
// ownership. Elasticsearch B.V. licenses this file to you under
// the Apache License, Version 2.0 (the "License"); you may
// not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.

// Code generated by beats/dev-tools/cmd/asset/asset.go - DO NOT EDIT.

package apache

import (
	"github.com/wangjia184/beats/libbeat/asset"
)

func init() {
	if err := asset.SetFields("metricbeat", "apache", asset.ModuleFieldsPri, AssetApache); err != nil {
		panic(err)
	}
}

// AssetApache returns asset data.
// This is the base64 encoded gzipped contents of ../metricbeat/module/apache.
func AssetApache() string
{ return "eJzMl8GO2zYQhu9+ioHOjZE95OJDgTQB2qKLNtjdoIeiUGhqJBOmOSyHWsNvX5CUVa1XUmzXDKLTwlr9/zfD4Qz5BrZ4WIGwQm5wAeCV17iC4n38oVgAVMjSKesVmRX8uAAASC/hl6enTx+B0T2jgx16pySjZ5CkNUqPFdSOduA3ePxij+vu/5cLAN6Q86UkU6tmBbXQHAgcahSMK2hE+B/0XpmGV/BXwayLH6DYeG+LvxcAtUJd8SoivQEjdjgIJDz+YIOOo9Z2v4wEE54v6bMvIMl4oQxH6C4k8BvhYY8OgaUT9hhXimnZiQxhhkDshW+5/3kMKjwvoz4+E7gROQlfhjxYivQ5WNH0IYyFMQxlQ+zDXy9eHgPa4mFPrjp5NxPAoJCOwstRW09e6FJIiczIo+aaTHOZ81MQBdPu1uiAakjy4PCfFtnzHMl2ffDZOLZKU9RP+6QaBzlilhZdyShHYVgKjVVZaxL+MqiHTh4sOmCUZCY4ImkmiJ9iFs4n6HKSjaLTH8fYk9ui4+W65cONKuP3viaCaO8wa68qPb47/499EJ23b61XE23htM+d4fw5qsUGdeI33p6GJGm4lKNAs9k4gys8j2nYJX1QpqvNV5xfyc3NUcbXRdr2Vovy4dPn61ZEkzgdCnDOvjwzC4HrnkQ1swCMLjNAsIiBTlPwgT3uMnI8RoOw5NMQcqN05dCU3yAnVPd2MT9nUGXP0SlXMpzYO2QMyqA2Puqv2UO95HVbKR4/MnSSdAYZBDy9VoIPRi73ToVDeQaU90F/gAKd1deItoi2FFo95+i0CSpYYAXR5JJkSU2cNVmaGKtpoplGfHUlh44L4hmdaPDiMi7uimv3eHitTFPWQnpyK7h7+/a61A0DgJpcvBlpwR52yrQepxe1ePc907/r+Geqsrj7riO4mwihH6SSHK5JvLpqXl3Mj73i8fp88anTCxe6VPnKGm5x0OvUobXTq+pQVIFg/BZ0C4yH5DBxRX6RDjQdi9Wnt6GbJCTpw0PQn8YIDTvXSPgN0aZRMO1fGS410TZLUXw0DPdRfGYhutFT/jcbMpB8SCbnjURNTZNnGN5PKB+dGyck1q3Wh7JWRvEmD8bPvQ30NtPpCFfrUmoUJkuR/Bpu7p38zKKQDYdvTTm6xh82nLM1zfWLvYhnvLIml7dU/0xGceCcVa5ZT9zLxb8BAAD//2GFvEU=" }
pauli_string_pybind_test.py
# Copyright 2021 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import stim import pytest def test_identity(): p = stim.PauliString(3) assert len(p) == 3 assert p[0] == p[1] == p[2] == 0 assert p.sign == +1 def test_from_str(): p = stim.PauliString("-_XYZ_ZYX") assert len(p) == 8 assert p[0] == 0 assert p[1] == 1 assert p[2] == 2 assert p[3] == 3 assert p[4] == 0 assert p[5] == 3 assert p[6] == 2 assert p[7] == 1 assert p.sign == -1 p = stim.PauliString("") assert len(p) == 0 assert p.sign == +1 p = stim.PauliString("X") assert len(p) == 1 assert p[0] == 1 assert p.sign == +1 p = stim.PauliString("+X") assert len(p) == 1 assert p[0] == 1 assert p.sign == +1 p = stim.PauliString("iX") assert len(p) == 1 assert p[0] == 1 assert p.sign == 1j p = stim.PauliString("+iX") assert len(p) == 1 assert p[0] == 1 assert p.sign == 1j p = stim.PauliString("-iX") assert len(p) == 1 assert p[0] == 1 assert p.sign == -1j def test_equality(): assert not (stim.PauliString(4) == None) assert not (stim.PauliString(4) == "other object") assert not (stim.PauliString(4) == object()) assert stim.PauliString(4) != None assert stim.PauliString(4) != "other object" assert stim.PauliString(4) != object() assert stim.PauliString(4) == stim.PauliString(4) assert stim.PauliString(3) != stim.PauliString(4) assert not (stim.PauliString(4) != stim.PauliString(4)) assert not (stim.PauliString(3) == stim.PauliString(4)) assert stim.PauliString("+X") == stim.PauliString("+X") assert stim.PauliString("+X") != stim.PauliString("-X") assert stim.PauliString("+X") != stim.PauliString("+Y") assert stim.PauliString("+X") != stim.PauliString("-Y") assert stim.PauliString("+X") != stim.PauliString("+iX") assert stim.PauliString("+X") != stim.PauliString("-iX") assert stim.PauliString("__") != stim.PauliString("_X") assert stim.PauliString("__") != stim.PauliString("X_") assert stim.PauliString("__") != stim.PauliString("XX") assert stim.PauliString("__") == stim.PauliString("__") def test_random(): p1 = stim.PauliString.random(100) p2 = stim.PauliString.random(100) assert p1 != p2 seen_signs = {stim.PauliString.random(1).sign for _ in range(200)} assert seen_signs == {1, -1} seen_signs = {stim.PauliString.random(1, allow_imaginary=True).sign for _ in range(200)} assert seen_signs == {1, -1, 1j, -1j} def test_str(): assert str(stim.PauliString(3)) == "+___" assert str(stim.PauliString("XYZ")) == "+XYZ" assert str(stim.PauliString("-XYZ")) == "-XYZ" assert str(stim.PauliString("iXYZ")) == "+iXYZ" assert str(stim.PauliString("-iXYZ")) == "-iXYZ" def test_repr(): assert repr(stim.PauliString(3)) == 'stim.PauliString("+___")' assert repr(stim.PauliString("-XYZ")) == 'stim.PauliString("-XYZ")' vs = [ stim.PauliString(""), stim.PauliString("ZXYZZ"), stim.PauliString("-XYZ"), stim.PauliString("I"), stim.PauliString("iIXYZ"), stim.PauliString("-iIXYZ"), ] for v in vs: r = repr(v) assert eval(r, {'stim': stim}) == v def test_commutes(): def c(a: str, b: str) -> bool: return stim.PauliString(a).commutes(stim.PauliString(b)) assert c("", "") assert 
c("X", "_") assert c("X", "X") assert not c("X", "Y") assert not c("X", "Z") assert c("XXXX", "YYYY") assert c("XXXX", "YYYZ") assert not c("XXXX", "XXXZ") assert not c("XXXX", "___Z") assert not c("XXXX", "Z___") assert c("XXXX", "Z_Z_") def test_product(): assert stim.PauliString("") * stim.PauliString("") == stim.PauliString("") assert stim.PauliString("i") * stim.PauliString("i") == stim.PauliString("-") assert stim.PauliString("i") * stim.PauliString("-i") == stim.PauliString("+") assert stim.PauliString("-i") * stim.PauliString("-i") == stim.PauliString("-") assert stim.PauliString("i") * stim.PauliString("-") == stim.PauliString("-i") x = stim.PauliString("X") y = stim.PauliString("Y") z = stim.PauliString("Z") assert x == +1 * x == x * +1 == +x assert x * -1 == -x == -1 * x assert (-x)[0] == 1 assert (-x).sign == -1 assert -(-x) == x assert stim.PauliString(10) * stim.PauliString(11) == stim.PauliString(11) assert x * z == stim.PauliString("-iY") assert x * x == stim.PauliString(1) assert x * y == stim.PauliString("iZ") assert y * x == stim.PauliString("-iZ") assert x * y == 1j * z assert y * x == z * -1j assert x.extended_product(y) == (1, 1j * z) assert y.extended_product(x) == (1, -1j * z) assert x.extended_product(x) == (1, stim.PauliString(1)) xx = stim.PauliString("+XX") yy = stim.PauliString("+YY") zz = stim.PauliString("+ZZ") assert xx * zz == -yy assert xx.extended_product(zz) == (1, -yy) def
(): p = stim.PauliString("X") alias = p p *= 1j assert alias == stim.PauliString("iX") assert alias is p p *= 1j assert alias == stim.PauliString("-X") p *= 1j assert alias == stim.PauliString("-iX") p *= 1j assert alias == stim.PauliString("+X") p *= stim.PauliString("Z") assert alias == stim.PauliString("-iY") p *= -1j assert alias == stim.PauliString("-Y") p *= -1j assert alias == stim.PauliString("iY") p *= -1j assert alias == stim.PauliString("+Y") p *= -1j assert alias == stim.PauliString("-iY") p *= stim.PauliString("i_") assert alias == stim.PauliString("+Y") p *= stim.PauliString("i_") assert alias == stim.PauliString("iY") p *= stim.PauliString("i_") assert alias == stim.PauliString("-Y") p *= stim.PauliString("i_") assert alias == stim.PauliString("-iY") p *= stim.PauliString("-i_") assert alias == stim.PauliString("-Y") p *= stim.PauliString("-i_") assert alias == stim.PauliString("iY") p *= stim.PauliString("-i_") assert alias == stim.PauliString("+Y") p *= stim.PauliString("-i_") assert alias == stim.PauliString("-iY") assert alias is p def test_imaginary_phase(): p = stim.PauliString("IXYZ") ip = stim.PauliString("iIXYZ") assert 1j * p == p * 1j == ip == -stim.PauliString("-iIXYZ") assert p.sign == 1 assert (-p).sign == -1 assert ip.sign == 1j assert (-ip).sign == -1j assert stim.PauliString("X") * stim.PauliString("Y") == 1j * stim.PauliString("Z") assert stim.PauliString("Y") * stim.PauliString("X") == -1j * stim.PauliString("Z") def test_get_set_sign(): p = stim.PauliString(2) assert p.sign == +1 p.sign = -1 assert str(p) == "-__" assert p.sign == -1 p.sign = +1 assert str(p) == "+__" assert p.sign == +1 with pytest.raises(ValueError, match="new_sign"): p.sign = 5 p.sign = 1j assert str(p) == "+i__" assert p.sign == 1j p.sign = -1j assert str(p) == "-i__" assert p.sign == -1j def test_get_set_item(): p = stim.PauliString(5) assert list(p) == [0, 0, 0, 0, 0] assert p[0] == 0 p[0] = 1 assert p[0] == 1 p[0] = 'Y' assert p[0] == 2 p[0] = 'Z' assert p[0] == 3 with pytest.raises(IndexError, match="new_pauli"): p[0] = 't' with pytest.raises(IndexError, match="new_pauli"): p[0] = 10 assert p[1] == 0 p[1] = 2 assert p[1] == 2 def test_get_slice(): p = stim.PauliString("XXXX__YYYY__ZZZZX") assert p[:7] == stim.PauliString("XXXX__Y") assert p[:-3] == stim.PauliString("XXXX__YYYY__ZZ") assert p[::2] == stim.PauliString("XX_YY_ZZX") assert p[::-1] == stim.PauliString("XZZZZ__YYYY__XXXX") assert p[-3:3] == stim.PauliString("") assert p[-6:-1] == stim.PauliString("_ZZZZ") assert p[3:5:-1] == stim.PauliString("") assert p[5:3:-1] == stim.PauliString("__") assert p[4:2:-1] == stim.PauliString("_X") assert p[2:0:-1] == stim.PauliString("XX") def test_copy(): p = stim.PauliString(3) p2 = p.copy() assert p == p2 assert p is not p2 p = stim.PauliString("-i_XYZ") p2 = p.copy() assert p == p2 assert p is not p2 def test_hash(): # stim.PauliString is mutable. It must not also be value-hashable. # Defining __hash__ requires defining a FrozenPauliString variant instead. 
with pytest.raises(TypeError, match="unhashable"): _ = hash(stim.PauliString(1)) def test_add(): ps = stim.PauliString assert ps(0) + ps(0) == ps(0) assert ps(3) + ps(1000) == ps(1003) assert ps(1000) + ps(3) == ps(1003) assert ps("_XYZ") + ps("_ZZZ_") == ps("_XYZ_ZZZ_") p = ps("_XYZ") p += p assert p == ps("_XYZ_XYZ") for k in range(1, 8): p += p assert p == ps("_XYZ_XYZ" * 2**k) p = ps("_XXX") p += ps("Y") assert p == ps("_XXXY") p = ps("") alias = p p += ps("X") assert alias is p assert alias == ps("X") p += p assert alias is p assert alias == ps("XX") def test_mul_different_sizes(): ps = stim.PauliString assert ps("") * ps("X" * 1000) == ps("X" * 1000) assert ps("X" * 1000) * ps("") == ps("X" * 1000) assert ps("Z" * 1000) * ps("") == ps("Z" * 1000) p = ps("Z") alias = p p *= ps("ZZZ") assert p == ps("_ZZ") p *= ps("Z") assert p == ps("ZZZ") assert alias is p def test_div(): assert stim.PauliString("+XYZ") / +1 == stim.PauliString("+XYZ") assert stim.PauliString("+XYZ") / -1 == stim.PauliString("-XYZ") assert stim.PauliString("+XYZ") / 1j == stim.PauliString("-iXYZ") assert stim.PauliString("+XYZ") / -1j == stim.PauliString("iXYZ") assert stim.PauliString("iXYZ") / 1j == stim.PauliString("XYZ") p = stim.PauliString("__") alias = p assert p / -1 == stim.PauliString("-__") assert alias == stim.PauliString("__") p /= -1 assert alias == stim.PauliString("-__") p /= 1j assert alias == stim.PauliString("i__") p /= 1j assert alias == stim.PauliString("__") p /= -1j assert alias == stim.PauliString("i__") p /= 1 assert alias == stim.PauliString("i__") def test_mul_repeat(): ps = stim.PauliString assert ps("") * 100 == ps("") assert ps("X") * 100 == ps("X" * 100) assert ps("XYZ_") * 1000 == ps("XYZ_" * 1000) assert ps("XYZ_") * 1 == ps("XYZ_") assert ps("XYZ_") * 0 == ps("") assert 100 * ps("") == ps("") assert 100 * ps("X") == ps("X" * 100) assert 1000 * ps("XYZ_") == ps("XYZ_" * 1000) assert 1 * ps("XYZ_") == ps("XYZ_") assert 0 * ps("XYZ_") == ps("") assert ps("i") * 0 == ps("+") assert ps("i") * 1 == ps("i") assert ps("i") * 2 == ps("-") assert ps("i") * 3 == ps("-i") assert ps("i") * 4 == ps("+") assert ps("i") * 5 == ps("i") assert ps("-i") * 0 == ps("+") assert ps("-i") * 1 == ps("-i") assert ps("-i") * 2 == ps("-") assert ps("-i") * 3 == ps("i") assert ps("-i") * 4 == ps("+") assert ps("-i") * 5 == ps("-i") assert ps("-") * 0 == ps("+") assert ps("-") * 1 == ps("-") assert ps("-") * 2 == ps("+") assert ps("-") * 3 == ps("-") assert ps("-") * 4 == ps("+") assert ps("-") * 5 == ps("-") p = ps("XYZ") alias = p p *= 1000 assert p == ps("XYZ" * 1000) assert alias is p def test_init_list(): assert stim.PauliString([]) == stim.PauliString(0) assert stim.PauliString([0, 1, 2, 3]) == stim.PauliString("_XYZ") with pytest.raises(ValueError, match="pauli"): _ = stim.PauliString([-1]) with pytest.raises(ValueError, match="pauli"): _ = stim.PauliString([4]) with pytest.raises(TypeError): _ = stim.PauliString([2**500]) def test_init_copy(): p = stim.PauliString("_XYZ") p2 = stim.PauliString(p) assert p is not p2 assert p == p2 p = stim.PauliString("-i_XYZ") p2 = stim.PauliString(p) assert p is not p2 assert p == p2 def test_commutes_different_lengths(): x1000 = stim.PauliString("X" * 1000) z1000 = stim.PauliString("Z" * 1000) x1 = stim.PauliString("X") z1 = stim.PauliString("Z") assert x1.commutes(x1000) assert x1000.commutes(x1) assert z1.commutes(z1000) assert z1000.commutes(z1) assert not z1.commutes(x1000) assert not x1000.commutes(z1) assert not x1.commutes(z1000) assert not z1000.commutes(x1) def 
test_pickle(): import pickle t = stim.PauliString.random(4) a = pickle.dumps(t) assert pickle.loads(a) == t t = stim.PauliString("i_XYZ") a = pickle.dumps(t) assert pickle.loads(a) == t
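The tests above pin down stim's PauliString phase conventions; a minimal standalone sketch of the same algebra (assuming the `stim` package is installed — everything below mirrors assertions already made in the test file):

import stim

x = stim.PauliString("X")
z = stim.PauliString("Z")

# Anticommuting single-qubit products pick up an imaginary phase:
# X*Z == -iY, and reversing the order flips the phase to +iY.
assert x * z == stim.PauliString("-iY")
assert z * x == stim.PauliString("iY")

# The phase lives in .sign, which is one of +1, -1, 1j, -1j.
p = stim.PauliString("XYZ")
p *= 1j
assert p.sign == 1j

# Entries are mutable in place; 0/1/2/3 encode _/X/Y/Z.
p[0] = "Z"
assert p[0] == 3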
controller.py
import os from fastapi import APIRouter, Depends
from sqlalchemy.orm import Session from starlette.responses import FileResponse from histocat.api.db import get_db from histocat.core.panorama import service router = APIRouter() @router.get("/panoramas/{id}/image", responses={200: {"content": {"image/png": {}}}}) async def read_panorama_image( id: int, # user: User = Depends(get_current_active_user), db: Session = Depends(get_db), ): """ Get panorama image by id """ item = service.get(db, id=id) slide = item.slide return FileResponse( os.path.join( item.slide.location, "origin", f"{slide.name}_s{slide.origin_id}_p{item.origin_id}_pano.png", ), media_type="image/png", )
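For context, a hypothetical client-side smoke check for this endpoint; the base URL, port, panorama id, and output filename are assumptions for illustration, not values taken from this service:

import requests

# Fetch the rendered panorama PNG from the route defined above.
resp = requests.get("http://localhost:8000/panoramas/1/image")
assert resp.status_code == 200
assert resp.headers["Content-Type"] == "image/png"

# Persist the bytes locally; filename is arbitrary.
with open("pano.png", "wb") as fh:
    fh.write(resp.content)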
remote_client_test.go
package cmd_test import ( "context" "errors" "flag" "fmt" "math/big" "net/http" "os" "strconv" "testing" "time" "github.com/pelletier/go-toml" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "github.com/urfave/cli" "gopkg.in/guregu/null.v4" "github.com/smartcontractkit/chainlink/core/auth" "github.com/smartcontractkit/chainlink/core/bridges" evmmocks "github.com/smartcontractkit/chainlink/core/chains/evm/mocks" "github.com/smartcontractkit/chainlink/core/cmd" "github.com/smartcontractkit/chainlink/core/config" "github.com/smartcontractkit/chainlink/core/internal/cltest" "github.com/smartcontractkit/chainlink/core/internal/testutils/configtest" "github.com/smartcontractkit/chainlink/core/internal/testutils/pgtest" "github.com/smartcontractkit/chainlink/core/logger" "github.com/smartcontractkit/chainlink/core/services/job" "github.com/smartcontractkit/chainlink/core/sessions" "github.com/smartcontractkit/chainlink/core/testdata/testspecs" "github.com/smartcontractkit/chainlink/core/web" ) var ( nilContext = cli.NewContext(nil, nil, nil) ) type startOptions struct { // Set the config options SetConfig func(cfg *configtest.TestGeneralConfig) // Use to set up mocks on the app FlagsAndDeps []interface{} // Add a key on start up WithKey bool } func startNewApplication(t *testing.T, setup ...func(opts *startOptions)) *cltest.TestApplication { t.Helper() sopts := &startOptions{ FlagsAndDeps: []interface{}{}, } for _, fn := range setup { fn(sopts) } // Setup config config := cltest.NewTestGeneralConfig(t) config.Overrides.SetDefaultHTTPTimeout(30 * time.Millisecond) // Generally speaking, most tests that use startNewApplication don't // actually need ChainSets loaded. We can greatly reduce test // overhead by disabling EVM here. If you need EVM interactions in // your tests, you can manually override and turn it on using // withConfigSet. config.Overrides.EVMEnabled = null.BoolFrom(false) if sopts.SetConfig != nil { sopts.SetConfig(config) } app := cltest.NewApplicationWithConfigAndKey(t, config, sopts.FlagsAndDeps...) 
require.NoError(t, app.Start()) return app } // withConfig is a function option which sets config on the app func withConfigSet(cfgSet func(*configtest.TestGeneralConfig)) func(opts *startOptions) { return func(opts *startOptions) { opts.SetConfig = cfgSet } } func withMocks(mks ...interface{}) func(opts *startOptions) { return func(opts *startOptions) { opts.FlagsAndDeps = mks } } func withKey() func(opts *startOptions) { return func(opts *startOptions) { opts.WithKey = true } } func newEthMock(t *testing.T) (*evmmocks.Client, func()) { t.Helper() ethClient, _, assertMocksCalled := cltest.NewEthMocksWithStartupAssertions(t) return ethClient, assertMocksCalled } func newEthMockWithTransactionsOnBlocksAssertions(t *testing.T) (*evmmocks.Client, func()) { t.Helper() ethClient, _, assertMocksCalled := cltest.NewEthMocksWithTransactionsOnBlocksAssertions(t) return ethClient, assertMocksCalled } func keyNameForTest(t *testing.T) string { return fmt.Sprintf("%s_test_key.json", t.Name()) } func deleteKeyExportFile(t *testing.T) { keyName := keyNameForTest(t) err := os.Remove(keyName) if err == nil || os.IsNotExist(err) { return } require.NoError(t, err) } func TestClient_ReplayBlocks(t *testing.T) { t.Parallel() app := startNewApplication(t, withConfigSet(func(c *configtest.TestGeneralConfig) { c.Overrides.EVMEnabled = null.BoolFrom(true) c.Overrides.GlobalEvmNonceAutoSync = null.BoolFrom(false) c.Overrides.GlobalBalanceMonitorEnabled = null.BoolFrom(false) c.Overrides.GlobalGasEstimatorMode = null.StringFrom("FixedPrice") })) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("flagset", 0) set.Int64("block-number", 42, "") c := cli.NewContext(nil, set, nil) assert.NoError(t, client.ReplayFromBlock(c)) } func TestClient_CreateExternalInitiator(t *testing.T) { t.Parallel() tests := []struct { name string args []string }{ {"create external initiator", []string{"exi", "http://testing.com/external_initiators"}}, {"create external initiator w/ query params", []string{"exiqueryparams", "http://testing.com/external_initiators?query=param"}}, {"create external initiator w/o url", []string{"exi_no_url"}}, } for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { app := startNewApplication(t) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("create", 0) assert.NoError(t, set.Parse(test.args)) c := cli.NewContext(nil, set, nil) err := client.CreateExternalInitiator(c) require.NoError(t, err) var exi bridges.ExternalInitiator err = app.GetSqlxDB().Get(&exi, `SELECT * FROM external_initiators WHERE name = $1`, test.args[0]) require.NoError(t, err) if len(test.args) > 1 { assert.Equal(t, test.args[1], exi.URL.String()) } }) } } func TestClient_CreateExternalInitiator_Errors(t *testing.T) { t.Parallel() tests := []struct { name string args []string }{ {"no arguments", []string{}}, {"too many arguments", []string{"bitcoin", "https://valid.url", "extra arg"}}, {"invalid url", []string{"bitcoin", "not a url"}}, } for _, tt := range tests { test := tt t.Run(test.name, func(t *testing.T) { app := startNewApplication(t) client, _ := app.NewClientAndRenderer() initialExis := len(cltest.AllExternalInitiators(t, app.GetSqlxDB())) set := flag.NewFlagSet("create", 0) assert.NoError(t, set.Parse(test.args)) c := cli.NewContext(nil, set, nil) err := client.CreateExternalInitiator(c) assert.Error(t, err) exis := cltest.AllExternalInitiators(t, app.GetSqlxDB()) assert.Len(t, exis, initialExis) }) } } func TestClient_DestroyExternalInitiator(t *testing.T) { t.Parallel() app := 
startNewApplication(t) client, r := app.NewClientAndRenderer() token := auth.NewToken() exi, err := bridges.NewExternalInitiator(token, &bridges.ExternalInitiatorRequest{Name: "name"}, ) require.NoError(t, err) err = app.BridgeORM().CreateExternalInitiator(exi) require.NoError(t, err) set := flag.NewFlagSet("test", 0) set.Parse([]string{exi.Name}) c := cli.NewContext(nil, set, nil) assert.NoError(t, client.DeleteExternalInitiator(c)) assert.Empty(t, r.Renders) } func TestClient_DestroyExternalInitiator_NotFound(t *testing.T) { t.Parallel() app := startNewApplication(t) client, r := app.NewClientAndRenderer() set := flag.NewFlagSet("test", 0) set.Parse([]string{"bogus-ID"}) c := cli.NewContext(nil, set, nil) assert.Error(t, client.DeleteExternalInitiator(c)) assert.Empty(t, r.Renders) }
func TestClient_RemoteLogin(t *testing.T) { t.Parallel() app := startNewApplication(t, withConfigSet(func(c *configtest.TestGeneralConfig) { c.Overrides.AdminCredentialsFile = null.StringFrom("") })) tests := []struct { name, file string email, pwd string wantError bool }{ {"success prompt", "", cltest.APIEmail, cltest.Password, false}, {"success file", "../internal/fixtures/apicredentials", "", "", false}, {"failure prompt", "", "[email protected]", "wrongpwd", true}, {"failure file", "/tmp/doesntexist", "", "", true}, {"failure file w correct prompt", "/tmp/doesntexist", cltest.APIEmail, cltest.Password, true}, } for _, test := range tests { t.Run(test.name, func(t *testing.T) { enteredStrings := []string{test.email, test.pwd} prompter := &cltest.MockCountingPrompter{EnteredStrings: enteredStrings} client := app.NewAuthenticatingClient(prompter) set := flag.NewFlagSet("test", 0) set.String("file", test.file, "") c := cli.NewContext(nil, set, nil) err := client.RemoteLogin(c) if test.wantError { assert.Error(t, err) } else { assert.NoError(t, err) } }) } } func TestClient_ChangePassword(t *testing.T) { t.Parallel() app := startNewApplication(t) enteredStrings := []string{cltest.APIEmail, cltest.Password} prompter := &cltest.MockCountingPrompter{EnteredStrings: enteredStrings} client := app.NewAuthenticatingClient(prompter) otherClient := app.NewAuthenticatingClient(prompter) set := flag.NewFlagSet("test", 0) set.String("file", "../internal/fixtures/apicredentials", "") c := cli.NewContext(nil, set, nil) err := client.RemoteLogin(c) require.NoError(t, err) err = otherClient.RemoteLogin(c) require.NoError(t, err) client.ChangePasswordPrompter = cltest.MockChangePasswordPrompter{ UpdatePasswordRequest: web.UpdatePasswordRequest{ OldPassword: cltest.Password, NewPassword: "_p4SsW0rD1!@#", }, } err = client.ChangePassword(cli.NewContext(nil, nil, nil)) assert.NoError(t, err) // otherClient should now be logged out err = otherClient.IndexBridges(c) require.Error(t, err) require.Contains(t, err.Error(), "Unauthorized") } func TestClient_SetDefaultGasPrice(t *testing.T) { t.Parallel() ethMock, assertMocksCalled := newEthMock(t) defer assertMocksCalled() app := startNewApplication(t, withKey(), withMocks(ethMock), withConfigSet(func(c *configtest.TestGeneralConfig) { c.Overrides.EVMEnabled = null.BoolFrom(true) c.Overrides.GlobalEvmNonceAutoSync = null.BoolFrom(false) c.Overrides.GlobalBalanceMonitorEnabled = null.BoolFrom(false) }), ) client, _ := app.NewClientAndRenderer() t.Run("without specifying chain id setting value", func(t *testing.T) { set := flag.NewFlagSet("setgasprice", 0) set.Parse([]string{"8616460799"}) c := cli.NewContext(nil, set, nil) assert.NoError(t, client.SetEvmGasPriceDefault(c)) ch, err := app.GetChains().EVM.Default() require.NoError(t, err) cfg := ch.Config() assert.Equal(t, big.NewInt(8616460799), cfg.EvmGasPriceDefault()) client, _ = app.NewClientAndRenderer() set = flag.NewFlagSet("setgasprice", 0) set.String("amount", "", "") set.Bool("gwei", true, "") set.Parse([]string{"-gwei", "861.6460799"}) c = cli.NewContext(nil, set, nil) assert.NoError(t, client.SetEvmGasPriceDefault(c)) assert.Equal(t, big.NewInt(861646079900), cfg.EvmGasPriceDefault()) }) t.Run("specifying wrong chain id", func(t *testing.T) { set := flag.NewFlagSet("setgasprice", 0) set.String("evmChainID", "", "") set.Parse([]string{"-evmChainID", "985435435435", "8616460799"}) c := cli.NewContext(nil, set, nil) err := client.SetEvmGasPriceDefault(c) assert.Error(t, err) assert.Contains(t, err.Error(), 
"evmChainID does not match any local chains") ch, err := app.GetChains().EVM.Default() require.NoError(t, err) cfg := ch.Config() assert.Equal(t, big.NewInt(861646079900), cfg.EvmGasPriceDefault()) }) t.Run("specifying correct chain id", func(t *testing.T) { set := flag.NewFlagSet("setgasprice", 0) set.String("evmChainID", "", "") set.Parse([]string{"-evmChainID", "0", "12345678900"}) c := cli.NewContext(nil, set, nil) assert.NoError(t, client.SetEvmGasPriceDefault(c)) ch, err := app.GetChains().EVM.Default() require.NoError(t, err) cfg := ch.Config() assert.Equal(t, big.NewInt(12345678900), cfg.EvmGasPriceDefault()) }) } func TestClient_GetConfiguration(t *testing.T) { t.Parallel() app := startNewApplication(t) client, r := app.NewClientAndRenderer() cfg := app.GetConfig() assert.NoError(t, client.GetConfiguration(cltest.EmptyCLIContext())) require.Equal(t, 1, len(r.Renders)) cp := *r.Renders[0].(*config.ConfigPrinter) assert.Equal(t, cp.EnvPrinter.BridgeResponseURL, cfg.BridgeResponseURL().String()) assert.Equal(t, cp.EnvPrinter.DefaultChainID, cfg.DefaultChainID().String()) assert.Equal(t, cp.EnvPrinter.Dev, cfg.Dev()) assert.Equal(t, cp.EnvPrinter.LogLevel, cfg.LogLevel()) assert.Equal(t, cp.EnvPrinter.LogSQL, cfg.LogSQL()) assert.Equal(t, cp.EnvPrinter.RootDir, cfg.RootDir()) assert.Equal(t, cp.EnvPrinter.SessionTimeout, cfg.SessionTimeout()) } func TestClient_RunOCRJob_HappyPath(t *testing.T) { t.Parallel() app := startNewApplication(t, withConfigSet(func(c *configtest.TestGeneralConfig) { c.Overrides.EVMEnabled = null.BoolFrom(true) c.Overrides.FeatureOffchainReporting = null.BoolFrom(true) c.Overrides.GlobalGasEstimatorMode = null.StringFrom("FixedPrice") })) client, _ := app.NewClientAndRenderer() app.KeyStore.OCR().Add(cltest.DefaultOCRKey) app.KeyStore.P2P().Add(cltest.DefaultP2PKey) _, bridge := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig()) _, bridge2 := cltest.MustCreateBridge(t, app.GetSqlxDB(), cltest.BridgeOpts{}, app.GetConfig()) var jb job.Job ocrspec := testspecs.GenerateOCRSpec(testspecs.OCRSpecParams{DS1BridgeName: bridge.Name.String(), DS2BridgeName: bridge2.Name.String()}) err := toml.Unmarshal([]byte(ocrspec.Toml()), &jb) require.NoError(t, err) var ocrSpec job.OffchainReportingOracleSpec err = toml.Unmarshal([]byte(ocrspec.Toml()), &ocrspec) require.NoError(t, err) jb.OffchainreportingOracleSpec = &ocrSpec key, _ := cltest.MustInsertRandomKey(t, app.KeyStore.Eth()) jb.OffchainreportingOracleSpec.TransmitterAddress = &key.Address err = app.AddJobV2(context.Background(), &jb) require.NoError(t, err) set := flag.NewFlagSet("test", 0) set.Parse([]string{strconv.FormatInt(int64(jb.ID), 10)}) c := cli.NewContext(nil, set, nil) require.NoError(t, client.RemoteLogin(c)) require.NoError(t, client.TriggerPipelineRun(c)) } func TestClient_RunOCRJob_MissingJobID(t *testing.T) { t.Parallel() app := startNewApplication(t) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("test", 0) c := cli.NewContext(nil, set, nil) require.NoError(t, client.RemoteLogin(c)) assert.EqualError(t, client.TriggerPipelineRun(c), "Must pass the job id to trigger a run") } func TestClient_RunOCRJob_JobNotFound(t *testing.T) { t.Parallel() app := startNewApplication(t) client, _ := app.NewClientAndRenderer() set := flag.NewFlagSet("test", 0) set.Parse([]string{"1"}) c := cli.NewContext(nil, set, nil) require.NoError(t, client.RemoteLogin(c)) err := client.TriggerPipelineRun(c) assert.Contains(t, err.Error(), "parseResponse error: Error; job ID 1") } func 
TestClient_AutoLogin(t *testing.T) { t.Parallel() app := startNewApplication(t) user := cltest.MustRandomUser(t) require.NoError(t, app.SessionORM().CreateUser(&user)) sr := sessions.SessionRequest{ Email: user.Email, Password: cltest.Password, } client, _ := app.NewClientAndRenderer() client.CookieAuthenticator = cmd.NewSessionCookieAuthenticator(app.GetConfig(), &cmd.MemoryCookieStore{}, logger.TestLogger(t)) client.HTTP = cmd.NewAuthenticatedHTTPClient(app.Config, client.CookieAuthenticator, sr) fs := flag.NewFlagSet("", flag.ExitOnError) err := client.ListJobs(cli.NewContext(nil, fs, nil)) require.NoError(t, err) // Expire the session and then try again pgtest.MustExec(t, app.GetSqlxDB(), "TRUNCATE sessions") err = client.ListJobs(cli.NewContext(nil, fs, nil)) require.NoError(t, err) } func TestClient_AutoLogin_AuthFails(t *testing.T) { t.Parallel() app := startNewApplication(t) user := cltest.MustRandomUser(t) require.NoError(t, app.SessionORM().CreateUser(&user)) sr := sessions.SessionRequest{ Email: user.Email, Password: cltest.Password, } client, _ := app.NewClientAndRenderer() client.CookieAuthenticator = FailingAuthenticator{} client.HTTP = cmd.NewAuthenticatedHTTPClient(app.Config, client.CookieAuthenticator, sr) fs := flag.NewFlagSet("", flag.ExitOnError) err := client.ListJobs(cli.NewContext(nil, fs, nil)) require.Error(t, err) } type FailingAuthenticator struct{} func (FailingAuthenticator) Cookie() (*http.Cookie, error) { return &http.Cookie{}, nil } // Authenticate retrieves a session ID via a cookie and saves it to disk. func (FailingAuthenticator) Authenticate(sessionRequest sessions.SessionRequest) (*http.Cookie, error) { return nil, errors.New("no luck") } func TestClient_SetLogConfig(t *testing.T) { t.Parallel() app := startNewApplication(t) client, _ := app.NewClientAndRenderer() logLevel := "warn" set := flag.NewFlagSet("loglevel", 0) set.String("level", logLevel, "") c := cli.NewContext(nil, set, nil) err := client.SetLogLevel(c) require.NoError(t, err) assert.Equal(t, logLevel, app.Config.LogLevel().String()) sqlEnabled := true set = flag.NewFlagSet("logsql", 0) set.Bool("enable", sqlEnabled, "") c = cli.NewContext(nil, set, nil) err = client.SetLogSQL(c) assert.NoError(t, err) assert.Equal(t, sqlEnabled, app.Config.LogSQL()) sqlEnabled = false set = flag.NewFlagSet("logsql", 0) set.Bool("disable", true, "") c = cli.NewContext(nil, set, nil) err = client.SetLogSQL(c) assert.NoError(t, err) assert.Equal(t, sqlEnabled, app.Config.LogSQL()) } func TestClient_SetPkgLogLevel(t *testing.T) { t.Parallel() app := startNewApplication(t) client, _ := app.NewClientAndRenderer() logPkg := logger.HeadTracker logLevel := "warn" set := flag.NewFlagSet("logpkg", 0) set.String("pkg", logPkg, "") set.String("level", logLevel, "") c := cli.NewContext(nil, set, nil) err := client.SetLogPkg(c) require.NoError(t, err) level, ok := logger.NewORM(app.GetSqlxDB(), logger.TestLogger(t)).GetServiceLogLevel(logPkg) require.True(t, ok) assert.Equal(t, logLevel, level) }
sandbox.rs
// Copyright (c) 2021 Quark Container Authors / 2018 The gVisor Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. use alloc::string::String; use libc::*; use std::{thread, time}; use alloc::str; use lazy_static::lazy_static; use spin::Mutex; use nix::sys::signal; use super::super::super::qlib::*; use super::super::super::qlib::common::*; use super::super::super::qlib::linux_def::*; use super::super::super::qlib::control_msg::*; use super::super::super::ucall::ucall::*; use super::super::super::ucall::ucall_client::*; use super::super::super::vmspace::syscall::*; use super::super::runtime::console::*; use super::super::cgroup::*; use super::super::oci::*; use super::super::container::container::*; use super::super::cmd::config::*; use super::super::runtime::sandbox_process::*; lazy_static! { static ref SIGNAL_STRUCT : Mutex<Option<SignalStruct>> = Mutex::new(None); } extern fn handle_sigint(signal :i32) { if SIGNAL_STRUCT.lock().is_none() { return } error!("exec signal {}", signal); SIGNAL_STRUCT.lock().as_ref().unwrap().SignalProcess(signal).unwrap(); } // numSignals is the number of normal (non-realtime) signals on Linux. pub const NUM_SIGNALS : usize = 32; pub struct SignalStruct { pub cid: String, pub pid: i32, } impl SignalStruct { pub fn New(cid: &str, pid: i32) { let data = Self { cid: cid.to_string(), pid: pid, }; error!("enable forward signal in exec"); unsafe { libc::ioctl( 0, libc::TIOCSCTTY, 0); } *SIGNAL_STRUCT.lock() = Some(data); let sig_action = signal::SigAction::new(signal::SigHandler::Handler(handle_sigint), signal::SaFlags::empty(), signal::SigSet::empty()); for i in 1..NUM_SIGNALS { if i == 9 //SIGKILL || i == 19 { //SIGSTOP continue } unsafe { signal::sigaction(signal::Signal::from_c_int(i as i32).unwrap(), &sig_action) .map_err(|e| Error::Common(format!("sigaction fail with err {:?} for signal {}", e, i))) .unwrap(); } } } pub fn StopSignal() { *SIGNAL_STRUCT.lock() = None; } pub fn SignalProcess(&self, signo: i32) -> Result<()> { return SignalProcess(&self.cid, self.pid, signo, true); } } pub fn SignalProcess(cid: &str, pid: i32, signo: i32, fgProcess: bool) -> Result<()> { info!("Signal sandbox {}", cid); let addr = ControlSocketAddr(cid); info!("SandboxConnect connect address is {}", &addr); let client = UCallClient::Init(&addr)?; let mut mode = SignalDeliveryMode::DeliverToProcess; if fgProcess { mode = SignalDeliveryMode::DeliverToForegroundProcessGroup; } let req = UCallReq::Signal(SignalArgs{ Signo: signo, PID: pid, Mode: mode, }); let resp = client.Call(&req)?; match resp { UCallResp::SignalResp => { return Ok(()) }, resp => { panic!("SignalProcess get unknow resp {:?}", resp); } } } // Sandbox wraps a sandbox process. // // Note: Sandbox must be immutable because a copy of it is saved for each // container and changes would not be synchronized to all of them. #[derive(Serialize, Deserialize, Debug, Default)] pub struct Sandbox { // ID is the id of the sandbox (immutable). By convention, this is the same // ID as the first container run in the sandbox. 
pub ID: String, // Pid is the pid of the running sandbox (immutable). May be 0 is the sandbox // is not running. pub Pid: i32, // Cgroup has the cgroup configuration for the sandbox. pub Cgroup: Option<Cgroup>, // child is set if a sandbox process is a child of the current process. // // This field isn't saved to json, because only a creator of sandbox // will have it as a child process. #[serde(default, skip_serializing, skip_deserializing)] pub child: bool, // status is an exit status of a sandbox process. #[serde(default, skip_serializing)] pub status: u32, #[serde(default, skip_serializing, skip_deserializing)] pub autoStart: bool, #[serde(default, skip_serializing, skip_deserializing)] pub pivot: bool, #[serde(skip_serializing, skip_deserializing)] pub console: Console, } impl Sandbox { pub fn New(id: &str, action: RunAction, spec: &Spec, conf: &GlobalConfig, bundleDir: &str, consoleSocket: &str, _userlog: &str, cg: Option<Cgroup>, detach: bool, pivot: bool) -> Result<Self> { let mut s = Self { ID: id.to_string(), Cgroup: cg, ..Default::default() }; //let pid = CreateSandboxProcess(conf, id, bundleDir, ptyfd, autoStart)?; let process = &spec.process; let terminal = process.terminal; let process = SandboxProcess::New(conf, action, id, bundleDir, pivot)?; //let pid = process.Fork()?; let (pid, console) = process.Execv(terminal, consoleSocket, detach)?; s.console = console; s.child = true; s.Pid = pid; return Ok(s) } pub fn ForwardSignals(&self, pid: i32) { SignalStruct::New(&self.ID, pid); } pub fn StopSignal(&self) { SignalStruct::StopSignal(); } pub fn GetCwd(&self, buf: u64, size: u64) -> i64 { let nr = SysCallID::sys_getcwd as usize; unsafe { let res = syscall2(nr, buf as usize, size as usize) as i64; return res } } pub fn Pause(&self, cid: &str) -> Result<()> { info!("Pause sandbox {}", cid); let client = self.SandboxConnect()?; let req = UCallReq::Pause; let _resp = client.Call(&req)?; return Ok(()); } pub fn Unpause(&self, cid: &str) -> Result<()> { info!("Unpause sandbox {}", cid); let client = self.SandboxConnect()?; let req = UCallReq::Unpause; let _resp = client.Call(&req)?; return Ok(()); } pub fn Processes(&self, cid: &str) -> Result<Vec<ProcessInfo>> { info!("Getting processes for container {} in sandbox {}", cid, self.ID); let client = self.SandboxConnect()?; let req = UCallReq::Ps(cid.to_string()); let resp = client.Call(&req)?; match resp { UCallResp::PsResp(ps) => Ok(ps), resp => { panic!("Processes get unknow resp {:?}", resp); } } } pub fn StartRootContainer(&self) -> Result<()> { let client = self.SandboxConnect()?; let req = UCallReq::RootContainerStart(RootContainerStart{ cid: self.ID.to_string(), }); let _resp = client.Call(&req)?; return Ok(()); } pub fn Execute(&self, mut args: ExecArgs) -> Result<i32> { info!("Executing new process in container {} in sandbox {}", &args.ContainerID, &self.ID); args.Fds.push(0); args.Fds.push(1); args.Fds.push(2); let client = self.SandboxConnect()?; let req = UCallReq::ExecProcess(args); let pid = match client.Call(&req)? 
{ UCallResp::ExecProcessResp(pid) => pid, resp => panic!("sandbox::Execute get error {:?}", resp), }; return Ok(pid) } pub fn Destroy(&mut self) -> Result<()> { info!("Destroy sandbox {}", &self.ID); if self.Pid != 0 { info!("Killing sandbox {}", &self.ID); let ret = unsafe { kill(self.Pid, SIGKILL) }; if ret < 0 && errno::errno().0 != ESRCH { return Err(Error::Common(format!("killing sandbox {} PID {}: {}", &self.ID, &self.Pid, errno::errno().0))); } return self.WaitForStopped(); } return Ok(()) } pub fn WaitPID(&mut self, _cid: &str, pid: i32, clearStatus: bool) -> Result<u32> { let client = self.SandboxConnect()?; let req = UCallReq::WaitPid(WaitPid{ pid: pid, clearStatus: clearStatus, }); let resp = client.Call(&req)?; match resp { UCallResp::WaitPidResp(status) => { println!("WaitPID status is {}", WaitStatus(status).ExitStatus()); return Ok(status); }, resp => { panic!("WaitPID get unknow resp {:?}", resp); } } } pub fn Wait(&mut self, _cid: &str) -> Result<u32> { match self.SandboxConnect() { Ok(client) => { let req = UCallReq::WaitContainer; match client.Call(&req) { Ok(UCallResp::WaitContainerResp(status)) => { println!("Wait status is {}", WaitStatus(status).ExitStatus()); } Ok(resp) => { println!("wait resp is {:?}", resp); } Err(e) => { println!("wait resp error is {:?}", e); } }; } //the container has exited Err(Error::SysError(SysErr::ECONNREFUSED)) =>{ info!("Wait: connect fail...."); }, Err(e) => return Err(e), } // The sandbox may have already exited, or exited while handling the // Wait RPC. The best we can do is ask Linux what the sandbox exit // status was, since in most cases that will be the same as the // container exit status. self.WaitForStopped()?; return Ok(self.status) } pub fn WaitForStopped(&mut self) -> Result<()> { info!("self child is {}, pid is {}", self.child, self.Pid); let ms = 5 * 1000; //5 sec for _i in 0 .. (ms/10) as usize { if self.child { if self.Pid == 0 { return Ok(()) } // The sandbox process is a child of the current process, // so we can wait it and collect its zombie. //info!("start to wait pid {}", self.Pid); let ret = unsafe { wait4(self.Pid, &mut self.status as * mut _ as * mut i32, WNOHANG, 0 as *mut rusage) }; if ret > 0 { self.Pid = 0; return Ok(()) } if ret < 0 { info!("wait sandbox fail use error {}", errno::errno().0); } } else if self.IsRunning() { continue; } else { return Ok(()) } let ten_millis = time::Duration::from_millis(10); thread::sleep(ten_millis); } return Err(Error::Common(format!("wait sandbox {} timeout", self.ID))); } pub fn SandboxConnect(&self) -> Result<UCallClient> { let addr = ControlSocketAddr(&self.ID); info!("SandboxConnect connect address is {}", &addr); let client = UCallClient::Init(&addr)?; return Ok(client) } // IsRunning returns true if the sandbox is running. pub fn IsRunning(&self) -> bool { if self.Pid != 0 { // Send a signal 0 to the sandbox process. let ret = unsafe { kill(self.Pid, 0) }; if ret == 0 { // Succeeded, process is running. 
return true; } } return false; } pub fn SignalContainer(&self, _cid: &str, signo: i32, all: bool) -> Result<()> { info!("Signal container sandbox {}", &self.ID); let client = self.SandboxConnect()?; let mut mode = SignalDeliveryMode::DeliverToProcess; if all { mode = SignalDeliveryMode::DeliverToAllProcesses; } let req = UCallReq::Signal(SignalArgs{ Signo: signo, PID: 0, Mode: mode, }); let resp = client.Call(&req)?; match resp { UCallResp::SignalResp => { return Ok(()) }, resp => { panic!("SignalContainer get unknow resp {:?}", resp); } } } pub fn SignalProcess(&self, _cid: &str, pid: i32, signo: i32, fgProcess: bool) -> Result<()> { return SignalProcess(&self.ID, pid, signo, fgProcess) } pub fn DestroyContainer(&self, _cid: &str) -> Result<()> { let client = self.SandboxConnect()?; let req = UCallReq::ContainerDestroy; let resp = client.Call(&req)?; match resp { UCallResp::ContainerDestroyResp => { return Ok(()) }, resp => {
panic!("DestroyContainer get unknown resp {:?}", resp); } } } pub fn IsRootContainer(&self, cid: &str) -> bool { return self.ID.as_str() == cid } }
panic!("DestroyContainer get unknow resp {:?}", resp); } }
wrappers.rs
use std::mem; use std::ops::{Deref, DerefMut}; use core_foundation::base::{mach_port_t, kCFNull, kCFAllocatorDefault, CFType, TCFType}; use core_foundation::dictionary::{CFDictionary, CFMutableDictionary, CFMutableDictionaryRef}; use core_foundation::string::CFString; use mach::{port, mach_port, kern_return, traps}; use crate::Result; use super::{sys}; #[derive(Debug)] pub struct IoMasterPort(mach_port_t); impl IoMasterPort { pub fn new
() -> Result<IoMasterPort> { let mut master_port: port::mach_port_t = port::MACH_PORT_NULL; unsafe { kern_try!(sys::IOMasterPort(sys::kIOMasterPortDefault, &mut master_port)); } Ok(IoMasterPort(master_port)) } pub fn get_services(&self) -> Result<IoIterator> { let service = unsafe { let ret = sys::IOServiceMatching(sys::IOPM_SERVICE_NAME); assert_ne!(ret as *const _, kCFNull); ret }; let mut iterator = IoIterator::default(); unsafe { kern_try!(sys::IOServiceGetMatchingServices(self.0, service, &mut *iterator)); } Ok(iterator) } } impl Drop for IoMasterPort { fn drop(&mut self) { let result = unsafe { mach_port::mach_port_deallocate(traps::mach_task_self(), self.0) }; assert_eq!(result, kern_return::KERN_SUCCESS); } } #[derive(Debug)] pub struct IoObject(sys::io_object_t); impl IoObject { /// Returns typed dictionary with this object properties. /// In our case all keys are CFStrings, so there is no need to return /// untyped dict here. pub fn properties(&self) -> Result<CFDictionary<CFString, CFType>> { unsafe { let mut props: CFMutableDictionaryRef = mem::uninitialized(); kern_try!(sys::IORegistryEntryCreateCFProperties(self.0, &mut props, kCFAllocatorDefault, 0)); Ok(CFMutableDictionary::wrap_under_create_rule(props).to_immutable()) } } } impl Drop for IoObject { fn drop(&mut self) { let result = unsafe { sys::IOObjectRelease(self.0) }; assert_eq!(result, kern_return::KERN_SUCCESS); } } #[derive(Debug)] pub struct IoIterator(sys::io_iterator_t); impl Deref for IoIterator { type Target = sys::io_iterator_t; fn deref(&self) -> &Self::Target { &self.0 } } impl DerefMut for IoIterator { fn deref_mut(&mut self) -> &mut Self::Target { &mut self.0 } } impl Iterator for IoIterator { type Item = IoObject; fn next(&mut self) -> Option<Self::Item> { match unsafe { sys::IOIteratorNext(self.0) } { 0 => None, // TODO: Should not there be some `NULL`? io_object => Some(IoObject(io_object)) } } } impl Drop for IoIterator { fn drop(&mut self) { let result = unsafe { sys::IOObjectRelease(self.0) }; assert_eq!(result, kern_return::KERN_SUCCESS); } } impl Default for IoIterator { // It is extremely unsafe and inner field MUST BE initialized // before the further `Drop::drop` call fn default() -> IoIterator { let inner = unsafe { mem::zeroed() }; IoIterator(inner) } }
index.js
"use strict"; /* * Copyright (c) 2011 Vinay Pulim <[email protected]> * MIT Licensed * */ /*jshint proto:true*/ exports.__esModule = true; var assert_1 = require("assert"); var debugBuilder = require("debug"); var fs = require("fs"); var _ = require("lodash"); var path = require("path"); var sax = require("sax"); var stripBom = require("strip-bom"); var url = require("url"); var http_1 = require("../http"); var nscontext_1 = require("../nscontext"); var utils_1 = require("../utils"); var elements = require("./elements"); var debug = debugBuilder('node-soap'); var XSI_URI = 'http://www.w3.org/2001/XMLSchema-instance'; function xmlEscape (obj) { if (typeof (obj) === 'string') { if (obj.substr(0, 9) === '<![CDATA[' && obj.substr(-3) === ']]>') { return obj; } return obj .replace(/&/g, '&amp;') .replace(/</g, '&lt;') .replace(/>/g, '&gt;') .replace(/"/g, '&quot;') .replace(/'/g, '&apos;'); } return obj; } var trimLeft = /^[\s\xA0]+/; var trimRight = /[\s\xA0]+$/; function trim (text) { return text.replace(trimLeft, '').replace(trimRight, ''); } function deepMerge (destination, source) { return _.mergeWith(destination, source, function (a, b) { return _.isArray(a) ? a.concat(b) : undefined; }); } function appendColon (ns) { return (ns && ns.charAt(ns.length - 1) !== ':') ? ns + ':' : ns; } function noColonNameSpace (ns) { return (ns && ns.charAt(ns.length - 1) === ':') ? ns.substring(0, ns.length - 1) : ns; } var WSDL = /** @class */ (function () { function WSDL (definition, uri, options) { var _this = this; this.ignoredNamespaces = ['tns', 'targetNamespace', 'typedNamespace']; this.ignoreBaseNameSpaces = false; this.valueKey = '$value'; this.xmlKey = '$xml'; var fromFunc; this.uri = uri; this.callback = function () { }; this._includesWsdl = []; // initialize WSDL cache this.WSDL_CACHE = {}; if (options && options.WSDL_CACHE) { this.WSDL_CACHE = options.WSDL_CACHE; } this._initializeOptions(options); if (typeof definition === 'string') { definition = stripBom(definition); fromFunc = this._fromXML; } else if (typeof definition === 'object') { fromFunc = this._fromServices; } else { throw new Error('WSDL constructor takes either an XML string or service definition'); } process.nextTick(function () { try { fromFunc.call(_this, definition); } catch (e) { return _this.callback(e); } _this.processIncludes(function (err) { var name; if (err) { return _this.callback(err); } _this.definitions.deleteFixedAttrs(); var services = _this.services = _this.definitions.services; if (services) { for (name in services) { services[name].postProcess(_this.definitions); } } var complexTypes = _this.definitions.complexTypes; if (complexTypes) { for (name in complexTypes) { complexTypes[name].deleteFixedAttrs(); } } // for document style, for every binding, prepare input message element name to (methodName, output message element name) mapping var bindings = _this.definitions.bindings; for (var bindingName in bindings) { var binding = bindings[bindingName]; if (typeof binding.style === 'undefined') { binding.style = 'document'; } if (binding.style !== 'document') { continue; } var methods = binding.methods; var topEls = binding.topElements = {}; for (var methodName in methods) { if (methods[methodName].input) { var inputName = methods[methodName].input.$name; var outputName = ''; if (methods[methodName].output) { outputName = methods[methodName].output.$name; } topEls[inputName] = { methodName: methodName, outputName: outputName }; } } } // prepare soap envelope xmlns definition string _this.xmlnsInEnvelope = 
_this._xmlnsMap(); _this.callback(err, _this); }); }); } WSDL.prototype.onReady = function (callback) { if (callback) { this.callback = callback; } }; WSDL.prototype.processIncludes = function (callback) { var schemas = this.definitions.schemas; var includes = []; for (var ns in schemas) { var schema = schemas[ns]; includes = includes.concat(schema.includes || []); } this._processNextInclude(includes, callback); }; WSDL.prototype.describeServices = function () { var services = {}; for (var name_1 in this.services) { var service = this.services[name_1]; services[name_1] = service.description(this.definitions); } return services; }; WSDL.prototype.toXML = function () { return this.xml || ''; }; WSDL.prototype.xmlToObject = function (xml, callback) { var _this = this; var p = typeof callback === 'function' ? {} : sax.parser(true, null); var objectName = null; var root = {}; var schema = { Envelope: { Header: { Security: { UsernameToken: { Username: 'string', Password: 'string' } } }, Body: { Fault: { faultcode: 'string', faultstring: 'string', detail: 'string' } } } }; var stack = [{ name: null, object: root, schema: schema }]; var xmlns = {}; var refs = {}; var id; // {id:{hrefs:[],obj:}, ...} p.onopentag = function (node) { var nsName = node.name; var attrs = node.attributes; var name = utils_1.splitQName(nsName).name; var attributeName; var top = stack[stack.length - 1]; var topSchema = top.schema; var elementAttributes = {}; var hasNonXmlnsAttribute = false; var hasNilAttribute = false; var obj = {}; var originalName = name; if (!objectName && top.name === 'Body' && name !== 'Fault') { var message = _this.definitions.messages[name]; // Support RPC/literal messages where response body contains one element named // after the operation + 'Response'. See http://www.w3.org/TR/wsdl#_names if (!message) { try { // Determine if this is request or response var isInput = false; var isOutput = false; if ((/Response$/).test(name)) { isOutput = true; name = name.replace(/Response$/, ''); } else if ((/Request$/).test(name)) { isInput = true; name = name.replace(/Request$/, ''); } else if ((/Solicit$/).test(name)) { isInput = true; name = name.replace(/Solicit$/, ''); } // Look up the appropriate message as given in the portType's operations var portTypes = _this.definitions.portTypes; var portTypeNames = Object.keys(portTypes); // Currently this supports only one portType definition. 
var portType = portTypes[portTypeNames[0]]; if (isInput) { name = portType.methods[name].input.$name; } else { name = portType.methods[name].output.$name; } message = _this.definitions.messages[name]; // 'cache' this alias to speed future lookups _this.definitions.messages[originalName] = _this.definitions.messages[name]; } catch (e) { if (_this.options.returnFault) { p.onerror(e); } } } topSchema = message.description(_this.definitions); objectName = originalName; } if (attrs.href) { id = attrs.href.substr(1); if (!refs[id]) { refs[id] = { hrefs: [], obj: null }; } refs[id].hrefs.push({ par: top.object, key: name, obj: obj }); } if (id = attrs.id) { if (!refs[id]) { refs[id] = { hrefs: [], obj: null }; } } // Handle element attributes for (attributeName in attrs) { if (/^xmlns:|^xmlns$/.test(attributeName)) { xmlns[utils_1.splitQName(attributeName).name] = attrs[attributeName]; continue; } hasNonXmlnsAttribute = true; elementAttributes[attributeName] = attrs[attributeName]; } for (attributeName in elementAttributes) { var res = utils_1.splitQName(attributeName); if (res.name === 'nil' && xmlns[res.prefix] === XSI_URI && elementAttributes[attributeName] && (elementAttributes[attributeName].toLowerCase() === 'true' || elementAttributes[attributeName] === '1')) { hasNilAttribute = true; break; } } if (hasNonXmlnsAttribute) { obj[_this.options.attributesKey] = elementAttributes; } // Pick up the schema for the type specified in element's xsi:type attribute. var xsiTypeSchema; var xsiType; for (var prefix in xmlns) { if (xmlns[prefix] === XSI_URI && (prefix + ":type" in elementAttributes)) { xsiType = elementAttributes[prefix + ":type"]; break; } } if (xsiType) { var type = utils_1.splitQName(xsiType); var typeURI = void 0; if (type.prefix === utils_1.TNS_PREFIX) { // In case of xsi:type = "MyType" typeURI = xmlns[type.prefix] || xmlns.xmlns; } else { typeURI = xmlns[type.prefix]; } var typeDef = _this.findSchemaObject(typeURI, type.name); if (typeDef) { xsiTypeSchema = typeDef.description(_this.definitions); } } if (topSchema && topSchema[name + '[]']) { name = name + '[]'; } stack.push({ name: originalName, object: obj, schema: (xsiTypeSchema || (topSchema && topSchema[name])), id: attrs.id, nil: hasNilAttribute }); }; p.onclosetag = function (nsName) { var cur = stack.pop(); var obj = cur.object; var top = stack[stack.length - 1]; var topObject = top.object; var topSchema = top.schema; var name = utils_1.splitQName(nsName).name; if (typeof cur.schema === 'string' && (cur.schema === 'string' || cur.schema.split(':')[1] === 'string')) { if (typeof obj === 'object' && Object.keys(obj).length === 0) { obj = cur.object = ''; } } if (cur.nil === true) { if (_this.options.handleNilAsNull) { obj = null; } else { return; } } if (_.isPlainObject(obj) && !Object.keys(obj).length) { obj = null; } if (topSchema && topSchema[name + '[]']) { if (!topObject[name]) { topObject[name] = []; } topObject[name].push(obj); } else if (name in topObject) { if (!Array.isArray(topObject[name])) { topObject[name] = [topObject[name]]; } topObject[name].push(obj); } else { topObject[name] = obj; } if (cur.id) { refs[cur.id].obj = obj; } }; p.oncdata = function (text) { var originalText = text; text = trim(text); if (!text.length) { return; } if (/<\?xml[\s\S]+\?>/.test(text)) { var top_1 = stack[stack.length - 1]; var value = _this.xmlToObject(text); if (top_1.object[_this.options.attributesKey]) { top_1.object[_this.options.valueKey] = value; } else { top_1.object = value; } } else { p.ontext(originalText); } }; 
p.onerror = function (e) { p.resume(); throw { Fault: { faultcode: 500, faultstring: 'Invalid XML', detail: new Error(e).message, statusCode: 500 } }; }; p.ontext = function (text) { var originalText = text; text = trim(text); if (!text.length) { return; } var top = stack[stack.length - 1]; var name = utils_1.splitQName(top.schema).name; var value; if (_this.options && _this.options.customDeserializer && _this.options.customDeserializer[name]) { value = _this.options.customDeserializer[name](text, top); } else { if (name === 'int' || name === 'integer') { value = parseInt(text, 10); } else if (name === 'double' || name === 'float') { value = Number(text); } else if (name === 'bool' || name === 'boolean') { value = text.toLowerCase() === 'true' || text === '1'; } else if (name === 'dateTime' || name === 'date') { value = new Date(text); } else { if (_this.options.preserveWhitespace) { text = originalText; } // handle string or other types if (typeof top.object !== 'string') { value = text; } else { value = top.object + text; } } } if (top.object[_this.options.attributesKey]) { top.object[_this.options.valueKey] = value; } else { top.object = value; } }; if (typeof callback === 'function') { // we be streaming var saxStream = sax.createStream(true, null); saxStream.on('opentag', p.onopentag); saxStream.on('closetag', p.onclosetag); saxStream.on('cdata', p.oncdata); saxStream.on('text', p.ontext); xml.pipe(saxStream) .on('error', function (err) { callback(err); }) .on('end', function () { var r; try { r = finish(); } catch (e) { return callback(e); } callback(null, r); }); return; } p.write(xml).close(); return finish(); function finish () { // MultiRef support: merge objects instead of replacing for (var n in refs) { var ref = refs[n]; for (var _i = 0, _a = ref.hrefs; _i < _a.length; _i++) { var href = _a[_i]; _.assign(href.obj, ref.obj); } } if (root.Envelope) { var body = root.Envelope.Body; if (body && body.Fault) { var code = body.Fault.faultcode && body.Fault.faultcode.$value; var string = body.Fault.faultstring && body.Fault.faultstring.$value; var detail = body.Fault.detail && body.Fault.detail.$value; code = code || body.Fault.faultcode; string = string || body.Fault.faultstring; detail = detail || body.Fault.detail; var error = new Error(code + ': ' + string + (detail ? ': ' + JSON.stringify(detail) : '')); error.root = root; throw error; } return root.Envelope; } return root; } }; /** * Look up a XSD type or element by namespace URI and name * @param {String} nsURI Namespace URI * @param {String} qname Local or qualified name * @returns {*} The XSD type/element definition */ WSDL.prototype.findSchemaObject = function (nsURI, qname) { if (!nsURI || !qname) { return null; } var def = null; if (this.definitions.schemas) { var schema = this.definitions.schemas[nsURI]; if (schema) { if (qname.indexOf(':') !== -1) { qname = qname.substring(qname.indexOf(':') + 1, qname.length); } // if the client passed an input element which has a `$lookupType` property instead of `$type` // the `def` is found in `schema.elements`. def = schema.complexTypes[qname] || schema.types[qname] || schema.elements[qname]; } } return def; }; /** * Create document style xml string from the parameters * @param {String} name * @param {*} params * @param {String} nsPrefix * @param {String} nsURI * @param {String} type */ WSDL.prototype.objectToDocumentXML = function (name, params, nsPrefix, nsURI, type) { // If user supplies XML already, just use that. XML Declaration should not be present. 
if (params && params._xml) { return params._xml; } var args = {}; args[name] = params; var parameterTypeObj = type ? this.findSchemaObject(nsURI, type) : null; return this.objectToXML(args, null, nsPrefix, nsURI, true, null, parameterTypeObj); }; /** * Create RPC style xml string from the parameters * @param {String} name * @param {*} params * @param {String} nsPrefix * @param {String} nsURI * @returns {string} */ WSDL.prototype.objectToRpcXML = function (name, params, nsPrefix, nsURI, isParts) { var parts = []; var defs = this.definitions; var nsAttrName = '_xmlns'; nsPrefix = nsPrefix || utils_1.findPrefix(defs.xmlns, nsURI); nsURI = nsURI || defs.xmlns[nsPrefix]; nsPrefix = nsPrefix === utils_1.TNS_PREFIX ? '' : (nsPrefix + ':'); parts.push(['<', nsPrefix, name, '>'].join('')); for (var key in params) { if (!params.hasOwnProperty(key)) { continue; } if (key !== nsAttrName) { var value = params[key]; var prefixedKey = (isParts ? '' : nsPrefix) + key; var attributes = []; if (typeof value === 'object' && value.hasOwnProperty(this.options.attributesKey)) { var attrs = value[this.options.attributesKey]; for (var n in attrs) { attributes.push(' ' + n + '=' + '"' + attrs[n] + '"'); } } parts.push(['<', prefixedKey].concat(attributes).concat('>').join('')); parts.push((typeof value === 'object') ? this.objectToXML(value, key, nsPrefix, nsURI) : xmlEscape(value)); parts.push(['</', prefixedKey, '>'].join('')); } } parts.push(['</', nsPrefix, name, '>'].join('')); return parts.join(''); }; WSDL.prototype.isIgnoredNameSpace = function (ns) { return this.options.ignoredNamespaces.indexOf(ns) > -1; }; WSDL.prototype.filterOutIgnoredNameSpace = function (ns) { var namespace = noColonNameSpace(ns); return this.isIgnoredNameSpace(namespace) ? '' : namespace; }; /** * Convert an object to XML. This is a recursive method as it calls itself. * * @param {Object} obj the object to convert. * @param {String} name the name of the element (if the object being traversed is * an element). * @param {String} nsPrefix the namespace prefix of the object I.E. xsd. * @param {String} nsURI the full namespace of the object I.E. http://w3.org/schema. * @param {Boolean} isFirst whether or not this is the first item being traversed. * @param {?} xmlnsAttr * @param {?} parameterTypeObject * @param {NamespaceContext} nsContext Namespace context */ WSDL.prototype.objectToXML = function (obj, name, nsPrefix, nsURI, isFirst, xmlnsAttr, schemaObject, nsContext) { var schema = this.definitions.schemas[nsURI]; var parentNsPrefix = nsPrefix ? nsPrefix.parent : undefined; if (typeof parentNsPrefix !== 'undefined') { // we got the parentNsPrefix for our array. 
setting the namespace-variable back to the current namespace string nsPrefix = nsPrefix.current; } parentNsPrefix = noColonNameSpace(parentNsPrefix); if (this.isIgnoredNameSpace(parentNsPrefix)) { parentNsPrefix = ''; } var soapHeader = !schema; var qualified = schema && schema.$elementFormDefault === 'qualified'; var parts = []; var prefixNamespace = (nsPrefix || qualified) && nsPrefix !== utils_1.TNS_PREFIX; var xmlnsAttrib = ''; if (nsURI && isFirst) { if (this.options.overrideRootElement && this.options.overrideRootElement.xmlnsAttributes) { this.options.overrideRootElement.xmlnsAttributes.forEach(function (attribute) { xmlnsAttrib += ' ' + attribute.name + '="' + attribute.value + '"'; }); } else { if (prefixNamespace && !this.isIgnoredNameSpace(nsPrefix)) { // resolve the prefix namespace xmlnsAttrib += ' xmlns:' + nsPrefix + '="' + nsURI + '"'; } // only add default namespace if the schema elementFormDefault is qualified if (qualified || soapHeader) { xmlnsAttrib += ' xmlns="' + nsURI + '"'; } } } if (!nsContext) { nsContext = new nscontext_1.NamespaceContext(); nsContext.declareNamespace(nsPrefix, nsURI); } else { nsContext.pushContext(); } // explicitly use xmlns attribute if available if (xmlnsAttr && !(this.options.overrideRootElement && this.options.overrideRootElement.xmlnsAttributes)) { xmlnsAttrib = xmlnsAttr; } var ns = ''; if (this.options.overrideRootElement && isFirst) { ns = this.options.overrideRootElement.namespace; } else if (prefixNamespace && (qualified || isFirst || soapHeader) && !this.isIgnoredNameSpace(nsPrefix)) { ns = nsPrefix; } var i; var n; // start building out XML string. if (Array.isArray(obj)) { var nonSubNameSpace = ''; var emptyNonSubNameSpaceForArray = false; var nameWithNsRegex = /^([^:]+):([^:]+)$/.exec(name); if (nameWithNsRegex) { nonSubNameSpace = nameWithNsRegex[1]; name = nameWithNsRegex[2]; } else if (name[0] === ':') { emptyNonSubNameSpaceForArray = true; name = name.substr(1); } for (i = 0, n = obj.length; i < n; i++) { var item = obj[i]; var arrayAttr = this.processAttributes(item, nsContext); var correctOuterNsPrefix = nonSubNameSpace || parentNsPrefix || ns; // using the parent namespace prefix if given var body = this.objectToXML(item, name, nsPrefix, nsURI, false, null, schemaObject, nsContext); var openingTagParts = ['<', name, arrayAttr, xmlnsAttrib]; if (!emptyNonSubNameSpaceForArray) { openingTagParts = ['<', appendColon(correctOuterNsPrefix), name, arrayAttr, xmlnsAttrib]; } if (body === '' && this.options.useEmptyTag) { // Use empty (self-closing) tags if no contents openingTagParts.push(' />'); parts.push(openingTagParts.join('')); } else { openingTagParts.push('>'); if (this.options.namespaceArrayElements || i === 0) { parts.push(openingTagParts.join('')); } parts.push(body); if (this.options.namespaceArrayElements || i === n - 1) { if (emptyNonSubNameSpaceForArray) { parts.push(['</', name, '>'].join('')); } else { parts.push(['</', appendColon(correctOuterNsPrefix), name, '>'].join('')); } } } } } else if (typeof obj === 'object') { for (name in obj) { if (!obj.hasOwnProperty(name)) { continue; } // don't process attributes as element if (name === this.options.attributesKey) { continue; } // Its the value of a xml object. Return it directly. if (name === this.options.xmlKey) { nsContext.popContext(); return obj[name]; }
if (name === this.options.valueKey) { nsContext.popContext(); return xmlEscape(obj[name]); } var child = obj[name]; if (typeof child === 'undefined') { continue; } var attr = this.processAttributes(child, nsContext); var value = ''; var nonSubNameSpace = ''; var emptyNonSubNameSpace = false; var nameWithNsRegex = /^([^:]+):([^:]+)$/.exec(name); if (nameWithNsRegex) { nonSubNameSpace = nameWithNsRegex[1] + ':'; name = nameWithNsRegex[2]; } else if (name[0] === ':') { emptyNonSubNameSpace = true; name = name.substr(1); } if (isFirst) { value = this.objectToXML(child, name, nsPrefix, nsURI, false, null, schemaObject, nsContext); } else { if (this.definitions.schemas) { if (schema) { var childSchemaObject = this.findChildSchemaObject(schemaObject, name); // find sub namespace if not a primitive if (childSchemaObject && ((childSchemaObject.$type && (childSchemaObject.$type.indexOf('xsd:') === -1)) || childSchemaObject.$ref || childSchemaObject.$name)) { /*if the base name space of the children is not in the ingoredSchemaNamspaces we use it. This is because in some services the child nodes do not need the baseNameSpace. */ var childNsPrefix = ''; var childName = ''; var childNsURI = void 0; var childXmlnsAttrib = ''; var elementQName = childSchemaObject.$ref || childSchemaObject.$name; if (elementQName) { elementQName = utils_1.splitQName(elementQName); childName = elementQName.name; if (elementQName.prefix === utils_1.TNS_PREFIX) { // Local element childNsURI = childSchemaObject.$targetNamespace; childNsPrefix = nsContext.registerNamespace(childNsURI); if (this.isIgnoredNameSpace(childNsPrefix)) { childNsPrefix = nsPrefix; } } else { childNsPrefix = elementQName.prefix; if (this.isIgnoredNameSpace(childNsPrefix)) { childNsPrefix = nsPrefix; } childNsURI = schema.xmlns[childNsPrefix] || this.definitions.xmlns[childNsPrefix]; } var unqualified = false; // Check qualification form for local elements if (childSchemaObject.$name && childSchemaObject.targetNamespace === undefined) { if (childSchemaObject.$form === 'unqualified') { unqualified = true; } else if (childSchemaObject.$form === 'qualified') { unqualified = false; } else { unqualified = schema.$elementFormDefault !== 'qualified'; } } if (unqualified) { childNsPrefix = ''; } if (childNsURI && childNsPrefix) { if (nsContext.declareNamespace(childNsPrefix, childNsURI)) { childXmlnsAttrib = ' xmlns:' + childNsPrefix + '="' + childNsURI + '"'; xmlnsAttrib += childXmlnsAttrib; } } } var resolvedChildSchemaObject = void 0; if (childSchemaObject.$type) { var typeQName = utils_1.splitQName(childSchemaObject.$type); var typePrefix = typeQName.prefix; var typeURI = schema.xmlns[typePrefix] || this.definitions.xmlns[typePrefix]; childNsURI = typeURI; if (typeURI !== 'http://www.w3.org/2001/XMLSchema' && typePrefix !== utils_1.TNS_PREFIX) { // Add the prefix/namespace mapping, but not declare it nsContext.addNamespace(typePrefix, typeURI); } resolvedChildSchemaObject = this.findSchemaType(typeQName.name, typeURI) || childSchemaObject; } else { resolvedChildSchemaObject = this.findSchemaObject(childNsURI, childName) || childSchemaObject; } if (childSchemaObject.$baseNameSpace && this.options.ignoreBaseNameSpaces) { childNsPrefix = nsPrefix; childNsURI = nsURI; } if (this.options.ignoreBaseNameSpaces) { childNsPrefix = ''; childNsURI = ''; } ns = childNsPrefix; if (Array.isArray(child)) { // for arrays, we need to remember the current namespace childNsPrefix = { current: childNsPrefix, parent: ns }; } else { // parent (array) already got the namespace 
childXmlnsAttrib = null; } value = this.objectToXML(child, name, childNsPrefix, childNsURI, false, childXmlnsAttrib, resolvedChildSchemaObject, nsContext); } else if (obj[this.options.attributesKey] && obj[this.options.attributesKey].xsi_type) { // if parent object has complex type defined and child not found in parent var completeChildParamTypeObject = this.findChildSchemaObject(obj[this.options.attributesKey].xsi_type.type, obj[this.options.attributesKey].xsi_type.xmlns); nonSubNameSpace = obj[this.options.attributesKey].xsi_type.prefix; nsContext.addNamespace(obj[this.options.attributesKey].xsi_type.prefix, obj[this.options.attributesKey].xsi_type.xmlns); value = this.objectToXML(child, name, obj[this.options.attributesKey].xsi_type.prefix, obj[this.options.attributesKey].xsi_type.xmlns, false, null, null, nsContext); } else { if (Array.isArray(child)) { if (emptyNonSubNameSpace) { name = ':' + name; } else { name = nonSubNameSpace + name; } } value = this.objectToXML(child, name, nsPrefix, nsURI, false, null, null, nsContext); } } else { value = this.objectToXML(child, name, nsPrefix, nsURI, false, null, null, nsContext); } } } ns = noColonNameSpace(ns); if (prefixNamespace && !qualified && isFirst && !this.options.overrideRootElement) { ns = nsPrefix; } else if (this.isIgnoredNameSpace(ns)) { ns = ''; } var useEmptyTag = !value && this.options.useEmptyTag; if (!Array.isArray(child)) { // start tag parts.push(['<', emptyNonSubNameSpace ? '' : appendColon(nonSubNameSpace || ns), name, attr, xmlnsAttrib, (child === null ? ' xsi:nil="true"' : ''), useEmptyTag ? ' />' : '>', ].join('')); } if (!useEmptyTag) { parts.push(value); if (!Array.isArray(child)) { // end tag parts.push(['</', emptyNonSubNameSpace ? '' : appendColon(nonSubNameSpace || ns), name, '>'].join('')); } } } } else if (obj !== undefined) { parts.push((this.options.escapeXML) ? xmlEscape(obj) : obj); } nsContext.popContext(); return parts.join(''); }; WSDL.prototype.processAttributes = function (child, nsContext) { var attr = ''; if (child === null) { child = []; } var attrObj = child[this.options.attributesKey]; if (attrObj && attrObj.xsi_type) { var xsiType = attrObj.xsi_type; var prefix = xsiType.prefix || xsiType.namespace; // Generate a new namespace for complex extension if one not provided if (!prefix) { prefix = nsContext.registerNamespace(xsiType.xmlns); } else { nsContext.declareNamespace(prefix, xsiType.xmlns); } xsiType.prefix = prefix; } if (attrObj) { for (var attrKey in attrObj) { // handle complex extension separately if (attrKey === 'xsi_type') { var attrValue = attrObj[attrKey]; attr += ' xsi:type="' + attrValue.prefix + ':' + attrValue.type + '"'; attr += ' xmlns:' + attrValue.prefix + '="' + attrValue.xmlns + '"'; continue; } else { attr += ' ' + attrKey + '="' + xmlEscape(attrObj[attrKey]) + '"'; } } } return attr; }; /** * Look up a schema type definition * @param name * @param nsURI * @returns {*} */ WSDL.prototype.findSchemaType = function (name, nsURI) { if (!this.definitions.schemas || !name || !nsURI) { return null; } var schema = this.definitions.schemas[nsURI]; if (!schema || !schema.complexTypes) { return null; } return schema.complexTypes[name]; }; WSDL.prototype.findChildSchemaObject = function (parameterTypeObj, childName, backtrace) { if (!parameterTypeObj || !childName) { return null; } if (!backtrace) { backtrace = []; } if (backtrace.indexOf(parameterTypeObj) >= 0) { // We've recursed back to ourselves; break. 
return null; } else { backtrace = backtrace.concat([parameterTypeObj]); } var found = null; var i = 0; var child; var ref; if (Array.isArray(parameterTypeObj.$lookupTypes) && parameterTypeObj.$lookupTypes.length) { var types = parameterTypeObj.$lookupTypes; for (i = 0; i < types.length; i++) { var typeObj = types[i]; if (typeObj.$name === childName) { found = typeObj; break; } } } var object = parameterTypeObj; if (object.$name === childName && object.name === 'element') { return object; } if (object.$ref) { ref = utils_1.splitQName(object.$ref); if (ref.name === childName) { return object; } } var childNsURI; // want to avoid unecessary recursion to improve performance if (object.$type && backtrace.length === 1) { var typeInfo = utils_1.splitQName(object.$type); if (typeInfo.prefix === utils_1.TNS_PREFIX) { childNsURI = parameterTypeObj.$targetNamespace; } else { childNsURI = this.definitions.xmlns[typeInfo.prefix]; } var typeDef = this.findSchemaType(typeInfo.name, childNsURI); if (typeDef) { return this.findChildSchemaObject(typeDef, childName, backtrace); } } if (object.children) { for (i = 0, child; child = object.children[i]; i++) { found = this.findChildSchemaObject(child, childName, backtrace); if (found) { break; } if (child.$base) { var baseQName = utils_1.splitQName(child.$base); var childNameSpace = baseQName.prefix === utils_1.TNS_PREFIX ? '' : baseQName.prefix; childNsURI = child.xmlns[baseQName.prefix] || this.definitions.xmlns[baseQName.prefix]; var foundBase = this.findSchemaType(baseQName.name, childNsURI); if (foundBase) { found = this.findChildSchemaObject(foundBase, childName, backtrace); if (found) { found.$baseNameSpace = childNameSpace; found.$type = childNameSpace + ':' + childName; break; } } } } } if (!found && object.$name === childName) { return object; } return found; }; WSDL.prototype._initializeOptions = function (options) { this._originalIgnoredNamespaces = (options || {}).ignoredNamespaces; this.options = {}; if (options.forceHTTPS) this.options.forceHTTPS = true var ignoredNamespaces = options ? options.ignoredNamespaces : null; if (ignoredNamespaces && (Array.isArray(ignoredNamespaces.namespaces) || typeof ignoredNamespaces.namespaces === 'string')) { if (ignoredNamespaces.override) { this.options.ignoredNamespaces = ignoredNamespaces.namespaces; } else { this.options.ignoredNamespaces = this.ignoredNamespaces.concat(ignoredNamespaces.namespaces); } } else { this.options.ignoredNamespaces = this.ignoredNamespaces; } this.options.valueKey = options.valueKey || this.valueKey; this.options.xmlKey = options.xmlKey || this.xmlKey; if (options.escapeXML !== undefined) { this.options.escapeXML = options.escapeXML; } else { this.options.escapeXML = true; } if (options.returnFault !== undefined) { this.options.returnFault = options.returnFault; } else { this.options.returnFault = false; } this.options.handleNilAsNull = !!options.handleNilAsNull; if (options.namespaceArrayElements !== undefined) { this.options.namespaceArrayElements = options.namespaceArrayElements; } else { this.options.namespaceArrayElements = true; } // Allow any request headers to keep passing through this.options.wsdl_headers = options.wsdl_headers; this.options.wsdl_options = options.wsdl_options; if (options.httpClient) { this.options.httpClient = options.httpClient; } // The supplied request-object should be passed through if (options.request) { this.options.request = options.request; } var ignoreBaseNameSpaces = options ? 
options.ignoreBaseNameSpaces : null; if (ignoreBaseNameSpaces !== null && typeof ignoreBaseNameSpaces !== 'undefined') { this.options.ignoreBaseNameSpaces = ignoreBaseNameSpaces; } else { this.options.ignoreBaseNameSpaces = this.ignoreBaseNameSpaces; } // Works only in client this.options.forceSoap12Headers = options.forceSoap12Headers; this.options.customDeserializer = options.customDeserializer; if (options.overrideRootElement !== undefined) { this.options.overrideRootElement = options.overrideRootElement; } this.options.useEmptyTag = !!options.useEmptyTag; }; WSDL.prototype._processNextInclude = function (includes, callback) { var _this = this; var include = includes.shift(); if (!include) { return callback(); } var includePath; if (!/^https?:/i.test(this.uri) && !/^https?:/i.test(include.location)) { includePath = path.resolve(path.dirname(this.uri), include.location); } else { includePath = url.resolve(this.uri || '', include.location); } var options = _.assign({}, this.options); // follow supplied ignoredNamespaces option options.ignoredNamespaces = this._originalIgnoredNamespaces || this.options.ignoredNamespaces; options.WSDL_CACHE = this.WSDL_CACHE; open_wsdl_recursive(includePath, options, function (err, wsdl) { if (err) { return callback(err); } _this._includesWsdl.push(wsdl); if (wsdl.definitions instanceof elements.DefinitionsElement) { _.mergeWith(_this.definitions, wsdl.definitions, function (a, b) { return (a instanceof elements.SchemaElement) ? a.merge(b) : undefined; }); } else { return callback(new Error('wsdl.defintions is not an instance of elements.DefinitionsElement')); } _this._processNextInclude(includes, function (err) { callback(err); }); }); }; WSDL.prototype._parse = function (xml) { var _this = this; var p = sax.parser(true, null); var stack = []; var root = null; var types = null; var schema = null; var schemaAttrs = null; var options = this.options; p.onopentag = function (node) { var nsName = node.name; var attrs = node.attributes; var top = stack[stack.length - 1]; var name = utils_1.splitQName(nsName).name; if (name === 'schema') { schemaAttrs = attrs; } if (top) { try { top.startElement(stack, nsName, attrs, options, schemaAttrs); } catch (e) { if (_this.options.strict) { throw e; } else { stack.push(new elements.Element(nsName, attrs, options, schemaAttrs)); } } } else { if (name === 'definitions') { root = new elements.DefinitionsElement(nsName, attrs, options); stack.push(root); } else if (name === 'schema') { // Shim a structure in here to allow the proper objects to be created when merging back. 
root = new elements.DefinitionsElement('definitions', {}, {}); types = new elements.TypesElement('types', {}, {}); schema = new elements.SchemaElement(nsName, attrs, options); types.addChild(schema); root.addChild(types); stack.push(schema); } else { throw new Error('Unexpected root element of WSDL or include'); } } }; p.onclosetag = function (name) { var top = stack[stack.length - 1]; assert_1.ok(top, 'Unmatched close tag: ' + name); top.endElement(stack, name); }; p.write(xml).close(); return root; }; WSDL.prototype._fromXML = function (xml) { this.definitions = this._parse(xml); this.definitions.descriptions = { types: {} }; this.xml = xml; }; WSDL.prototype._fromServices = function (services) { }; WSDL.prototype._xmlnsMap = function () { var xmlns = this.definitions.xmlns; var str = ''; for (var alias in xmlns) { if (alias === '' || alias === utils_1.TNS_PREFIX) { continue; } var ns = xmlns[alias]; switch (ns) { case 'http://xml.apache.org/xml-soap': // apachesoap case 'http://schemas.xmlsoap.org/wsdl/': // wsdl case 'http://schemas.xmlsoap.org/wsdl/soap/': // wsdlsoap case 'http://schemas.xmlsoap.org/wsdl/soap12/': // wsdlsoap12 case 'http://schemas.xmlsoap.org/soap/encoding/': // soapenc case 'http://www.w3.org/2001/XMLSchema': // xsd continue; } if (~ns.indexOf('http://schemas.xmlsoap.org/')) { continue; } if (~ns.indexOf('http://www.w3.org/')) { continue; } if (~ns.indexOf('http://xml.apache.org/')) { continue; } str += ' xmlns:' + alias + '="' + ns + '"'; } return str; }; return WSDL; }()); exports.WSDL = WSDL; function open_wsdl_recursive (uri, p2, p3) { var fromCache; var WSDL_CACHE; var options; var callback; if (typeof p2 === 'function') { options = {}; callback = p2; } else { options = p2; callback = p3; } WSDL_CACHE = options.WSDL_CACHE; if (fromCache = WSDL_CACHE[uri]) { return callback.call(fromCache, null, fromCache); } return open_wsdl(uri, options, callback); } function open_wsdl (uri, p2, p3) { var options; var callback; if (typeof p2 === 'function') { options = {}; callback = p2; } else if (typeof p3 === 'function') { options = p2; callback = p3; } // initialize cache when calling open_wsdl directly var WSDL_CACHE = options.WSDL_CACHE || {}; var request_headers = options.wsdl_headers; var request_options = options.wsdl_options; var wsdl; if (!/^https?:/i.test(uri)) { debug('Reading file: %s', uri); fs.readFile(uri, 'utf8', function (err, definition) { if (err) { callback(err); } else { wsdl = new WSDL(definition, uri, options); WSDL_CACHE[uri] = wsdl; wsdl.WSDL_CACHE = WSDL_CACHE; wsdl.onReady(callback); } }); } else { debug('Reading url: %s', uri); var httpClient = options.httpClient || new http_1.HttpClient(options); if (options.forceHTTPS && !uri.includes('https')) uri = uri.replace('http', 'https') httpClient.request(uri, null /* options */, function (err, response, definition) { if (err) { callback(err); } else if (response && response.statusCode === 200) { wsdl = new WSDL(definition, uri, options); WSDL_CACHE[uri] = wsdl; wsdl.WSDL_CACHE = WSDL_CACHE; wsdl.onReady(callback); } else { callback(new Error('Invalid WSDL URL: ' + uri + '\n\n\r Code: ' + response.statusCode + '\n\n\r Response Body: ' + response.body)); } }, request_headers, request_options); } return wsdl; } exports.open_wsdl = open_wsdl; //# sourceMappingURL=index.js.map
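// Usage sketch (illustrative only; the require path and option values are
// assumptions, not part of this module). open_wsdl accepts (uri, callback)
// or (uri, options, callback); parsed documents are stored in WSDL_CACHE,
// which is how open_wsdl_recursive above avoids re-fetching shared includes:
//
//   var wsdl = require('./wsdl');
//   wsdl.open_wsdl('service.wsdl', { escapeXML: true }, function (err, def) {
//     if (err) { throw err; }
//     console.log(Object.keys(def.definitions.xmlns));
//   });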
grpc_proxy.go
// Copyright 2016 The etcd Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package etcdmain

import (
	"context"
	"crypto/tls"
	"crypto/x509"
	"fmt"
	"io/ioutil"
	"log"
	"math"
	"net"
	"net/http"
	"net/url"
	"os"
	"path/filepath"
	"time"

	"github.com/friendlyhank/etcd-3.4-annotated/clientv3"
	"github.com/friendlyhank/etcd-3.4-annotated/clientv3/leasing"
	"github.com/friendlyhank/etcd-3.4-annotated/clientv3/namespace"
	"github.com/friendlyhank/etcd-3.4-annotated/clientv3/ordering"
	"github.com/friendlyhank/etcd-3.4-annotated/etcdserver/api/v3election/v3electionpb"
	"github.com/friendlyhank/etcd-3.4-annotated/etcdserver/api/v3lock/v3lockpb"
	pb "github.com/friendlyhank/etcd-3.4-annotated/etcdserver/etcdserverpb"
	"github.com/friendlyhank/etcd-3.4-annotated/pkg/debugutil"
	"github.com/friendlyhank/etcd-3.4-annotated/pkg/logutil"
	"github.com/friendlyhank/etcd-3.4-annotated/pkg/transport"
	"github.com/friendlyhank/etcd-3.4-annotated/proxy/grpcproxy"
	grpc_prometheus "github.com/grpc-ecosystem/go-grpc-prometheus"
	"github.com/soheilhy/cmux"
	"github.com/spf13/cobra"
	"go.uber.org/zap"
	"google.golang.org/grpc"
	"google.golang.org/grpc/grpclog"
)

var (
	grpcProxyListenAddr            string
	grpcProxyMetricsListenAddr     string
	grpcProxyEndpoints             []string
	grpcProxyDNSCluster            string
	grpcProxyDNSClusterServiceName string
	grpcProxyInsecureDiscovery     bool
	grpcProxyDataDir               string
	grpcMaxCallSendMsgSize         int
	grpcMaxCallRecvMsgSize         int

	// tls for connecting to etcd

	grpcProxyCA                    string
	grpcProxyCert                  string
	grpcProxyKey                   string
	grpcProxyInsecureSkipTLSVerify bool

	// tls for clients connecting to proxy

	grpcProxyListenCA      string
	grpcProxyListenCert    string
	grpcProxyListenKey     string
	grpcProxyListenAutoTLS bool
	grpcProxyListenCRL     string

	grpcProxyAdvertiseClientURL string
	grpcProxyResolverPrefix     string
	grpcProxyResolverTTL        int

	grpcProxyNamespace string
	grpcProxyLeasing   string

	grpcProxyEnablePprof    bool
	grpcProxyEnableOrdering bool

	grpcProxyDebug bool
)

const defaultGRPCMaxCallSendMsgSize = 1.5 * 1024 * 1024

// etcdadmin: initialize grpc
func init() {
	rootCmd.AddCommand(newGRPCProxyCommand())
}

// newGRPCProxyCommand returns the cobra command for "grpc-proxy".
func newGRPCProxyCommand() *cobra.Command { lpc := &cobra.Command{ Use: "grpc-proxy <subcommand>", Short: "grpc-proxy related command", } lpc.AddCommand(newGRPCProxyStartCommand()) return lpc } func newGRPCProxyStartCommand() *cobra.Command { cmd := cobra.Command{ Use: "start", Short: "start the grpc proxy", Run: startGRPCProxy, } cmd.Flags().StringVar(&grpcProxyListenAddr, "listen-addr", "127.0.0.1:23790", "listen address") cmd.Flags().StringVar(&grpcProxyDNSCluster, "discovery-srv", "", "domain name to query for SRV records describing cluster endpoints") cmd.Flags().StringVar(&grpcProxyDNSClusterServiceName, "discovery-srv-name", "", "service name to query when using DNS discovery") cmd.Flags().StringVar(&grpcProxyMetricsListenAddr, "metrics-addr", "", "listen for endpoint /metrics requests on an additional interface") cmd.Flags().BoolVar(&grpcProxyInsecureDiscovery, "insecure-discovery", false, "accept insecure SRV records") cmd.Flags().StringSliceVar(&grpcProxyEndpoints, "endpoints", []string{"127.0.0.1:2379"}, "comma separated etcd cluster endpoints") cmd.Flags().StringVar(&grpcProxyAdvertiseClientURL, "advertise-client-url", "127.0.0.1:23790", "advertise address to register (must be reachable by client)") cmd.Flags().StringVar(&grpcProxyResolverPrefix, "resolver-prefix", "", "prefix to use for registering proxy (must be shared with other grpc-proxy members)") cmd.Flags().IntVar(&grpcProxyResolverTTL, "resolver-ttl", 0, "specify TTL, in seconds, when registering proxy endpoints") cmd.Flags().StringVar(&grpcProxyNamespace, "namespace", "", "string to prefix to all keys for namespacing requests") cmd.Flags().BoolVar(&grpcProxyEnablePprof, "enable-pprof", false, `Enable runtime profiling data via HTTP server. Address is at client URL + "/debug/pprof/"`) cmd.Flags().StringVar(&grpcProxyDataDir, "data-dir", "default.proxy", "Data directory for persistent data") cmd.Flags().IntVar(&grpcMaxCallSendMsgSize, "max-send-bytes", defaultGRPCMaxCallSendMsgSize, "message send limits in bytes (default value is 1.5 MiB)") cmd.Flags().IntVar(&grpcMaxCallRecvMsgSize, "max-recv-bytes", math.MaxInt32, "message receive limits in bytes (default value is math.MaxInt32)") // client TLS for connecting to server cmd.Flags().StringVar(&grpcProxyCert, "cert", "", "identify secure connections with etcd servers using this TLS certificate file") cmd.Flags().StringVar(&grpcProxyKey, "key", "", "identify secure connections with etcd servers using this TLS key file") cmd.Flags().StringVar(&grpcProxyCA, "cacert", "", "verify certificates of TLS-enabled secure etcd servers using this CA bundle") cmd.Flags().BoolVar(&grpcProxyInsecureSkipTLSVerify, "insecure-skip-tls-verify", false, "skip authentication of etcd server TLS certificates") // client TLS for connecting to proxy cmd.Flags().StringVar(&grpcProxyListenCert, "cert-file", "", "identify secure connections to the proxy using this TLS certificate file") cmd.Flags().StringVar(&grpcProxyListenKey, "key-file", "", "identify secure connections to the proxy using this TLS key file") cmd.Flags().StringVar(&grpcProxyListenCA, "trusted-ca-file", "", "verify certificates of TLS-enabled secure proxy using this CA bundle") cmd.Flags().BoolVar(&grpcProxyListenAutoTLS, "auto-tls", false, "proxy TLS using generated certificates") cmd.Flags().StringVar(&grpcProxyListenCRL, "client-crl-file", "", "proxy client certificate revocation list file.") // experimental flags cmd.Flags().BoolVar(&grpcProxyEnableOrdering, "experimental-serializable-ordering", false, "Ensure serializable reads 
have monotonically increasing store revisions across endpoints.") cmd.Flags().StringVar(&grpcProxyLeasing, "experimental-leasing-prefix", "", "leasing metadata prefix for disconnected linearized reads.") cmd.Flags().BoolVar(&grpcProxyDebug, "debug", false, "Enable debug-level logging for grpc-proxy.") return &cmd } func startGRPCProxy(cmd *cobra.Command, args []string) { checkArgs() lcfg := logutil.DefaultZapLoggerConfig if grpcProxyDebug { lcfg.Level = zap.NewAtomicLevelAt(zap.DebugLevel) grpc.EnableTracing = true } lg, err := lcfg.Build() if err != nil { log.Fatal(err) } defer lg.Sync() var gl grpclog.LoggerV2 gl, err = logutil.NewGRPCLoggerV2(lcfg) if err != nil { log.Fatal(err) } grpclog.SetLoggerV2(gl)
	tlsinfo := newTLS(grpcProxyListenCA, grpcProxyListenCert, grpcProxyListenKey)
	if tlsinfo == nil && grpcProxyListenAutoTLS {
		host := []string{"https://" + grpcProxyListenAddr}
		dir := filepath.Join(grpcProxyDataDir, "fixtures", "proxy")
		autoTLS, err := transport.SelfCert(lg, dir, host)
		if err != nil {
			log.Fatal(err)
		}
		tlsinfo = &autoTLS
	}

	if tlsinfo != nil {
		lg.Info("gRPC proxy server TLS", zap.String("tls-info", fmt.Sprintf("%+v", tlsinfo)))
	}
	m := mustListenCMux(lg, tlsinfo)
	grpcl := m.Match(cmux.HTTP2())
	defer func() {
		grpcl.Close()
		lg.Info("stop listening gRPC proxy client requests", zap.String("address", grpcProxyListenAddr))
	}()

	client := mustNewClient(lg)
	httpClient := mustNewHTTPClient(lg)

	srvhttp, httpl := mustHTTPListener(lg, m, tlsinfo, client)
	errc := make(chan error)
	// the gRPC proxy service
	go func() { errc <- newGRPCProxyServer(lg, client).Serve(grpcl) }()
	go func() { errc <- srvhttp.Serve(httpl) }()
	go func() { errc <- m.Serve() }()
	if len(grpcProxyMetricsListenAddr) > 0 {
		mhttpl := mustMetricsListener(lg, tlsinfo)
		go func() {
			mux := http.NewServeMux()
			grpcproxy.HandleMetrics(mux, httpClient, client.Endpoints())
			grpcproxy.HandleHealth(mux, client)
			lg.Info("gRPC proxy server metrics URL serving")
			herr := http.Serve(mhttpl, mux)
			if herr != nil {
				lg.Fatal("gRPC proxy server metrics URL returned", zap.Error(herr))
			} else {
				lg.Info("gRPC proxy server metrics URL returned")
			}
		}()
	}

	lg.Info("started gRPC proxy", zap.String("address", grpcProxyListenAddr))

	// grpc-proxy is initialized, ready to serve
	notifySystemd(lg)

	fmt.Fprintln(os.Stderr, <-errc)
	os.Exit(1)
}

func checkArgs() {
	if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL < 1 {
		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-ttl %d", grpcProxyResolverTTL))
		os.Exit(1)
	}
	if grpcProxyResolverPrefix == "" && grpcProxyResolverTTL > 0 {
		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid resolver-prefix %q", grpcProxyResolverPrefix))
		os.Exit(1)
	}
	if grpcProxyResolverPrefix != "" && grpcProxyResolverTTL > 0 && grpcProxyAdvertiseClientURL == "" {
		fmt.Fprintln(os.Stderr, fmt.Errorf("invalid advertise-client-url %q", grpcProxyAdvertiseClientURL))
		os.Exit(1)
	}
}

func mustNewClient(lg *zap.Logger) *clientv3.Client {
	srvs := discoverEndpoints(lg, grpcProxyDNSCluster, grpcProxyCA, grpcProxyInsecureDiscovery, grpcProxyDNSClusterServiceName)
	eps := srvs.Endpoints
	if len(eps) == 0 {
		eps = grpcProxyEndpoints
	}
	cfg, err := newClientCfg(lg, eps)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	cfg.DialOptions = append(cfg.DialOptions,
		grpc.WithUnaryInterceptor(grpcproxy.AuthUnaryClientInterceptor))
	cfg.DialOptions = append(cfg.DialOptions,
		grpc.WithStreamInterceptor(grpcproxy.AuthStreamClientInterceptor))
	client, err := clientv3.New(*cfg)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	return client
}

func newClientCfg(lg *zap.Logger, eps []string) (*clientv3.Config, error) {
	// set tls if any one tls option set
	cfg := clientv3.Config{
		Endpoints:   eps,
		DialTimeout: 5 * time.Second,
	}

	if grpcMaxCallSendMsgSize > 0 {
		cfg.MaxCallSendMsgSize = grpcMaxCallSendMsgSize
	}
	if grpcMaxCallRecvMsgSize > 0 {
		cfg.MaxCallRecvMsgSize = grpcMaxCallRecvMsgSize
	}

	tls := newTLS(grpcProxyCA, grpcProxyCert, grpcProxyKey)
	if tls == nil && grpcProxyInsecureSkipTLSVerify {
		tls = &transport.TLSInfo{}
	}
	if tls != nil {
		clientTLS, err := tls.ClientConfig()
		if err != nil {
			return nil, err
		}
		clientTLS.InsecureSkipVerify = grpcProxyInsecureSkipTLSVerify
		cfg.TLS = clientTLS
		lg.Info("gRPC proxy client TLS", zap.String("tls-info", fmt.Sprintf("%+v", tls)))
	}
	return &cfg, nil
}

func newTLS(ca, cert, key string) *transport.TLSInfo {
	if ca == "" && cert == "" && key == "" {
		return nil
	}
	return &transport.TLSInfo{TrustedCAFile: ca, CertFile: cert, KeyFile: key, EmptyCN: true}
}

func mustListenCMux(lg *zap.Logger, tlsinfo *transport.TLSInfo) cmux.CMux {
	l, err := net.Listen("tcp", grpcProxyListenAddr)
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if l, err = transport.NewKeepAliveListener(l, "tcp", nil); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	if tlsinfo != nil {
		tlsinfo.CRLFile = grpcProxyListenCRL
		if l, err = transport.NewTLSListener(l, tlsinfo); err != nil {
			lg.Fatal("failed to create TLS listener", zap.Error(err))
		}
	}
	lg.Info("listening for gRPC proxy client requests", zap.String("address", grpcProxyListenAddr))
	return cmux.New(l)
}

func newGRPCProxyServer(lg *zap.Logger, client *clientv3.Client) *grpc.Server {
	if grpcProxyEnableOrdering {
		vf := ordering.NewOrderViolationSwitchEndpointClosure(*client)
		client.KV = ordering.NewKV(client.KV, vf)
		lg.Info("waiting for linearized read from cluster to recover ordering")
		for {
			_, err := client.KV.Get(context.TODO(), "_", clientv3.WithKeysOnly())
			if err == nil {
				break
			}
			lg.Warn("ordering recovery failed, retrying in 1s", zap.Error(err))
			time.Sleep(time.Second)
		}
	}

	if len(grpcProxyNamespace) > 0 {
		client.KV = namespace.NewKV(client.KV, grpcProxyNamespace)
		client.Watcher = namespace.NewWatcher(client.Watcher, grpcProxyNamespace)
		client.Lease = namespace.NewLease(client.Lease, grpcProxyNamespace)
	}

	if len(grpcProxyLeasing) > 0 {
		client.KV, _, _ = leasing.NewKV(client, grpcProxyLeasing)
	}

	// KV wrapper around the etcd client
	kvp, _ := grpcproxy.NewKvProxy(client)
	// watch wrapper around the etcd client
	watchp, _ := grpcproxy.NewWatchProxy(client)
	if grpcProxyResolverPrefix != "" {
		grpcproxy.Register(client, grpcProxyResolverPrefix, grpcProxyAdvertiseClientURL, grpcProxyResolverTTL)
	}
	clusterp, _ := grpcproxy.NewClusterProxy(client, grpcProxyAdvertiseClientURL, grpcProxyResolverPrefix)
	leasep, _ := grpcproxy.NewLeaseProxy(client)
	mainp := grpcproxy.NewMaintenanceProxy(client)
	authp := grpcproxy.NewAuthProxy(client)
	electionp := grpcproxy.NewElectionProxy(client)
	lockp := grpcproxy.NewLockProxy(client)

	server := grpc.NewServer(
		grpc.StreamInterceptor(grpc_prometheus.StreamServerInterceptor),
		grpc.UnaryInterceptor(grpc_prometheus.UnaryServerInterceptor),
		grpc.MaxConcurrentStreams(math.MaxUint32),
	)

	// register the services
	pb.RegisterKVServer(server, kvp)
	pb.RegisterWatchServer(server, watchp)
	pb.RegisterClusterServer(server, clusterp)
	pb.RegisterLeaseServer(server, leasep)
	pb.RegisterMaintenanceServer(server, mainp)
	pb.RegisterAuthServer(server, authp)
	v3electionpb.RegisterElectionServer(server, electionp)
	v3lockpb.RegisterLockServer(server, lockp)

	return server
}

func mustHTTPListener(lg *zap.Logger, m cmux.CMux, tlsinfo *transport.TLSInfo, c *clientv3.Client) (*http.Server, net.Listener) {
	httpClient := mustNewHTTPClient(lg)
	httpmux := http.NewServeMux()
	httpmux.HandleFunc("/", http.NotFound)
	grpcproxy.HandleMetrics(httpmux, httpClient, c.Endpoints())
	grpcproxy.HandleHealth(httpmux, c)
	if grpcProxyEnablePprof {
		for p, h := range debugutil.PProfHandlers() {
			httpmux.Handle(p, h)
		}
		lg.Info("gRPC proxy enabled pprof", zap.String("path", debugutil.HTTPPrefixPProf))
	}
	srvhttp := &http.Server{
		Handler:  httpmux,
		ErrorLog: log.New(ioutil.Discard, "net/http", 0),
	}
	if tlsinfo == nil {
		return srvhttp, m.Match(cmux.HTTP1())
	}
	srvTLS, err := tlsinfo.ServerConfig()
	if err != nil {
		lg.Fatal("failed to set up TLS", zap.Error(err))
	}
	srvhttp.TLSConfig =
srvTLS return srvhttp, m.Match(cmux.Any()) } func mustNewHTTPClient(lg *zap.Logger) *http.Client { transport, err := newHTTPTransport(grpcProxyCA, grpcProxyCert, grpcProxyKey) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } return &http.Client{Transport: transport} } func newHTTPTransport(ca, cert, key string) (*http.Transport, error) { tr := &http.Transport{} if ca != "" && cert != "" && key != "" { caCert, err := ioutil.ReadFile(ca) if err != nil { return nil, err } keyPair, err := tls.LoadX509KeyPair(cert, key) if err != nil { return nil, err } caPool := x509.NewCertPool() caPool.AppendCertsFromPEM(caCert) tlsConfig := &tls.Config{ Certificates: []tls.Certificate{keyPair}, RootCAs: caPool, } tlsConfig.BuildNameToCertificate() tr.TLSClientConfig = tlsConfig } else if grpcProxyInsecureSkipTLSVerify { tlsConfig := &tls.Config{InsecureSkipVerify: grpcProxyInsecureSkipTLSVerify} tr.TLSClientConfig = tlsConfig } return tr, nil } func mustMetricsListener(lg *zap.Logger, tlsinfo *transport.TLSInfo) net.Listener { murl, err := url.Parse(grpcProxyMetricsListenAddr) if err != nil { fmt.Fprintf(os.Stderr, "cannot parse %q", grpcProxyMetricsListenAddr) os.Exit(1) } ml, err := transport.NewListener(murl.Host, murl.Scheme, tlsinfo) if err != nil { fmt.Fprintln(os.Stderr, err) os.Exit(1) } lg.Info("gRPC proxy listening for metrics", zap.String("address", murl.String())) return ml }
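// Connection sketch (illustrative; assumes a proxy started with the default
// flags above). The proxy exposes the same KV/Watch/Lease/Cluster/Auth gRPC
// services as etcd itself, so an ordinary clientv3 client can simply point
// at --listen-addr:
//
//	cli, err := clientv3.New(clientv3.Config{
//		Endpoints:   []string{"127.0.0.1:23790"},
//		DialTimeout: 5 * time.Second,
//	})
//	if err != nil {
//		log.Fatal(err)
//	}
//	defer cli.Close()
//	_, err = cli.Put(context.Background(), "proxied-key", "v")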
networksecurityrule.py
# #######
# Copyright (c) 2016-2020 Cloudify Platform Ltd. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#        http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
    resources.network.NetworkSecurityRule
    ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
    Microsoft Azure Network Security Rule interface
"""
# Node properties and logger
from cloudify import ctx
# Base resource class
from cloudify_azure.resources.base import Resource
# Lifecycle operation decorator
from cloudify.decorators import operation
# Logger, API version
from cloudify_azure import (constants, utils)


class NetworkSecurityRule(Resource):
    """
    Microsoft Azure Network Security Rule interface

    .. warning::
        This interface should only be instantiated from
        within a Cloudify Lifecycle Operation

    :param string resource_group: Name of the parent Resource Group
    :param string network_security_group: Name of the parent
        Network Security Group
    :param string api_version: API version to use for all requests
    :param `logging.Logger` logger:
        Parent logger for the class to use. Defaults to `ctx.logger`
    """

    def __init__(self, resource_group=None,
                 network_security_group=None,
                 api_version=constants.API_VER_NETWORK,
                 logger=None, _ctx=ctx):
        resource_group = resource_group or \
            utils.get_resource_group(_ctx=_ctx)
        network_security_group = network_security_group or \
            utils.get_network_security_group(_ctx=_ctx)
        Resource.__init__(
            self,
            'Network Security Rule',
            '/{0}/{1}/{2}/{3}'.format(
                'resourceGroups/{0}'.format(resource_group),
                'providers/Microsoft.Network',
                'networkSecurityGroups/{0}'.format(network_security_group),
                'securityRules'
            ),
            api_version=api_version,
            logger=logger,
            _ctx=_ctx)


@operation(resumable=True)
def create(**_):
    """Uses an existing, or creates a new, Network Security Rule"""
    # Create a resource (if necessary)
    utils.task_resource_create(
        NetworkSecurityRule(api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_NETWORK)),
        {
            'location': ctx.node.properties.get('location'),
            'tags': ctx.node.properties.get('tags'),
            'properties': utils.get_resource_config()
        })


@operation(resumable=True)
def delete(**_):
    """Deletes a Network Security Rule"""
    # Delete the resource
    utils.task_resource_delete(
        NetworkSecurityRule(api_version=ctx.node.properties.get(
            'api_version', constants.API_VER_NETWORK)))
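# Illustrative request path (assumed example names, for orientation only):
# NetworkSecurityRule(resource_group='rg1', network_security_group='nsg1')
# issues requests under
#   /resourceGroups/rg1/providers/Microsoft.Network/networkSecurityGroups/nsg1/securityRules
# and create() above sends the node's resource_config (e.g. priority,
# access, direction) as the 'properties' payload.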
uuid.go
package main import "time" var randSource *Xor64Source func init() { // rand.Seed(time.Now().UnixNano()) randSource = NewXor64Source(time.Now().UnixNano()) } // idMaker interface generates a unique uint64 id type idMaker interface { Uint64() uint64 } // Create a new idMaker func NewRandSource() idMaker { return NewXor64Source(time.Now().UnixNano()) } // Generate a new unique id using the shared generator func uuid() uint64 { //// Slowest // b := make([]byte, 16) // rand.Read(b) // b[6] = (b[6] & 0x0f) | 0x40 // b[8] = (b[8] & 0x3f) | 0x80 // u := binary.BigEndian.Uint64(b) //// Better // u := uint64(rand.Uint32())<<32 + uint64(rand.Uint32()) // Best
	u := randSource.Uint64()
return u } // xorshift implements a 64-bit version of Marsaglia's xorshift PRNG as // described in http://www.jstatsoft.org/v08/i14/paper. // The generator has a period of 2^64-1 and passes most of the tests in the // dieharder test suit. // // https://gist.githubusercontent.com/anonymous/3908425/raw/0972271d707997619a45da44103d4dddee17b8c5/xorshift.go // *Xor64Source implements the rand.Source interface from math/rand. type Xor64Source uint64 // seed0 is used as default seed to initialize the generator. const seed0 = 88172645463325252 // NewXor64Source returns a pointer to a new Xor64Source seeded with the given // value. func NewXor64Source(seed int64) *Xor64Source { var s Xor64Source s.Seed(seed) return &s } // xor64 generates the next value of a pseudo-random sequence given a current // state x. func xor64(x uint64) uint64 { x ^= x << 13 x ^= x >> 7 x ^= x << 17 return x } // next advances the generators internal state to the next value and returns // this value as an uint64. func (s *Xor64Source) next() uint64 { x := xor64(uint64(*s)) *s = Xor64Source(x) return x } // Int63 returns a pseudo-random integer in [0,2^63-1) as an int64. func (s *Xor64Source) Int63() int64 { return int64(s.next() >> 1) } // Uint64 returns a pseudo-random integer in [1,2^64-1) as an uint64. func (s *Xor64Source) Uint64() uint64 { return s.next() } // Seed uses the given value to initialize the generator. If this value is 0, a // pre-defined seed is used instead, since the xorshift algorithm requires at // least one bit of the internal state to be set. func (s *Xor64Source) Seed(seed int64) { if seed == 0 { seed = seed0 } *s = Xor64Source(seed) }
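// Usage sketch (illustrative; assumes "fmt" and "math/rand" are imported).
// Xor64Source satisfies both the idMaker interface above and math/rand's
// Source contract (Int63/Seed), so it can back a *rand.Rand directly:
//
//	src := NewXor64Source(42)
//	fmt.Println(src.Uint64()) // deterministic for a fixed seed
//	r := rand.New(src)
//	fmt.Println(r.Intn(10))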
cau_ldr_ca2.rs
#[doc = "Writer for register CAU_LDR_CA2"] pub type W = crate::W<u32, super::CAU_LDR_CA2>; #[doc = "Register CAU_LDR_CA2 `reset()`'s with value 0"] impl crate::ResetValue for super::CAU_LDR_CA2 { type Type = u32; #[inline(always)] fn reset_value() -> Self::Type { 0 } } #[doc = "Write proxy for field `CA2`"] pub struct CA2_W<'a> { w: &'a mut W, } impl<'a> CA2_W<'a> { #[doc = r"Writes raw bits to the field"] #[inline(always)] pub unsafe fn
bits(self, value: u32) -> &'a mut W {
        self.w.bits = (self.w.bits & !0xffff_ffff) | ((value as u32) & 0xffff_ffff);
        self.w
    }
}
impl W {
    #[doc = "Bits 0:31 - CA2"]
    #[inline(always)]
    pub fn ca2(&mut self) -> CA2_W {
        CA2_W { w: self }
    }
}
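// Usage sketch (illustrative; `peripherals.CAU` and the register accessor
// name are assumptions in the usual svd2rust style, not defined here).
// Writing the full 32-bit CA2 field goes through the unsafe raw-bits
// writer above:
//
//     peripherals.CAU.cau_ldr_ca2.write(|w| unsafe { w.ca2().bits(0xdead_beef) });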
variable-plugin.ts
import { Contracts } from "ts-extractor"; import { BasePlugin } from "@src/abstractions/base-plugin"; import { SupportedApiItemKindType, PluginResult, PluginOptions } from "@src/contracts/plugin"; import { GeneratorHelpers } from "@src/generator-helpers"; import { MarkdownBuilder } from "@simplrjs/markdown"; export class VariablePlugin extends BasePlugin<Contracts.ApiVariableDto> { public SupportedApiDefinitionKind(): SupportedApiItemKindType[] { return [GeneratorHelpers.ApiDefinitionKind.Variable]; } public Render(options: PluginOptions, apiItem: Contracts.ApiVariableDto): PluginResult<Contracts.ApiVariableDto> { const heading: string = `My variable ${options.Reference.Alias}`; const pluginResult: PluginResult<Contracts.ApiVariableDto> = { ...GeneratorHelpers.GetDefaultPluginResultData(), ApiItem: apiItem, Reference: options.Reference, Headings: [
{
                    ApiItemId: options.Reference.Id,
                    Heading: heading
                }
            ],
            UsedReferences: [options.Reference.Id]
        };

        pluginResult.Result = new MarkdownBuilder()
            .Header(heading, 3)
            .EmptyLine()
            .Text("Variable plugin result.")
            .GetOutput();

        return pluginResult;
    }
}
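// Output sketch (approximate; the exact shape of GetOutput() depends on
// MarkdownBuilder). For a reference alias of "foo", Render() should produce
// markdown along the lines of:
//
//   ### My variable foo
//
//   Variable plugin result.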
model_type.rs
#[repr(u8)] /// The model type, specifically for bits 64 to 71 of snowflakes. pub enum
ModelType {
    Guild = 0,
    User = 1,
    Channel = 2,
    Message = 3,
    Role = 4,
    /// You should never get a snowflake with this model type,
    /// as these types are only used for cases where the database needs an ID
    /// but no other option applies.
    InternalUse = 5,
    DmChannel = 6,
    Bot = 7,
    Unknown = !0,
}
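// Decode sketch (illustrative; assumes snowflakes wider than 64 bits, e.g.
// a u128, since the discriminant lives in bits 64..=71):
//
//     let kind = ((snowflake >> 64) & 0xff) as u8; // 0 => Guild, 1 => User, ...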
models.py
# Copyright 2016 Mandiant, A FireEye Company # Authors: Brian Jones # License: Apache 2.0 ''' Model classes for "Relational Learning with TensorFlow" tutorial ''' import numpy as np import tensorflow as tf from .util import ContrastiveTrainingProvider def least_squares_objective(output, target, add_bias=True): ''' Creates final model output and loss for least squares objective Args: output: Model output target: Training target placeholder add_bias: If True, a bias Variable will be added to the output Returns: tuple (final output, loss) ''' y = output if add_bias: bias = tf.Variable([0.0]) y = output + bias loss = tf.reduce_sum(tf.square(y - target)) return y, loss def logistic_objective(output, target, add_bias=True): ''' Creates final model output and loss for logistic objective Args: output: Model output target: Training target placeholder add_bias: If True, a bias Variable will be added to the output Returns: tuple (final output, loss) ''' y = output if add_bias: bias = tf.Variable([0.0]) y = output + bias sig_y = tf.clip_by_value(tf.sigmoid(y), 0.001, 0.999) # avoid NaNs loss = -tf.reduce_sum(target*tf.log(sig_y) + (1-target)*tf.log(1-sig_y)) return sig_y, loss def ranking_margin_objective(output, margin=1.0): ''' Create final model output and loss for pairwise ranking margin objective Loss for single pair (f(p), f(n)) = [margin - f(p) + f(n)]+ This only works when given model output on alternating positive/negative pairs: [pos,neg,pos,neg,...]. TODO: check target placeholder at runtime to make sure this is the case? Args: output: Model output margin: The margin value for the pairwise hinge loss Returns: tuple (final output, loss) ''' y_pairs = tf.reshape(output, [-1,2]) # fold: 1 x n -> [n/2 x 2] pos_scores, neg_scores = tf.split(1, 2, y_pairs) # separate pairs hinge_losses = tf.nn.relu(margin - pos_scores + neg_scores) total_hinge_loss = tf.reduce_sum(hinge_losses) return output, total_hinge_loss def sparse_maxnorm_update(var_matrix, indices, maxnorm=1.0): '''Sparse update operation that ensures selected rows in var_matrix do not have a Euclidean norm greater than maxnorm. Rows that exceed it are scaled to length. Args: var_matrix: 2D mutable tensor (Variable) to operate on indices: 1D tensor with the row indices to constrain maxnorm: the maximum Euclidean norm Returns: An operation that will update var_matrix when run in a Session ''' selected_rows = tf.nn.embedding_lookup(var_matrix, indices) row_norms = tf.sqrt(tf.reduce_sum(tf.square(selected_rows), 1)) scaling = maxnorm / tf.maximum(row_norms, maxnorm) scaled = selected_rows * tf.expand_dims(scaling, 1) return tf.scatter_update(var_matrix, indices, scaled) def dense_maxnorm_update(var_matrix, maxnorm=1.0): '''Dense update operation that ensures all rows in var_matrix do not have a Euclidean norm greater than maxnorm. Rows that exceed it are scaled to length. Args: var_matrix: 2D mutable tensor (Variable) to operate on maxnorm: the maximum Euclidean norm Returns: An operation that will update var_matrix when run in a Session ''' row_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1)) scaling = maxnorm / tf.maximum(row_norms, maxnorm) scaled = var_matrix * tf.expand_dims(scaling, 1) return tf.assign(var_matrix, scaled) def dense_maxnorm(var_matrix, maxnorm=1.0): '''Similar to dense_maxnorm_update(), except this returns a new Tensor instead of an operation that modifies var_matrix. 
Args: var_matrix: 2D tensor (Variable) maxnorm: the maximum Euclidean norm Returns: A new tensor where all rows have been scaled as necessary ''' axis_norms = tf.sqrt(tf.reduce_sum(tf.square(var_matrix), 1)) scaling = maxnorm / tf.maximum(axis_norms, maxnorm) return var_matrix * tf.expand_dims(scaling, 1) class BaseModel(object): ''' Base class for embedding-based relational learning models that use maxnorm regularization. Subclasses must implement _create_model() and populate self.train_step, and can optionally populate self.post_step for post-processing. Note: When model_type is 'ranking_margin', the mini-batch provider returned by _create_batch_provider() must provide instances in alternating pos/neg pairs: [pos, neg, pos, neg, ...]. This is satisfied when using ContrastiveTrainingProvider; be careful if you use a different one. Args: embedding_size: Embedding vector length maxnorm: Maximum Euclidean norm for embedding vectors batch_pos_cnt: Number of positive examples to use in each mini-batch max_iter: Maximum number of optimization iterations to perform model_type: Possible values: 'least_squares': squared loss on 0/1 targets 'logistic': sigmoid link function, crossent loss on 0/1 targets 'ranking_margin': ranking margin on pos/neg pairs add_bias: If True, a bias Variable will be added to the output for least_squares and logistic models. opt: An optimizer object to use. If None, the default optimizer is tf.train.AdagradOptimizer(1.0) TODO: add support for other regularizers like L2 ''' def __init__(self, embedding_size, maxnorm=1.0, batch_pos_cnt=100, max_iter=1000, model_type='least_squares', add_bias=True, opt=None): self.embedding_size = embedding_size self.maxnorm = maxnorm self.batch_pos_cnt = batch_pos_cnt self.max_iter = max_iter self.model_type = model_type self.add_bias = add_bias if opt is None: opt = tf.train.AdagradOptimizer(1.0) self.opt = opt self.sess = None self.train_step = None self.post_step = None self.graph = tf.Graph() with self.graph.as_default(): self.head_input = tf.placeholder(tf.int32, shape=[None]) self.rel_input = tf.placeholder(tf.int32, shape=[None]) self.tail_input = tf.placeholder(tf.int32, shape=[None]) self.target = tf.placeholder(tf.float32, shape=[None]) def _create_model(self, train_triples): ''' Subclasses must build Graph and set self.train_step ''' raise Exception('subclass must implement') def _create_batch_provider(self, train_triples): ''' Default implementation ''' return ContrastiveTrainingProvider(train_triples, self.batch_pos_cnt) def _create_output_and_loss(self, raw_output): if self.model_type == 'least_squares': return least_squares_objective(raw_output, self.target, self.add_bias) elif self.model_type == 'logistic': return logistic_objective(raw_output, self.target, self.add_bias) elif self.model_type == 'ranking_margin': return ranking_margin_objective(raw_output, 1.0) else: raise Exception('Unknown model_type') def _norm_constraint_op(self, var_matrix, row_indices, maxnorm): ''' Args: var_matrix: A 2D Tensor holding the vectors to constrain (in rows) row_indices: The rows in var_tensor that are being considered for constraint application (typically embedding vectors for entities observed for a minibatch of training data). These will be used for a sparse variable update operation if the chosen optimizer only modified these entries. Otherwise a dense operation is used and row_indices are ignored. 
maxnorm: The maximum Euclidean norm for the rows in var_tensor Returns: An operation which will apply the constraints when run in a Session ''' # Currently, TF optimizers do not update variables with zero gradient # except AdamOptimizer if isinstance(self.opt, tf.train.AdamOptimizer): return dense_maxnorm_update(var_matrix, maxnorm) else: return sparse_maxnorm_update(var_matrix, row_indices, maxnorm) def embeddings(self): ''' Subclass should override this if it uses different embedding variables Returns: A list of pairs: [(embedding name, embedding 2D Tensor)] ''' return [('entity', self.entity_embedding_vars), ('rel', self.rel_embedding_vars)] def create_feed_dict(self, triples, labels=None, training=False): ''' Create a TensorFlow feed dict for relationship triples Args: triples: A numpy integer array of relationship triples, where each row contains [head idx, relationship idx, tail idx] labels: (optional) A label array for triples training: (optional) A flag indicating whether the feed dict is for training or test purposes. Useful for things like dropout where a dropout_probability variable is set differently in the two contexts. ''' feed_dict = {self.head_input: triples[:, 0], self.rel_input: triples[:, 1], self.tail_input: triples[:, 2]} if labels is not None: feed_dict[self.target] = labels return feed_dict def close(self): ''' Closes the TensorFlow Session object ''' self.sess.close(); def fit(self, train_triples, step_callback=None): ''' Trains the model on relationship triples Args: train_triples: A numpy integer array of relationship triples, where each row of contains [head idx, relationship idx, tail idx] step_callback: (optional) A function that will be called before each optimization step, step_callback(iteration, feed_dict) ''' if self.sess is not None: self.sess.close() self.sess = tf.Session(graph=self.graph) with self.graph.as_default(): self._create_model(train_triples) self.sess.run(tf.initialize_all_variables()) batch_provider = self._create_batch_provider(train_triples) for i in range(self.max_iter): batch_triples, batch_labels = batch_provider.next_batch() feed_dict = self.create_feed_dict(batch_triples, batch_labels, training=True) if step_callback: keep_going = step_callback(i, feed_dict) if not keep_going: break self.sess.run(self.train_step, feed_dict) if self.post_step is not None: self.sess.run(self.post_step, feed_dict) def predict(self, triples): ''' Runs a trained model on the supplied relationship triples. fit() must be called before calling this function. Args: triples: A numpy integer array of relationship triples, where each row of contains [head idx, relationship idx, tail idx] ''' feed_dict = self.create_feed_dict(triples, training=False) return self.sess.run(self.output, feed_dict=feed_dict) class Contrastive_CP(BaseModel): ''' Model with a scoring function based on CANDECOMP/PARAFAC tensor decomposition. Optimization differs, however, in the use of maxnorm regularization and contrastive negative sampling. Score for (head i, rel k, tail j) triple is: h_i^T * diag(r_k) * t_j, where h_i and t_j are embedding vectors for the head and tail entities, and r_k is an embedding vector for the relationship type. 
Args: embedding_size: Embedding vector length maxnorm: Maximum Euclidean norm for embedding vectors batch_pos_cnt: Number of positive examples to use in each mini-batch max_iter: Maximum number of optimization iterations to perform model_type: Possible values: 'least_squares': squared loss on 0/1 targets 'logistic': sigmoid link function, crossent loss on 0/1 targets 'ranking_margin': ranking margin on pos/neg pairs add_bias: If True, a bias Variable will be added to the output for least_squares and logistic models. opt: An optimizer object to use. If None, the default optimizer is tf.train.AdagradOptimizer(1.0) References: Kolda, Tamara G., and Brett W. Bader. "Tensor decompositions and applications." SIAM review 51.3 (2009): 455-500. ''' def _create_model(self, train_triples): # Count unique items to determine embedding matrix sizes head_cnt = len(set(train_triples[:,0])) rel_cnt = len(set(train_triples[:,1])) tail_cnt = len(set(train_triples[:,2])) init_sd = 1.0 / np.sqrt(self.embedding_size) # Embedding matrices for entities and relationship types head_init = tf.truncated_normal([head_cnt, self.embedding_size], stddev=init_sd) rel_init = tf.truncated_normal([rel_cnt, self.embedding_size], stddev=init_sd) tail_init = tf.truncated_normal([tail_cnt, self.embedding_size], stddev=init_sd) if self.maxnorm is not None: # Ensure maxnorm constraints are initially satisfied head_init = dense_maxnorm(head_init, self.maxnorm) rel_init = dense_maxnorm(rel_init, self.maxnorm) tail_init = dense_maxnorm(tail_init, self.maxnorm) self.head_embedding_vars = tf.Variable(head_init) self.rel_embedding_vars = tf.Variable(rel_init) self.tail_embedding_vars = tf.Variable(tail_init) # Embedding layer for each (head, rel, tail) triple being fed in as input head_embed = tf.nn.embedding_lookup(self.head_embedding_vars, self.head_input) rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input) tail_embed = tf.nn.embedding_lookup(self.tail_embedding_vars, self.tail_input) # Model output raw_output = tf.reduce_sum(tf.mul(tf.mul(head_embed, rel_embed), tail_embed), 1) self.output, self.loss = self._create_output_and_loss(raw_output) # Optimization self.train_step = self.opt.minimize(self.loss) if self.maxnorm is not None: # Post-processing to limit embedding vars to L2 ball head_constraint = self._norm_constraint_op(self.head_embedding_vars, tf.unique(self.head_input)[0], self.maxnorm) rel_constraint = self._norm_constraint_op(self.rel_embedding_vars, tf.unique(self.rel_input)[0], self.maxnorm) tail_constraint = self._norm_constraint_op(self.tail_embedding_vars, tf.unique(self.tail_input)[0], self.maxnorm) self.post_step = [head_constraint, rel_constraint, tail_constraint] def _create_batch_provider(self, train): # CP treats head and tail entities separately return ContrastiveTrainingProvider(train, self.batch_pos_cnt, separate_head_tail=True) def embeddings(self): ''' Returns: A list of pairs: [(embedding name, embedding 2D Tensor)] ''' return [('head', self.head_embedding_vars), ('tail', self.head_embedding_vars), ('rel', self.rel_embedding_vars)] class Bilinear(BaseModel): ''' Model with a scoring function based on the bilinear formulation of RESCAL. Optimization differs, however, in the use of maxnorm regularization and contrastive negative sampling. Score for (head i, rel k, tail j) triple is: e_i^T * R_k * e_j where e_i and e_j are D-dimensional embedding vectors for the head and tail entities, and R_k is a (D x D) matrix for the relationship type acting as a bilinear operator. 
Args: embedding_size: Embedding vector length maxnorm: Maximum Euclidean norm for embedding vectors rel_maxnorm_mult: Multiplier for the maxnorm threshold used for relationship embeddings. Example: If maxnorm=2.0 and rel_maxnorm_mult=4.0, then the maxnorm constrain for relationships will be 2.0 * 4.0 = 8.0. batch_pos_cnt: Number of positive examples to use in each mini-batch max_iter: Maximum number of optimization iterations to perform model_type: Possible values: 'least_squares': squared loss on 0/1 targets 'logistic': sigmoid link function, crossent loss on 0/1 targets 'ranking_margin': ranking margin on pos/neg pairs add_bias: If True, a bias Variable will be added to the output for least_squares and logistic models. opt: An optimizer object to use. If None, the default optimizer is tf.train.AdagradOptimizer(1.0) References: Nickel, Maximilian, Volker Tresp, and Hans-Peter Kriegel. "A three-way model for collective learning on multi-relational data." Proceedings of the 28th international conference on machine learning (ICML-11). 2011. ''' def __init__(self, embedding_size, maxnorm=1.0, rel_maxnorm_mult=3.0, batch_pos_cnt=100, max_iter=1000, model_type='least_squares', add_bias=True, opt=None): super(Bilinear, self).__init__( embedding_size=embedding_size, maxnorm=maxnorm, batch_pos_cnt=batch_pos_cnt, max_iter=max_iter, model_type=model_type, opt=opt) self.rel_maxnorm_mult = rel_maxnorm_mult def _create_model(self, train_triples): # Count unique items to determine embedding matrix sizes entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2])) rel_cnt = len(set(train_triples[:,1])) init_sd = 1.0 / np.sqrt(self.embedding_size) # Embedding variables for all entities and relationship types
        entity_embedding_shape = [entity_cnt, self.embedding_size]
# Relationship embeddings will be stored in flattened format to make # applying maxnorm constraints easier rel_embedding_shape = [rel_cnt, self.embedding_size * self.embedding_size] entity_init = tf.truncated_normal(entity_embedding_shape, stddev=init_sd) rel_init = tf.truncated_normal(rel_embedding_shape, stddev=init_sd) if self.maxnorm is not None: # Ensure maxnorm constraints are initially satisfied entity_init = dense_maxnorm(entity_init, self.maxnorm) rel_init = dense_maxnorm(rel_init, self.maxnorm) self.entity_embedding_vars = tf.Variable(entity_init) self.rel_embedding_vars = tf.Variable(rel_init) # Embedding layer for each (head, rel, tail) triple being fed in as input head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input) tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input) rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input) # Reshape rel_embed into square D x D matrices rel_embed_square = tf.reshape(rel_embed, (-1, self.embedding_size, self.embedding_size)) # Reshape head_embed and tail_embed to be suitable for the matrix multiplication head_embed_row = tf.expand_dims(head_embed, 1) # embeddings as row vectors tail_embed_col = tf.expand_dims(tail_embed, 2) # embeddings as column vectors head_rel_mult = tf.batch_matmul(head_embed_row, rel_embed_square) # Output needs a squeeze into a 1d vector raw_output = tf.squeeze(tf.batch_matmul(head_rel_mult, tail_embed_col)) self.output, self.loss = self._create_output_and_loss(raw_output) # Optimization self.train_step = self.opt.minimize(self.loss) if self.maxnorm is not None: # Post-processing to limit embedding vars to L2 ball rel_maxnorm = self.maxnorm * self.rel_maxnorm_mult unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0] unique_rel_indices = tf.unique(self.rel_input)[0] entity_constraint = self._norm_constraint_op(self.entity_embedding_vars, unique_ent_indices, self.maxnorm) rel_constraint = self._norm_constraint_op(self.rel_embedding_vars, unique_rel_indices, rel_maxnorm) self.post_step = [entity_constraint, rel_constraint] class TransE(BaseModel): ''' TransE: Translational Embeddings Model Score for (head i, rel k, tail j) triple is: d(e_i + t_k, e_i) where e_i and e_j are D-dimensional embedding vectors for the head and tail entities, t_k is a another D-dimensional vector acting as a translation, and d() is a dissimilarity function like Euclidean distance. Optimization is performed uing SGD on ranking margin loss between contrastive training pairs. Entity embeddings are contrained to lie within the unit L2 ball, relationship vectors are left unconstrained. Args: embedding_size: Embedding vector length batch_pos_cnt: Number of positive examples to use in each mini-batch max_iter: Maximum number of optimization iterations to perform dist: Distance function used in loss: 'euclidean': sqrt(sum((x - y)^2)) 'sqeuclidean': squared Euclidean, sum((x - y)^2) 'manhattan': sum of absolute differences, sum(|x - y|) margin: Margin parameter for parwise ranking hinge loss opt: An optimizer object to use. If None, the default optimizer is tf.train.AdagradOptimizer(1.0) References: Bordes, Antoine, et al. "Translating embeddings for modeling multi-relational data." Advances in Neural Information Processing Systems. 2013. 
''' def __init__(self, embedding_size, batch_pos_cnt=100, max_iter=1000, dist='euclidean', margin=1.0, opt=None): super(TransE, self).__init__(embedding_size=embedding_size, maxnorm=1.0, batch_pos_cnt=batch_pos_cnt, max_iter=max_iter, model_type='ranking_margin', opt=opt) self.dist = dist self.margin = margin self.EPS = 1e-3 # for sqrt gradient when dist='euclidean' def _create_model(self, train_triples): # Count unique items to determine embedding matrix sizes entity_cnt = len(set(train_triples[:,0]).union(train_triples[:,2])) rel_cnt = len(set(train_triples[:,1])) init_sd = 1.0 / np.sqrt(self.embedding_size) # Embedding variables entity_var_shape = [entity_cnt, self.embedding_size] rel_var_shape = [rel_cnt, self.embedding_size] entity_init = tf.truncated_normal(entity_var_shape, stddev=init_sd) rel_init = tf.truncated_normal(rel_var_shape, stddev=init_sd) # Ensure maxnorm constraints are initially satisfied entity_init = dense_maxnorm(entity_init, self.maxnorm) self.entity_embedding_vars = tf.Variable(entity_init) self.rel_embedding_vars = tf.Variable(rel_init) # Embedding layer for each (head, rel, tail) triple being fed in as input head_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.head_input) tail_embed = tf.nn.embedding_lookup(self.entity_embedding_vars, self.tail_input) rel_embed = tf.nn.embedding_lookup(self.rel_embedding_vars, self.rel_input) # Relationship vector acts as a translation in entity embedding space diff_vec = tail_embed - (head_embed + rel_embed) # negative dist so higher scores are better (important for pairwise loss) if self.dist == 'manhattan': raw_output = -tf.reduce_sum(tf.abs(diff_vec), 1) elif self.dist == 'euclidean': # +eps because gradients can misbehave for small values in sqrt raw_output = -tf.sqrt(tf.reduce_sum(tf.square(diff_vec), 1) + self.EPS) elif self.dist == 'sqeuclidean': raw_output = -tf.reduce_sum(tf.square(diff_vec), 1) else: raise Exception('Unknown distance type') # Model output self.output, self.loss = ranking_margin_objective(raw_output, self.margin) # Optimization with postprocessing to limit embedding vars to L2 ball self.train_step = self.opt.minimize(self.loss) unique_ent_indices = tf.unique(tf.concat(0, [self.head_input, self.tail_input]))[0] self.post_step = self._norm_constraint_op(self.entity_embedding_vars, unique_ent_indices, self.maxnorm)
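# Scoring-function cheat sheet for the three models above (NumPy sketch,
# illustrative only -- the real models compute these inside TensorFlow):
#
#   import numpy as np
#   h, r, t = np.random.randn(3, 5)            # head/relation/tail embeddings
#   R = np.random.randn(5, 5)                  # Bilinear relation operator
#   cp_score = np.sum(h * r * t)               # Contrastive_CP: h^T diag(r) t
#   bilinear_score = h @ R @ t                 # Bilinear: e_h^T R e_t
#   transe_score = -np.linalg.norm(h + r - t)  # TransE: -d(h + r, t)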
App.js
import React from 'react'; import Navbar from './components/Navbar/'; import Contact from './pages/Contact'; import About from './pages/About'; import Portfolio from './pages/Portfolio'; import { BrowserRouter as Router, Route } from "react-router-dom"; function App() { return (
    <Router>
      <div>
        <Navbar />
        <Route exact path='/' component={About} />
        <Route exact path='/contact' component={Contact} />
        <Route exact path='/portfolio' component={Portfolio} />
      </div>
    </Router>
  );
}

export default App;
type_anonymity.rs
fn apply<F>(f: F) where F: Fn()
{
    f();
}

fn main() {
    let x = 7;
    let print = || println!("{}", x + 1);
    apply(print);
}
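// Why `apply` is generic: every closure has its own unique anonymous type,
// so no concrete type name could be written for `f`; bounding F by Fn()
// accepts them all. Another call with a different closure (illustrative):
//
//     apply(|| println!("no captures needed"));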
ng-alt.module.js
/** * @fileoverview added by tsickle
 * @suppress {checkTypes,extraRequire,missingReturn,unusedPrivateMembers,uselessCode} checked by tsc
*/ import { NgModule } from '@angular/core'; import { NgAltDirective, NgAltGroupDirective } from './ng-alt.directive'; var NgAltModule = /** @class */ (function () { function NgAltModule() { } NgAltModule.decorators = [ { type: NgModule, args: [{ declarations: [NgAltGroupDirective, NgAltDirective], exports: [NgAltGroupDirective, NgAltDirective] },] } ]; return NgAltModule; }()); export { NgAltModule }; //# sourceMappingURL=data:application/json;base64,eyJ2ZXJzaW9uIjozLCJmaWxlIjoibmctYWx0Lm1vZHVsZS5qcyIsInNvdXJjZVJvb3QiOiJuZzovL25nLWFsdC8iLCJzb3VyY2VzIjpbIm5nLWFsdC5tb2R1bGUudHMiXSwibmFtZXMiOltdLCJtYXBwaW5ncyI6Ijs7OztBQUFBLE9BQU8sRUFBRSxRQUFRLEVBQUUsTUFBTSxlQUFlLENBQUM7QUFDekMsT0FBTyxFQUFFLGNBQWMsRUFBRSxtQkFBbUIsRUFBRSxNQUFNLG9CQUFvQixDQUFDO0FBRXpFO0lBQUE7SUFJMkIsQ0FBQzs7Z0JBSjNCLFFBQVEsU0FBQztvQkFDUixZQUFZLEVBQUUsQ0FBRSxtQkFBbUIsRUFBRSxjQUFjLENBQUU7b0JBQ3JELE9BQU8sRUFBRSxDQUFFLG1CQUFtQixFQUFFLGNBQWMsQ0FBRTtpQkFDakQ7O0lBQzBCLGtCQUFDO0NBQUEsQUFKNUIsSUFJNEI7U0FBZixXQUFXIiwic291cmNlc0NvbnRlbnQiOlsiaW1wb3J0IHsgTmdNb2R1bGUgfSBmcm9tICdAYW5ndWxhci9jb3JlJztcbmltcG9ydCB7IE5nQWx0RGlyZWN0aXZlLCBOZ0FsdEdyb3VwRGlyZWN0aXZlIH0gZnJvbSAnLi9uZy1hbHQuZGlyZWN0aXZlJztcblxuQE5nTW9kdWxlKHtcbiAgZGVjbGFyYXRpb25zOiBbIE5nQWx0R3JvdXBEaXJlY3RpdmUsIE5nQWx0RGlyZWN0aXZlIF0sXG4gIGV4cG9ydHM6IFsgTmdBbHRHcm91cERpcmVjdGl2ZSwgTmdBbHREaXJlY3RpdmUgXVxufSlcbmV4cG9ydCBjbGFzcyBOZ0FsdE1vZHVsZSB7IH1cbiJdfQ==
index.ts
export { toArbitrary } from './arbitrary'