max_stars_count
int64
301
224k
text
stringlengths
6
1.05M
token_count
int64
3
727k
1,484
<reponame>ballad86/anchore-engine """ Entities for the catalog service including services, users, images, etc. Pretty much everything except image analysis data """ import datetime import enum from sqlalchemy import ( JSON, BigInteger, Boolean, Column, DateTime, Enum, ForeignKey, Index, Integer, LargeBinary, String, inspect, ) from sqlalchemy.orm import relationship from anchore_engine.db.entities.common import ( Base, StringJSON, UtilMixin, anchore_now, anchore_now_datetime, anchore_uuid, truncate_index_name, ) from anchore_engine.util.time import datetime_to_rfc3339 class Anchore(Base, UtilMixin): __tablename__ = "anchore" service_version = Column(String, primary_key=True) db_version = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) scanner_version = Column(String) def __repr__(self): return "service_version='%s' db_version='%s' scanner_version='%s'" % ( self.service_version, self.db_version, self.scanner_version, ) class ObjectStorageMetadata(Base, UtilMixin): __tablename__ = "archive_metadata" bucket = Column(String, primary_key=True) archiveId = Column(String, primary_key=True) userId = Column(String, primary_key=True) documentName = Column(String, primary_key=True) content_url = Column(String) is_compressed = Column(Boolean) digest = Column(String) size = Column(BigInteger) document_metadata = Column(String) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) class LegacyArchiveDocument(Base, UtilMixin): __tablename__ = "archive_document" bucket = Column(String, primary_key=True) archiveId = Column(String, primary_key=True) userId = Column(String, primary_key=True) documentName = Column(String, primary_key=True) 
created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) jsondata = Column(String) b64_encoded = Column(Boolean, default=False) def __repr__(self): return "userId='%s'" % (self.userId) class ObjectStorageRecord(Base, UtilMixin): """ Content storage for the db driver for object storage. """ __tablename__ = "object_storage" userId = Column(String, primary_key=True) bucket = Column(String, primary_key=True) key = Column(String, primary_key=True) version = Column(String, primary_key=True, default="") object_metadata = Column(String) content = Column(LargeBinary) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) # This will be migrated to the Account table class User(Base, UtilMixin): """ Legacy user definition. Migrated to Account + UserCredential """ __tablename__ = "users" userId = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) password = Column(String) email = Column(String) acls = Column(String) active = Column(Boolean) def __repr__(self): return "userId='%s'" % (self.userId) class Event(Base, UtilMixin): __tablename__ = "events" generated_uuid = Column(String, primary_key=True, default=anchore_uuid) created_at = Column(DateTime, default=anchore_now_datetime) resource_user_id = Column(String, nullable=True) resource_id = Column(String, nullable=True) resource_type = Column(String, nullable=True) source_servicename = Column(String, nullable=True) source_base_url = Column(String, nullable=True) source_hostid = Column(String, nullable=True) source_request_id = Column(String, nullable=True) type = Column(String) level = Column(String) message = 
Column(String) details = Column(StringJSON) timestamp = Column(DateTime) __table_args__ = ( Index(truncate_index_name("ix_timestamp"), timestamp.desc()), Index(truncate_index_name("ix_resource_user_id"), resource_user_id), Index(truncate_index_name("ix_resource_type"), resource_type), Index(truncate_index_name("ix_resource_id"), resource_id), Index(truncate_index_name("ix_source_servicename"), source_servicename), Index(truncate_index_name("ix_source_hostid"), source_hostid), Index(truncate_index_name("ix_level"), level), Index(truncate_index_name("ix_type"), type), ) def __repr__(self): return "generated_uuid='%s' level='%s' message='%s'" % ( self.generated_uuid, self.level, self.message, ) class QueueItem(Base, UtilMixin): """ Queue data used by notification system for queueing up notifications for delivery. """ __tablename__ = "queues" queueId = Column(String, primary_key=True) userId = Column(String, primary_key=True) dataId = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) data = Column(String) tries = Column(Integer) max_tries = Column(Integer) def __repr__(self): return "queueId='%s'" % (self.queueId) class QueueMeta(Base, UtilMixin): """ Metadata for queues themselves. msgs are stored in the queue table for simplequeue service. 
""" __tablename__ = "queuemeta" queueName = Column(String, primary_key=True) userId = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) qlen = Column(BigInteger, default=0) # For support of limiting number of messages being processed max_outstanding_messages = Column(Integer, default=0) # Default visibility timeout in seconds to be applied to messages if set visibility_timeout = Column(Integer, default=0) def __repr__(self): return "queueName='%s'" % (self.queueName) class Queue(Base, UtilMixin): """ Queue data used by the simplequeue service. """ __tablename__ = "queue" queueId = Column(BigInteger, primary_key=True, autoincrement=True) userId = Column(String, primary_key=True) queueName = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) popped = Column(Boolean, default=False) priority = Column(Boolean, default=False) data = Column(String, default="{}") dataId = Column(String) tries = Column(Integer, default=0) max_tries = Column(Integer, default=0) # Receipt handle is generated on dequeue and stored with the message as well as returned to the caller to support later deletion of the message receipt_handle = Column(String) visible_at = Column(DateTime) def __repr__(self): return "queueId='%s'" % (self.queueId) class Subscription(Base, UtilMixin): __tablename__ = "subscriptions" subscription_id = Column(String, primary_key=True) userId = Column(String, primary_key=True) subscription_type = Column(String, primary_key=True) subscription_key = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, 
default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) subscription_value = Column(String) active = Column(Boolean) def make(self): ret = {} m = inspect(self) for c in m.attrs: ret[c.key] = None return ret def __repr__(self): return "userId='%s' subscription_type='%s' subscription_key='%s'" % ( self.userId, self.subscription_type, self.subscription_key, ) # if False: # class CatalogRepoTag(Base, UtilMixin): # __tablename__ = "catalog_repotag" # # regrepo = Column(String, primary_key=True) # tag = Column(String, primary_key=True) # userId = Column(String, primary_key=True) # created_at = Column(Integer, default=anchore_now) # last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) # record_state_key = Column(String, default="active") # record_state_val = Column(String) # # image_type = Column(String) # # def make(self): # ret = {} # # m = inspect(self) # for c in m.attrs: # ret[c.key] = None # # return ret # # def __repr__(self): # return "registry='%s'" % (self.registry) class CatalogImage(Base, UtilMixin): __tablename__ = "catalog_image" imageDigest = Column(String, primary_key=True) userId = Column(String, primary_key=True) parentDigest = Column(String) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) analyzed_at = Column(Integer) record_state_key = Column(String, default="active") record_state_val = Column(String) image_type = Column(String) # image metadata arch = Column(String) distro = Column(String) distro_version = Column(String) dockerfile_mode = Column(String) image_size = Column(BigInteger) layer_count = Column(Integer) annotations = Column(String) analysis_status = Column(String) image_status = Column(String) def make(self): ret = {} m = inspect(self) for c in m.attrs: ret[c.key] = None return ret def __repr__(self): return "imageDigest='%s'" % (self.imageDigest) class CatalogImageDocker(Base, UtilMixin): 
__tablename__ = "catalog_image_docker" imageDigest = Column(String, primary_key=True) userId = Column(String, primary_key=True) registry = Column(String, primary_key=True) repo = Column(String, primary_key=True) tag = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) tag_detected_at = Column(Integer, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) digest = Column(String) imageId = Column(String) dockerfile = Column(String) def make(self): ret = {} m = inspect(self) for c in m.attrs: ret[c.key] = None return ret def __repr__(self): return "<{} {}/{}:{},digest={},detected_at={}>".format( super().__repr__(), self.registry, self.repo, self.tag, self.imageDigest, self.tag_detected_at, ) class ArchivedImage(Base, UtilMixin): """ Archive equivalent of CatalogImage, shortened for efficiency and to help manage lifecycle """ __tablename__ = "catalog_archived_images" account = Column(String, primary_key=True) imageDigest = Column(String, primary_key=True) parentDigest = Column(String) image_record_created_at = Column( Integer ) # The created_at from the original image record image_record_last_updated = Column( Integer ) # The last_updated from the original image record analyzed_at = Column(Integer) status = Column(String) # Use same def for now, but move this and all other annotation types to a more general and searchable format (e.g. 
join table) annotations = Column(JSON) # Location of the archive manifest within the archive object store (used to located the other artifacts) manifest_bucket = Column(String) manifest_key = Column(String) archive_size_bytes = Column(BigInteger) # Timestamps for this archive record itself created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) _tags = relationship( "ArchivedImageDocker", primaryjoin="and_(ArchivedImage.account == foreign(ArchivedImageDocker.account), ArchivedImage.imageDigest == foreign(ArchivedImageDocker.imageDigest))", lazy="joined", back_populates="_image", cascade="all, delete-orphan", ) def tags(self): return self._tags def __repr__(self): return "<{} account={},imageDigest={}>".format( super().__repr__(), self.account, self.imageDigest ) @classmethod def from_catalog_image(cls, catalog_img_dict, cascade=True): i = ArchivedImage() i.account = catalog_img_dict["userId"] i.imageDigest = catalog_img_dict["imageDigest"] i.analyzed_at = catalog_img_dict["analyzed_at"] i.parentDigest = catalog_img_dict["parentDigest"] i.annotations = catalog_img_dict["annotations"] i.image_record_created_at = catalog_img_dict["created_at"] i.image_record_last_updated = catalog_img_dict["last_updated"] i.status = "archiving" details = [] if cascade: for detail in catalog_img_dict["image_detail"]: d = ArchivedImageDocker() d.imageDigest = i.imageDigest d.account = i.account d.tag = detail["tag"] d.repository = detail["repo"] d.registry = detail["registry"] d.tag_detected_at = detail["tag_detected_at"] d.imageId = detail["imageId"] details.append(d) i._tags = details return i class ArchivedImageDocker(Base, UtilMixin): """ Archived equivalent of a catalog_image_docker, but with some data removed to keep the records small since most metadata is in the archive objects themselves. 
""" __tablename__ = "catalog_archived_images_docker" account = Column(String, primary_key=True) imageDigest = Column(String, primary_key=True) registry = Column(String, primary_key=True) repository = Column(String, primary_key=True) tag = Column(String, primary_key=True) imageId = Column(String) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) tag_detected_at = Column(Integer, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) _image = relationship( "ArchivedImage", primaryjoin="and_(foreign(ArchivedImageDocker.account) == ArchivedImage.account, foreign(ArchivedImageDocker.imageDigest) == ArchivedImage.imageDigest)", lazy="joined", back_populates="_tags", ) @property def repo(self): """ alias for self.repository. Use for interface compat with CatalogImageDocker :return: """ return self.repository def parent_image(self): return self._image def __repr__(self): return "digest='%s'" % (self.imageDigest) class ArchiveTransitions(enum.Enum): archive = "archive" delete = "delete" class TransitionHistoryState(enum.Enum): pending = "pending" complete = "complete" class ArchiveTransitionRule(Base, UtilMixin): __tablename__ = "catalog_archive_transition_rules" account = Column(String, primary_key=True) rule_id = Column(String, primary_key=True) transition = Column(Enum(ArchiveTransitions, name="archive_transitions")) selector_registry = Column(String) selector_repository = Column(String) selector_tag = Column(String) tag_versions_newer = Column(Integer) analysis_age_days = Column(Integer) system_global = Column(Boolean, default=False) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) exclude_selector_registry = Column(String) exclude_selector_repository = Column(String) exclude_selector_tag = Column(String) exclude_expiration_days = Column(Integer) max_images_per_account = 
Column(Integer) def __repr__(self): return "<ArchiveTransitionRule account={},rule_id={}>".format( self.account, self.rule_id ) class ArchiveTransitionHistoryEntry(Base, UtilMixin): """ An entry in the transition history log. One digest may match multiple rules in a single task (e.g. multiple rules that cumulatively match all tags for an image). """ __tablename__ = "catalog_archive_transition_history" transition_task_id = Column(String, primary_key=True) account = Column(String, primary_key=True) rule_id = Column(String, primary_key=True) image_digest = Column(String, primary_key=True) transition = Column( Enum(ArchiveTransitions, name="archive_transitions"), primary_key=True ) transition_state = Column( Enum(TransitionHistoryState, name="archive_transition_history_state") ) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) # _matches = relationship("TransitionRuleMatch", primaryjoin="and_(ArchiveTransitionHistoryEntry.rule_id == foreign(TransitionRuleMatch.rule_id), ArchiveTransitionHistoryEntry.task_id == foreign(TransitionMatch.task_id))", lazy='joined', back_populates='_image', cascade='all, delete-orphan') # class TransitionRuleMatch(Base, UtilMixin): # task_id = Column(String, primary_key=True) # rule_id = Column(String, primary_key=True) # image_digest = Column(String, primary_key=True) # registry = Column(String, primary_key=True) # repository = Column(String, primary_key=True) # tag = Column(String, primary_key=True) # created_at = Column(Integer, default=anchore_now) # class PolicyBundle(Base, UtilMixin): __tablename__ = "policy_bundle" policyId = Column(String, primary_key=True) userId = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) active = Column(Boolean) policy_source = Column(String, 
default="local") # policybundle = Column(String) def __repr__(self): return "policyId='%s'" % (self.policyId) class PolicyEval(Base, UtilMixin): __tablename__ = "policy_eval" userId = Column(String, primary_key=True) imageDigest = Column(String, primary_key=True) tag = Column(String, primary_key=True) policyId = Column(String, primary_key=True) final_action = Column(String, primary_key=True) created_at = Column(Integer, primary_key=True) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) evalId = Column(String) policyeval = Column(String) def make(self): ret = {} m = inspect(self) for c in m.attrs: ret[c.key] = None return ret def content_compare(self, other): selfdata = dict( (key, value) for key, value in vars(self).items() if not key.startswith("_") ) otherdata = dict( (key, value) for key, value in vars(other).items() if not key.startswith("_") ) for k in ["userId", "imageDigest", "tag", "policyId", "final_action"]: try: if selfdata[k] != otherdata[k]: return False except: return False return True def __repr__(self): return "policyId='%s' userId='%s' imageDigest='%s' tag='%s'" % ( self.policyId, self.userId, self.imageDigest, self.tag, ) class Service(Base, UtilMixin): __tablename__ = "services" hostid = Column(String, primary_key=True) servicename = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) type = Column(String) version = Column(String) base_url = Column(String) short_description = Column(String) status = Column(Boolean) status_message = Column(String) heartbeat = Column(Integer) def make(self): ret = {} m = inspect(self) for c in m.attrs: ret[c.key] = None return ret def __repr__(self): return "hostid='%s'" % (self.hostid) class Registry(Base, UtilMixin): 
__tablename__ = "registries" registry = Column(String, primary_key=True) userId = Column(String, primary_key=True) created_at = Column(Integer, default=anchore_now) last_updated = Column(Integer, onupdate=anchore_now, default=anchore_now) record_state_key = Column(String, default="active") record_state_val = Column(String) registry_type = Column(String) registry_name = Column(String) registry_user = Column(String) registry_pass = Column(String) registry_verify = Column(Boolean) registry_meta = Column(String) def __repr__(self): return "registry='%s' userId='%s' registry_user='%s'" % ( self.registry, self.userId, self.registry_user, ) # Application-defined lease using a flag in a db row. These are leases, not locks, because they have expirations. class Lease(Base, UtilMixin): __tablename__ = "leases" _default_expiration_duration = 10 id = Column(String, primary_key=True) held_by = Column(String) expires_at = Column(DateTime) epoch = Column(BigInteger, default=0) # Some convenience functions, these should be executed inside for_update locks def do_acquire(self, holder_id, duration_sec=None): return self.is_available() and self.set_holder( holder_id, duration_sec=duration_sec ) def is_available(self): return self.held_by is None or self.is_expired() def is_expired(self): return self.expires_at is None or self.expires_at < datetime.datetime.utcnow() def set_holder(self, id, duration_sec=None): if not duration_sec: duration_sec = self._default_expiration_duration self.held_by = id self.expires_at = datetime.datetime.utcnow() + datetime.timedelta( seconds=duration_sec ) self.epoch += 1 return True def release_holder(self): self.expires_at = None self.held_by = None self.epoch += 1 return True def __str__(self): return "<{} id={},held_by={},expires_at={},epoch={}>".format( self.__class__.__name__, self.id, self.held_by, self.expires_at.isoformat() if self.expires_at else self.expires_at, self.epoch, ) # Image import entities class ImportState(enum.Enum): pending = 
"pending" processing = "processing" complete = "complete" failed = "failed" expired = "expired" invalidated = "invalidated" def is_active(self): """ True if record is in an active state, false if in a terminal state :return: """ return self in [ImportState.processing, ImportState.pending] class ImageImportOperation(Base, UtilMixin): __tablename__ = "image_imports" uuid = Column(String, primary_key=True, default=anchore_uuid) account = Column(String) expires_at = Column(DateTime) status = Column(Enum(ImportState)) created_at = Column(DateTime, default=anchore_now_datetime) last_updated = Column( DateTime, default=anchore_now_datetime, onupdate=anchore_now_datetime ) contents = relationship("ImageImportContent", back_populates="operation") __table_args__ = ( Index(truncate_index_name("ix_image_imports_account"), account), {}, ) def to_json(self): j = super().to_json() j["status"] = self.status.value j["expires_at"] = datetime_to_rfc3339(self.expires_at) j["created_at"] = datetime_to_rfc3339(self.created_at) j["last_update"] = datetime_to_rfc3339(self.last_updated) return j class ImageImportContent(Base, UtilMixin): """ References to objects in the object store used for """ __tablename__ = "image_import_content" operation_id = Column(String, ForeignKey("image_imports.uuid"), primary_key=True) digest = Column(String, primary_key=True) content_type = Column(String, primary_key=True) created_at = Column(DateTime, default=anchore_now_datetime) last_updated = Column( DateTime, default=anchore_now_datetime, onupdate=anchore_now_datetime ) content_storage_bucket = Column(String) content_storage_key = Column(String) operation = relationship("ImageImportOperation", back_populates="contents") def to_json(self): j = super().to_json() j["created_at"] = datetime_to_rfc3339(self.created_at) j["last_update"] = datetime_to_rfc3339(self.last_updated) return j
10,415
736
<reponame>tuomijal/pmdarima # -*- coding: utf-8 -*- import pytest import numpy as np from numpy.testing import assert_array_equal from pmdarima.compat import pmdarima as pm_compat from pmdarima.compat import pytest as pt_compat xreg = np.random.rand(4, 4) @pytest.mark.parametrize( 'X,kw,X_exp,kw_exp,exp_warning,exp_error', [ # provided as `exogenous` pytest.param( None, {"exogenous": xreg}, xreg, {}, DeprecationWarning, None, ), # provided as `X` with additional kwargs pytest.param( xreg, {"foo": "bar"}, xreg, {"foo": "bar"}, None, None, ), # provided as `X` AND `exogenous` will raise pytest.param( xreg, {"exogenous": xreg}, None, None, None, ValueError, ), ] ) def test_get_X(X, kw, X_exp, kw_exp, exp_warning, exp_error): with pytest.warns(exp_warning) as w, \ pt_compat.raises(exp_error) as e: X_act, kw_act = pm_compat.get_X(X, **kw) if exp_warning: assert w else: assert not w if exp_error: assert e # no other assertions can be made else: assert not e assert_array_equal(X_act, X_exp) assert kw_act == kw_exp
779
14,668
<filename>ash/quick_pair/common/account_key_failure.cc // Copyright 2021 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ash/quick_pair/common/account_key_failure.h" namespace ash { namespace quick_pair { std::ostream& operator<<(std::ostream& stream, AccountKeyFailure failure) { switch (failure) { case AccountKeyFailure::kAccountKeyCharacteristicDiscovery: stream << "[Failed to find the Account Key GATT characteristic]"; break; case AccountKeyFailure::kAccountKeyCharacteristicWrite: stream << "[Failed to write to the Account Key GATT characteristic]"; break; } return stream; } } // namespace quick_pair } // namespace ash
244
5,169
{ "name": "ARSPopover", "version": "2.0.0", "summary": "Universal popover for iPhone and iPad.", "description": "# ARSPopover\nUniversal popover for iPhone and iPad that you can use in your projects. No custom drawing, no custom elements - everything is purely native.\n\n| iPhone | iPad |\n| ---------------------------- | ------------------------ |\n| ![ARSPopover-iPhone][iPhone] | ![ARSPopover-iPad][iPad] |\n\n[iPhone]: http://git.arsenkin.com/ARSPopover-iPhone.gif\n[iPad]: http://git.arsenkin.com/ARSPopover-iPad.gif\n\n## Installation\n\n### CocoaPods\nTo install with [CocoaPods](http://cocoapods.org/), copy and paste this in your *.pod* file:\n\n platform :ios, '8.0'\n pod 'ARSPopover', '~> 2.0'\n\n### Non-CocoaPods way\nYou can always to do the old way - just drag the source files into your projects and you are good to go.\n\n ## Usage\n Sample usage of the ARSPopover might look like this:\n\n ``` objective-c\n - (IBAction)showPopoverWithWebView:(id)sender {\n ARSPopover *popoverController = [ARSPopover new];\n popoverController.sourceView = self.buttonWithWebView;\n popoverController.sourceRect = CGRectMake(CGRectGetMidX(self.buttonWithWebView.bounds), CGRectGetMaxY(self.buttonWithWebView.bounds), 0, 0);\n popoverController.contentSize = CGSizeMake(400, 600);\n popoverController.arrowDirection = UIPopoverArrowDirectionUp;\n\n [self presentViewController:popoverController animated:YES completion:^{\n [popoverController insertContentIntoPopover:^(ARSPopover *popover, CGSize popoverPresentedSize, CGFloat popoverArrowHeight) {\n CGFloat originX = 0;\n CGFloat originY = 0;\n CGFloat width = popoverPresentedSize.width;\n CGFloat height = popoverPresentedSize.height - popoverArrowHeight;\n\n CGRect frame = CGRectMake(originX, originY, width, height);\n UIWebView *webView = [[UIWebView alloc] initWithFrame:frame];\n webView.scalesPageToFit = YES;\n [webView loadRequest:[NSURLRequest requestWithURL:[NSURL URLWithString:@\"http://google.com\"]]];\n [popover.view 
addSubview:webView];\n }];\n }];\n }\n ```\n ### Required properties' configurations\n\n In order to get a working popover, you need to specify next properties:\n\n * `popoverController.sourceView` - The view containing the anchor rectangle for the popover.\n\n ``` objective-c\n popoverController.sourceView = self.buttonWithWebView;\n ```\n\n * `popoverController.sourceRect` - The rectangle in the specified view in which to anchor the popover.\n\n ``` objective-c\n popoverController.sourceRect = CGRectMake(CGRectGetMidX(self.buttonWithWebView.bounds), CGRectGetMaxY(self.buttonWithWebView.bounds), 0, 0);\n ```\n\n * `popoverController.contentSize` - The preferred size for the popover’s view.\n\n ``` objective-c\n popoverController.contentSize = CGSizeMake(400, 600);\n ```\n\n * And the last, most important thing - you have to call method `insertContentIntoPopover` and pass a block of code, which should add subviews to popover's view you wish to see.\n\n _Be sure to call this method only after you have presented popup. Otherwise you might get wrong size in popoverPresentedSize._\n\n ``` objective-c\n [popoverController insertContentIntoPopover:^(ARSPopover *popover, CGSize popoverPresentedSize, CGFloat popoverArrowHeight) {\n CGFloat originX = 0;\n CGFloat originY = 0;\n CGFloat width = popoverPresentedSize.width;\n CGFloat height = popoverPresentedSize.height - popoverArrowHeight;\n\n CGRect frame = CGRectMake(originX, originY, width, height);\n UIWebView *webView = [[UIWebView alloc] initWithFrame:frame];\n webView.scalesPageToFit = YES;\n [webView loadRequest:[NSURLRequest requestWithURL:[NSURL URLWithString:@\"http://google.com\"]]];\n [popover.view addSubview:webView];\n }];\n ```\n\n## License\nARSPopover is released under the [MIT license](http://opensource.org/licenses/MIT). 
See LICENSE for details.", "homepage": "https://github.com/soberman/ARSPopover", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "<NAME>": "<EMAIL>" }, "social_media_url": "http://twitter.com/Soberman777", "platforms": { "ios": "8.0" }, "source": { "git": "https://github.com/soberman/ARSPopover.git", "tag": "2.0.0" }, "source_files": "Source/ARSPopover.{h,m}", "exclude_files": "Demo/*", "public_header_files": "Source/ARSPopover.h", "requires_arc": true }
1,888
808
<reponame>714627034/Paddle-Lite # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import sys sys.path.append('..') from program_config import TensorConfig, ProgramConfig, OpConfig, CxxConfig, TargetType, PrecisionType, DataLayoutType, Place import numpy as np from functools import partial, reduce from typing import Optional, List, Callable, Dict, Any, Set import unittest import hypothesis from hypothesis import given, settings, seed, example, assume, reproduce_failure import hypothesis.strategies as st def mul(x, y): return x * y def sample_program_configs(draw): in_shape = draw( st.lists( st.integers( min_value=5, max_value=64), min_size=2, max_size=4)) bias_shape = draw( st.lists( st.integers( min_value=1, max_value=64), min_size=1, max_size=2)) weight_shape = draw( st.lists( st.integers( min_value=5, max_value=64), min_size=2, max_size=2)) in_num_col_dims_data = draw( st.integers( min_value=1, max_value=len(in_shape) - 1)) padding_weights_data = draw(st.sampled_from([False, True])) has_bias = draw(st.sampled_from([True, False])) w_dims1 = weight_shape[1] w_dims0 = weight_shape[0] if padding_weights_data == True: w_dims1 = weight_shape[1] - 4 w_dims0 = weight_shape[0] - 4 if has_bias == True: if len(bias_shape) == 2: assume(bias_shape[0] == 1) assume(bias_shape[-1] == w_dims1) inshape0 = reduce(mul, in_shape[0:in_num_col_dims_data]) inshape1 = reduce(mul, in_shape[in_num_col_dims_data:]) assume(inshape1 == w_dims0) 
fc_out_shape = [] for i in range(0, in_num_col_dims_data): fc_out_shape.append(in_shape[i]) fc_out_shape.append(w_dims1) Alpha_shape = [] mode_data = draw(st.sampled_from(["all", "channel", "element"])) if mode_data == "all": Alpha_shape = [1] elif mode_data == "channel": Alpha_shape = [fc_out_shape[1]] assume(len(fc_out_shape) >= 2) elif mode_data == "element": Alpha_shape = fc_out_shape assume(len(fc_out_shape) >= 1) inputs_fc = {} if has_bias == True: inputs_fc = { "Input": ["input_data"], "W": ["weight_data"], "Bias": ["bias_data"] } else: inputs_fc = {"Input": ["input_data"], "W": ["weight_data"]} fc_op = OpConfig( type="fc", inputs=inputs_fc, outputs={"Out": ["fc_output_data"]}, attrs={ "in_num_col_dims": in_num_col_dims_data, "activation_type": "", "padding_weights": padding_weights_data, "use_mkldnn": False, }) prelu_op = OpConfig( type="prelu", inputs={"X": ["fc_output_data"], "Alpha": ["alpha_data"]}, outputs={"Out": ["output_data"]}, attrs={"mode": mode_data, "data_format": "NCHW"}) inputs_ = {} ops = [fc_op, prelu_op] program_config = ProgramConfig( ops=ops, weights={"bias_data": TensorConfig(shape=bias_shape)}, inputs={ "weight_data": TensorConfig(shape=weight_shape), "input_data": TensorConfig(shape=in_shape), "alpha_data": TensorConfig(shape=Alpha_shape) }, outputs=["output_data"]) return program_config
1,802
12,278
<filename>3rdParty/boost/1.71.0/libs/preprocessor/test/slot.cxx # /* ************************************************************************** # * * # * (C) Copyright <NAME> 2002. # * Distributed under the Boost Software License, Version 1.0. (See # * accompanying file LICENSE_1_0.txt or copy at # * http://www.boost.org/LICENSE_1_0.txt) # * * # ************************************************************************** */ # # /* See http://www.boost.org for most recent version. */ # # include <boost/preprocessor/slot.hpp> # include <libs/preprocessor/test/test.h> # include <boost/preprocessor/slot/counter.hpp> # define X() 4 # define BOOST_PP_VALUE 1 + 2 + 3 + X() # include BOOST_PP_ASSIGN_SLOT(1) # undef X BEGIN BOOST_PP_SLOT(1) == 10 END # define BOOST_PP_VALUE BOOST_PP_SLOT(1) * BOOST_PP_SLOT(1) # include BOOST_PP_ASSIGN_SLOT(1) BEGIN BOOST_PP_SLOT(1) == 100 END BEGIN BOOST_PP_COUNTER == 0 END #include BOOST_PP_UPDATE_COUNTER() BEGIN BOOST_PP_COUNTER == 1 END #include BOOST_PP_UPDATE_COUNTER() #include BOOST_PP_UPDATE_COUNTER() BEGIN BOOST_PP_COUNTER == 3 END #include BOOST_PP_UPDATE_COUNTER() #include BOOST_PP_UPDATE_COUNTER() #include BOOST_PP_UPDATE_COUNTER() BEGIN BOOST_PP_COUNTER == 6 END #include BOOST_PP_UPDATE_COUNTER() #include BOOST_PP_UPDATE_COUNTER() #include BOOST_PP_UPDATE_COUNTER() #include BOOST_PP_UPDATE_COUNTER() #include BOOST_PP_UPDATE_COUNTER() BEGIN BOOST_PP_COUNTER == 11 END
738
2,151
<filename>chrome/browser/chromeos/arc/wallpaper/arc_wallpaper_service.h // Copyright 2016 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #ifndef CHROME_BROWSER_CHROMEOS_ARC_WALLPAPER_ARC_WALLPAPER_SERVICE_H_ #define CHROME_BROWSER_CHROMEOS_ARC_WALLPAPER_ARC_WALLPAPER_SERVICE_H_ #include <stdint.h> #include <memory> #include <vector> #include "ash/public/interfaces/wallpaper.mojom.h" #include "base/macros.h" #include "base/memory/weak_ptr.h" #include "chrome/browser/image_decoder.h" #include "components/arc/common/wallpaper.mojom.h" #include "components/arc/connection_observer.h" #include "components/keyed_service/core/keyed_service.h" #include "mojo/public/cpp/bindings/associated_binding.h" namespace content { class BrowserContext; } // namespace content namespace arc { class ArcBridgeService; // Lives on the UI thread. class ArcWallpaperService : public KeyedService, public ConnectionObserver<mojom::WallpaperInstance>, public mojom::WallpaperHost, public ash::mojom::WallpaperObserver { public: // Returns singleton instance for the given BrowserContext, // or nullptr if the browser |context| is not allowed to use ARC. static ArcWallpaperService* GetForBrowserContext( content::BrowserContext* context); ArcWallpaperService(content::BrowserContext* context, ArcBridgeService* bridge_service); ~ArcWallpaperService() override; // ConnectionObserver<mojom::WallpaperInstance> overrides. void OnConnectionReady() override; // mojom::WallpaperHost overrides. void SetWallpaper(const std::vector<uint8_t>& data, int32_t wallpaper_id) override; void SetDefaultWallpaper() override; void GetWallpaper(GetWallpaperCallback callback) override; // ash::mojom::WallpaperObserver overrides. 
void OnWallpaperChanged(uint32_t image_id) override; void OnWallpaperColorsChanged( const std::vector<SkColor>& prominent_colors) override; void OnWallpaperBlurChanged(bool blurred) override; class DecodeRequestSender { public: virtual ~DecodeRequestSender(); // Decodes image |data| and notifies the result to |request|. virtual void SendDecodeRequest(ImageDecoder::ImageRequest* request, const std::vector<uint8_t>& data) = 0; }; // Replace a way to decode images for unittests. Originally it uses // ImageDecoder which communicates with the external process. void SetDecodeRequestSenderForTesting( std::unique_ptr<DecodeRequestSender> sender); private: friend class TestApi; class AndroidIdStore; class DecodeRequest; struct WallpaperIdPair; // Initiates a set wallpaper request to //ash. void OnWallpaperDecoded(const gfx::ImageSkia& image, int32_t android_id); // Notifies wallpaper change if we have wallpaper instance. void NotifyWallpaperChanged(int android_id); // Notifies wallpaper change of |android_id|, then notify wallpaper change of // -1 to reset wallpaper cache at Android side. void NotifyWallpaperChangedAndReset(int android_id); // If the wallpaper is allowed to be shown on screen, stores the |image_id| // in order to track the wallpaper change later, otherwise notify the Android // side immediately that the request is not going through. void OnSetThirdPartyWallpaperCallback(int32_t android_id, bool allowed, uint32_t image_id); // Initiates an encoding image request after getting the wallpaper image. void OnGetWallpaperImageCallback(GetWallpaperCallback callback, const gfx::ImageSkia& image); ArcBridgeService* const arc_bridge_service_; // Owned by ArcServiceManager. std::unique_ptr<DecodeRequest> decode_request_; std::vector<WallpaperIdPair> id_pairs_; std::unique_ptr<DecodeRequestSender> decode_request_sender_; // The binding this instance uses to implement ash::mojom::WallpaperObserver. 
mojo::AssociatedBinding<ash::mojom::WallpaperObserver> observer_binding_; base::WeakPtrFactory<ArcWallpaperService> weak_ptr_factory_; DISALLOW_COPY_AND_ASSIGN(ArcWallpaperService); }; } // namespace arc #endif // CHROME_BROWSER_CHROMEOS_ARC_WALLPAPER_ARC_WALLPAPER_SERVICE_H_
1,562
367
<reponame>MatanyaStroh/opal from logging import disable import os import signal import asyncio import uuid import aiohttp import functools from typing import List, Optional from fastapi import FastAPI import websockets from opal_common.logger import logger, configure_logs from opal_common.middleware import configure_middleware from opal_common.config import opal_common_config from opal_common.security.sslcontext import get_custom_ssl_context from opal_common.authentication.verifier import JWTVerifier from opal_common.authentication.deps import JWTAuthenticator from opal_client.policy_store.api import init_policy_store_router from opal_client.config import PolicyStoreTypes, opal_client_config from opal_client.data.api import init_data_router from opal_client.data.updater import DataUpdater from opal_client.data.fetcher import DataFetcher from opal_client.policy_store.base_policy_store_client import BasePolicyStoreClient from opal_client.policy_store.policy_store_client_factory import PolicyStoreClientFactory from opal_client.opa.runner import OpaRunner from opal_client.opa.options import OpaServerOptions from opal_client.policy.api import init_policy_router from opal_client.policy.updater import PolicyUpdater from opal_client.callbacks.register import CallbacksRegister from opal_client.callbacks.api import init_callbacks_api class OpalClient: def __init__( self, policy_store_type:PolicyStoreTypes=None, policy_store:BasePolicyStoreClient=None, data_updater:DataUpdater=None, data_topics: List[str] = None, policy_updater:PolicyUpdater=None, inline_opa_enabled:bool=None, inline_opa_options:OpaServerOptions=None, verifier: Optional[JWTVerifier] = None, ) -> None: """ Args: policy_store_type (PolicyStoreTypes, optional): [description]. Defaults to POLICY_STORE_TYPE. Internal components (for each pass None for default init, or False to disable): policy_store (BasePolicyStoreClient, optional): The policy store client. Defaults to None. 
data_updater (DataUpdater, optional): Defaults to None. policy_updater (PolicyUpdater, optional): Defaults to None. """ # defaults policy_store_type: PolicyStoreTypes = policy_store_type or opal_client_config.POLICY_STORE_TYPE inline_opa_enabled: bool = inline_opa_enabled or opal_client_config.INLINE_OPA_ENABLED inline_opa_options: OpaServerOptions = inline_opa_options or opal_client_config.INLINE_OPA_CONFIG opal_client_identifier: str = opal_client_config.OPAL_CLIENT_STAT_ID or f"CLIENT_{uuid.uuid4().hex}" # set logs configure_logs() # Init policy store client self.policy_store_type: PolicyStoreTypes = policy_store_type self.policy_store: BasePolicyStoreClient = policy_store or PolicyStoreClientFactory.create(policy_store_type) # data fetcher self.data_fetcher = DataFetcher() # callbacks register if hasattr(opal_client_config.DEFAULT_UPDATE_CALLBACKS, 'callbacks'): default_callbacks = opal_client_config.DEFAULT_UPDATE_CALLBACKS.callbacks else: default_callbacks = [] self._callbacks_register = CallbacksRegister(default_callbacks) # Init policy updater if policy_updater is not None: self.policy_updater = policy_updater else: self.policy_updater = PolicyUpdater(policy_store=self.policy_store, data_fetcher=self.data_fetcher, callbacks_register=self._callbacks_register, opal_client_id=opal_client_identifier) # Data updating service if opal_client_config.DATA_UPDATER_ENABLED: if data_updater is not None: self.data_updater = data_updater else: data_topics = data_topics if data_topics is not None else opal_client_config.DATA_TOPICS self.data_updater = DataUpdater(policy_store=self.policy_store, data_topics=data_topics, data_fetcher=self.data_fetcher, callbacks_register=self._callbacks_register, opal_client_id=opal_client_identifier) else: self.data_updater = None # Internal services # Policy store if self.policy_store_type == PolicyStoreTypes.OPA and inline_opa_enabled: rehydration_callbacks = [ # refetches policy code (e.g: rego) and static data from server 
functools.partial(self.policy_updater.update_policy, force_full_update=True), ] if self.data_updater: rehydration_callbacks.append( functools.partial(self.data_updater.get_base_policy_data, data_fetch_reason="policy store rehydration") ) self.opa_runner = OpaRunner.setup_opa_runner(options=inline_opa_options, rehydration_callbacks=rehydration_callbacks) else: self.opa_runner = False custom_ssl_context = get_custom_ssl_context() if opal_common_config.CLIENT_SELF_SIGNED_CERTIFICATES_ALLOWED and custom_ssl_context is not None: logger.warning("OPAL client is configured to trust self-signed certificates") if verifier is not None: self.verifier = verifier else: self.verifier = JWTVerifier( public_key=opal_common_config.AUTH_PUBLIC_KEY, algorithm=opal_common_config.AUTH_JWT_ALGORITHM, audience=opal_common_config.AUTH_JWT_AUDIENCE, issuer=opal_common_config.AUTH_JWT_ISSUER, ) if not self.verifier.enabled: logger.info("API authentication disabled (public encryption key was not provided)") # init fastapi app self.app: FastAPI = self._init_fast_api_app() def _init_fast_api_app(self): """ inits the fastapi app object """ app = FastAPI( title="OPAL Client", description="OPAL is an administration layer for Open Policy Agent (OPA), detecting changes" + \ " to both policy and data and pushing live updates to your agents. 
The opal client is" + \ " deployed alongside a policy-store (e.g: OPA), keeping it up-to-date, by connecting to" + \ " an opal-server and subscribing to pub/sub updates for policy and policy data changes.", version="0.1.0" ) configure_middleware(app) self._configure_api_routes(app) self._configure_lifecycle_callbacks(app) return app def _configure_api_routes(self, app: FastAPI): """ mounts the api routes on the app object """ authenticator = JWTAuthenticator(self.verifier) # Init api routers with required dependencies policy_router = init_policy_router(policy_updater=self.policy_updater) data_router = init_data_router(data_updater=self.data_updater) policy_store_router = init_policy_store_router(authenticator) callbacks_router = init_callbacks_api(authenticator, self._callbacks_register) # mount the api routes on the app object app.include_router(policy_router, tags=["Policy Updater"]) app.include_router(data_router, tags=["Data Updater"]) app.include_router(policy_store_router, tags=["Policy Store"]) app.include_router(callbacks_router, tags=["Callbacks"]) # top level routes (i.e: healthchecks) @app.get("/healthcheck", include_in_schema=False) @app.get("/", include_in_schema=False) def healthcheck(): return {"status": "ok"} return app def _configure_lifecycle_callbacks(self, app: FastAPI): """ registers callbacks on app startup and shutdown. on app startup we launch our long running processes (async tasks) on the event loop. on app shutdown we stop these long running tasks. """ @app.on_event("startup") async def startup_event(): asyncio.create_task(self.start_client_background_tasks()) @app.on_event("shutdown") async def shutdown_event(): await self.stop_client_background_tasks() return app async def start_client_background_tasks(self): """ Launch OPAL client long-running tasks: - Policy Store runner (e.g: Opa Runner) - Policy Updater - Data Updater If there is a policy store to run, we wait until its up before launching dependent tasks. 
""" if self.opa_runner: # runs the policy store dependent tasks after policy store is up self.opa_runner.register_opa_initial_start_callbacks([self.launch_policy_store_dependent_tasks]) async with self.opa_runner: await self.opa_runner.wait_until_done() else: # we do not run the policy store in the same container # therefore we can immediately launch dependent tasks await self.launch_policy_store_dependent_tasks() async def stop_client_background_tasks(self): """ stops all background tasks (called on shutdown event) """ logger.info("stopping background tasks...") # stopping opa runner if self.opa_runner: await self.opa_runner.stop() # stopping updater tasks (each updater runs a pub/sub client) logger.info("trying to shutdown DataUpdater and PolicyUpdater gracefully...") tasks: List[asyncio.Task] = [] if self.data_updater: tasks.append(asyncio.create_task(self.data_updater.stop())) if self.policy_updater: tasks.append(asyncio.create_task(self.policy_updater.stop())) try: await asyncio.gather(*tasks) except Exception: logger.exception("exception while shutting down updaters") async def launch_policy_store_dependent_tasks(self): try: await self.maybe_init_healthcheck_policy() except Exception: logger.critical("healthcheck policy enabled but could not be initialized!") self._trigger_shutdown() return try: for task in asyncio.as_completed([self.launch_policy_updater(), self.launch_data_updater()]): await task except websockets.exceptions.InvalidStatusCode as err: logger.error("Failed to launch background task -- {err}", err=repr(err)) self._trigger_shutdown() async def maybe_init_healthcheck_policy(self): """ This function only runs if OPA_HEALTH_CHECK_POLICY_ENABLED is true. Puts the healthcheck policy in opa cache and inits the transaction log used by the policy. If any action fails, opal client will shutdown. 
""" if not opal_client_config.OPA_HEALTH_CHECK_POLICY_ENABLED: return # skip healthcheck_policy_relpath = opal_client_config.OPA_HEALTH_CHECK_POLICY_PATH here = os.path.abspath(os.path.dirname(__file__)) healthcheck_policy_path = os.path.join(here, healthcheck_policy_relpath) if not os.path.exists(healthcheck_policy_path): logger.error("Critical: OPA health-check policy is enabled, but cannot find policy at {path}", path=healthcheck_policy_path) raise ValueError("OPA health check policy not found!") try: healthcheck_policy_code = open(healthcheck_policy_path, 'r').read() except IOError as err: logger.error("Critical: Cannot read healthcheck policy: {err}", err=repr(err)) raise try: await self.policy_store.init_healthcheck_policy( policy_id=healthcheck_policy_relpath, policy_code=healthcheck_policy_code, data_updater_enabled=opal_client_config.DATA_UPDATER_ENABLED ) except aiohttp.ClientError as err: logger.error("Failed to connect to OPA agent while init healthcheck policy -- {err}", err=repr(err)) raise def _trigger_shutdown(self): """ this will send SIGTERM (Keyboard interrupt) to the worker, making uvicorn send "lifespan.shutdown" event to Starlette via the ASGI lifespan interface. Starlette will then trigger the @app.on_event("shutdown") callback, which in our case (self.stop_client_background_tasks()) will gracefully shutdown the background processes and only then will terminate the worker. """ logger.info("triggering shutdown with SIGTERM...") os.kill(os.getpid(), signal.SIGTERM) async def launch_policy_updater(self): if self.policy_updater: async with self.policy_updater: await self.policy_updater.wait_until_done() async def launch_data_updater(self): if self.data_updater: async with self.data_updater: await self.data_updater.wait_until_done()
5,410
435
package datawave.query.iterator; import java.util.Map.Entry; import datawave.query.function.Aggregation; import datawave.query.function.KeyToDocumentData; import datawave.query.attributes.Document; import datawave.query.tld.TLD; import datawave.query.util.EntryToTuple; import datawave.query.util.Tuple2; import org.apache.accumulo.core.data.Key; import org.apache.log4j.Logger; import com.google.common.base.Function; import com.google.common.collect.Maps; public class GetParentDocument implements Function<Entry<Key,Document>,Tuple2<Key,Document>> { private final KeyToDocumentData fetchDocData; private final Aggregation makeDocument; private final EntryToTuple<Key,Document> convert = new EntryToTuple<>(); private static final Logger log = Logger.getLogger(GetParentDocument.class); public GetParentDocument(KeyToDocumentData fetchDocData, Aggregation makeDocument) { this.fetchDocData = fetchDocData; this.makeDocument = makeDocument; } public Tuple2<Key,Document> apply(Entry<Key,Document> from) { if (log.isTraceEnabled()) log.trace("Apply parent key " + from.getKey()); Key parentKey = TLD.buildParentKey(from.getKey().getRow(), TLD.parseParentPointerFromId(from.getKey().getColumnFamilyData()), from.getKey() .getColumnQualifierData(), from.getKey().getColumnVisibility(), from.getKey().getTimestamp()); if (log.isTraceEnabled()) log.trace("parent key " + parentKey); Entry<Key,Document> parentEntry = Maps.immutableEntry(parentKey, new Document()); Entry<Key,Document> aggParentEntry = makeDocument.apply(this.fetchDocData.apply(parentEntry)); Entry<Key,Document> keySwap = Maps.immutableEntry(from.getKey(), aggParentEntry.getValue()); if (log.isTraceEnabled()) log.trace("Key Swap is " + keySwap); return convert.apply(keySwap); } }
709
29,258
package com.taobao.arthas.core.config; import com.taobao.arthas.core.shell.ShellServerOptions; import com.taobao.arthas.core.util.reflect.ArthasReflectUtils; import java.lang.reflect.Field; import java.util.HashMap; import java.util.Map; import static java.lang.reflect.Modifier.isStatic; /** * <pre> * 配置类。 * 注意本类里的所有字段不能有默认值,否则会出现配置混乱。 * 在 com.taobao.arthas.core.Arthas#attach 里会调用 Configure#toStrig * <pre> * * @author vlinux * @author hengyunabc 2018-11-12 */ @Config(prefix = "arthas") public class Configure { private String ip; private Integer telnetPort; private Integer httpPort; private Long javaPid; private String arthasCore; private String arthasAgent; private String tunnelServer; private String agentId; private String username; private String password; /** * @see com.taobao.arthas.common.ArthasConstants#ARTHAS_OUTPUT */ private String outputPath; /** * 需要被增强的ClassLoader的全类名,多个用英文 , 分隔 */ private String enhanceLoaders; /** * <pre> * 1. 如果显式传入 arthas.agentId ,则直接使用 * 2. 如果用户没有指定,则自动尝试在查找应用的 appname,加为前缀,比如 system properties设置 project.name是 demo,则 * 生成的 agentId是 demo-xxxx * </pre> */ private String appName; /** * report executed command */ private String statUrl; /** * session timeout seconds * @see ShellServerOptions#DEFAULT_SESSION_TIMEOUT */ private Long sessionTimeout; /** * disabled commands */ private String disabledCommands; /** * 本地连接不需要鉴权,即使配置了password。arthas.properties 里默认为true */ private Boolean localConnectionNonAuth; public String getIp() { return ip; } public void setIp(String ip) { this.ip = ip; } public Integer getTelnetPort() { return telnetPort; } public void setTelnetPort(int telnetPort) { this.telnetPort = telnetPort; } public void setHttpPort(int httpPort) { this.httpPort = httpPort; } public Integer getHttpPort() { return httpPort; } public long getJavaPid() { return javaPid; } public void setJavaPid(long javaPid) { this.javaPid = javaPid; } public String getArthasAgent() { return arthasAgent; } public void setArthasAgent(String 
arthasAgent) { this.arthasAgent = arthasAgent; } public String getArthasCore() { return arthasCore; } public void setArthasCore(String arthasCore) { this.arthasCore = arthasCore; } public Long getSessionTimeout() { return sessionTimeout; } public void setSessionTimeout(long sessionTimeout) { this.sessionTimeout = sessionTimeout; } public String getTunnelServer() { return tunnelServer; } public void setTunnelServer(String tunnelServer) { this.tunnelServer = tunnelServer; } public String getAgentId() { return agentId; } public void setAgentId(String agentId) { this.agentId = agentId; } public String getStatUrl() { return statUrl; } public void setStatUrl(String statUrl) { this.statUrl = statUrl; } public String getAppName() { return appName; } public void setAppName(String appName) { this.appName = appName; } public String getEnhanceLoaders() { return enhanceLoaders; } public void setEnhanceLoaders(String enhanceLoaders) { this.enhanceLoaders = enhanceLoaders; } public String getOutputPath() { return outputPath; } public void setOutputPath(String outputPath) { this.outputPath = outputPath; } public String getUsername() { return username; } public void setUsername(String username) { this.username = username; } public String getPassword() { return password; } public void setPassword(String password) { this.password = password; } public String getDisabledCommands() { return disabledCommands; } public void setDisabledCommands(String disabledCommands) { this.disabledCommands = disabledCommands; } public boolean isLocalConnectionNonAuth() { return localConnectionNonAuth != null && localConnectionNonAuth; } public void setLocalConnectionNonAuth(boolean localConnectionNonAuth) { this.localConnectionNonAuth = localConnectionNonAuth; } /** * 序列化成字符串 * * @return 序列化字符串 */ @Override public String toString() { final Map<String, String> map = new HashMap<String, String>(); for (Field field : ArthasReflectUtils.getFields(Configure.class)) { // 过滤掉静态类 if 
(isStatic(field.getModifiers())) { continue; } // 非静态的才需要纳入非序列化过程 try { Object fieldValue = ArthasReflectUtils.getFieldValueByField(this, field); if (fieldValue != null) { map.put(field.getName(), String.valueOf(fieldValue)); } } catch (Throwable t) { // } } return FeatureCodec.DEFAULT_COMMANDLINE_CODEC.toString(map); } /** * 反序列化字符串成对象 * * @param toString 序列化字符串 * @return 反序列化的对象 */ public static Configure toConfigure(String toString) throws IllegalAccessException { final Configure configure = new Configure(); final Map<String, String> map = FeatureCodec.DEFAULT_COMMANDLINE_CODEC.toMap(toString); for (Map.Entry<String, String> entry : map.entrySet()) { final Field field = ArthasReflectUtils.getField(Configure.class, entry.getKey()); if (null != field && !isStatic(field.getModifiers())) { ArthasReflectUtils.set(field, ArthasReflectUtils.valueOf(field.getType(), entry.getValue()), configure); } } return configure; } }
2,866
357
<gh_stars>100-1000 /* * * Copyright (c) 2012-2016 VMware, Inc. All Rights Reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); you may not * use this file except in compliance with the License. You may obtain a copy * of the License at http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, without * warranties or conditions of any kind, EITHER EXPRESS OR IMPLIED. See the * License for the specific language governing permissions and limitations * under the License. * */ package com.vmware.identity.idm; import java.io.Serializable; import java.net.URL; import java.security.cert.X509Certificate; import org.apache.commons.lang.Validate; public class AlternativeOCSP implements Serializable { /** * */ private static final long serialVersionUID = 2291153773678362909L; private URL _responderURL; private X509Certificate _responderSigningCert; public AlternativeOCSP(URL responderURL, X509Certificate signingCert) { Validate.notNull(responderURL, "responderURL"); this._responderURL = responderURL; this._responderSigningCert = signingCert; } public X509Certificate get_responderSigningCert() { return _responderSigningCert; } public void set_responderSigningCert(X509Certificate cert) { this._responderSigningCert = cert; } public URL get_responderURL() { return _responderURL; } @Override public boolean equals(Object other) { boolean result = true; if (other instanceof AlternativeOCSP) { AlternativeOCSP otherAlternativeOCSP = (AlternativeOCSP) other; if (!this._responderURL.equals(otherAlternativeOCSP.get_responderURL()) || (this._responderSigningCert != null && !this._responderSigningCert.equals(otherAlternativeOCSP.get_responderSigningCert()) ) ) { result = false; } } else { result = false; } return result; } @Override public int hashCode() { int hash = _responderURL.hashCode(); if (null != _responderSigningCert) { hash += 
_responderSigningCert.hashCode(); } return hash; } }
876
1,853
#include <chrono> using seconds_t = std::chrono::seconds; constexpr seconds_t operator ""_s(unsigned long long s) { return seconds_t(s); } int main() { auto s = 1_s; }
78
310
// (C) Copyright <NAME> 2019 // Use, modification and distribution are subject to the Boost Software License, // Version 1.0. (See accompanying file LICENSE_1_0.txt or copy at // http://www.boost.org/LICENSE_1_0.txt). #if !defined(BOOST_TTI_DETAIL_COMP_MEM_FUN_TEMPLATE_HPP) #define BOOST_TTI_DETAIL_COMP_MEM_FUN_TEMPLATE_HPP #include <boost/mpl/bool.hpp> #include <boost/preprocessor/cat.hpp> #include <boost/preprocessor/array/enum.hpp> #include <boost/tti/detail/dftclass.hpp> #include <boost/tti/detail/dnullptr.hpp> #include <boost/tti/detail/dmacro_sunfix.hpp> #include <boost/tti/gen/namespace_gen.hpp> #include <boost/type_traits/remove_const.hpp> #include <boost/type_traits/detail/yes_no_type.hpp> #define BOOST_TTI_DETAIL_TRAIT_HAS_COMP_MEMBER_FUNCTION_TEMPLATE(trait,name,tparray) \ template<class BOOST_TTI_DETAIL_TP_T> \ struct BOOST_PP_CAT(trait,_detail_hcmft) \ { \ template<class BOOST_TTI_DETAIL_TP_F> \ struct cl_type : \ boost::remove_const \ < \ typename BOOST_TTI_NAMESPACE::detail::class_type<BOOST_TTI_DETAIL_TP_F>::type \ > \ { \ }; \ \ template<BOOST_TTI_DETAIL_TP_T> \ struct helper BOOST_TTI_DETAIL_MACRO_SUNFIX ; \ \ template<class BOOST_TTI_DETAIL_TP_U> \ static ::boost::type_traits::yes_type check(helper<&BOOST_TTI_DETAIL_TP_U::template name<BOOST_PP_ARRAY_ENUM(tparray)> > *); \ \ template<class BOOST_TTI_DETAIL_TP_U> \ static ::boost::type_traits::no_type check(...); \ \ typedef boost::mpl::bool_<sizeof(check<typename cl_type<BOOST_TTI_DETAIL_TP_T>::type>(BOOST_TTI_DETAIL_NULLPTR))==sizeof(::boost::type_traits::yes_type)> type; \ }; \ /**/ #endif // BOOST_TTI_DETAIL_COMP_MEM_FUN_TEMPLATE_HPP
830
1,736
/* Copyright (c) 2020-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #if __INTEL_COMPILER && _MSC_VER #pragma warning(disable : 2586) // decorated name length exceeded, name was truncated #endif #include "conformance_flowgraph.h" //! \file conformance_join_node.cpp //! \brief Test for [flow_graph.join_node] specification using input_msg = conformance::message</*default_ctor*/true, /*copy_ctor*/true, /*copy_assign*/true>; using my_input_tuple = std::tuple<int, float, input_msg>; std::vector<my_input_tuple> get_values( conformance::test_push_receiver<my_input_tuple>& rr ) { std::vector<my_input_tuple> messages; int val = 0; for(my_input_tuple tmp(0, 0.f, input_msg(0)); rr.try_get(tmp); ++val) { messages.push_back(tmp); } return messages; } #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT void test_deduction_guides() { using namespace tbb::flow; graph g; using tuple_type = std::tuple<int, int, int>; broadcast_node<int> b1(g), b2(g), b3(g); broadcast_node<tuple_type> b4(g); join_node<tuple_type> j0(g); #if __TBB_PREVIEW_FLOW_GRAPH_NODE_SET join_node j1(follows(b1, b2, b3)); static_assert(std::is_same_v<decltype(j1), join_node<tuple_type>>); join_node j2(follows(b1, b2, b3), reserving()); static_assert(std::is_same_v<decltype(j2), join_node<tuple_type, reserving>>); join_node j3(precedes(b4)); static_assert(std::is_same_v<decltype(j3), join_node<tuple_type>>); join_node j4(precedes(b4), reserving()); static_assert(std::is_same_v<decltype(j4), join_node<tuple_type, reserving>>); #endif 
join_node j5(j0); static_assert(std::is_same_v<decltype(j5), join_node<tuple_type>>); } #endif //! The node that is constructed has a reference to the same graph object as src. //! The list of predecessors, messages in the input ports, and successors are not copied. //! \brief \ref interface TEST_CASE("join_node copy constructor"){ oneapi::tbb::flow::graph g; oneapi::tbb::flow::continue_node<int> node0( g, [](oneapi::tbb::flow::continue_msg) { return 1; } ); oneapi::tbb::flow::join_node<std::tuple<int>> node1(g); conformance::test_push_receiver<std::tuple<int>> node2(g); conformance::test_push_receiver<std::tuple<int>> node3(g); oneapi::tbb::flow::make_edge(node0, oneapi::tbb::flow::input_port<0>(node1)); oneapi::tbb::flow::make_edge(node1, node2); oneapi::tbb::flow::join_node<std::tuple<int>> node_copy(node1); oneapi::tbb::flow::make_edge(node_copy, node3); oneapi::tbb::flow::input_port<0>(node_copy).try_put(1); g.wait_for_all(); auto values = conformance::get_values(node3); CHECK_MESSAGE((conformance::get_values(node2).size() == 0 && values.size() == 1), "Copied node doesn`t copy successor"); node0.try_put(oneapi::tbb::flow::continue_msg()); g.wait_for_all(); CHECK_MESSAGE((conformance::get_values(node2).size() == 1 && conformance::get_values(node3).size() == 0), "Copied node doesn`t copy predecessor"); oneapi::tbb::flow::remove_edge(node1, node2); oneapi::tbb::flow::input_port<0>(node1).try_put(1); g.wait_for_all(); oneapi::tbb::flow::join_node<std::tuple<int>> node_copy2(node1); oneapi::tbb::flow::make_edge(node_copy2, node3); oneapi::tbb::flow::input_port<0>(node_copy2).try_put(2); g.wait_for_all(); CHECK_MESSAGE((std::get<0>(conformance::get_values(node3)[0]) == 2), "Copied node doesn`t copy messages in the input ports"); } //! Test inheritance relations //! 
\brief \ref interface TEST_CASE("join_node inheritance"){ CHECK_MESSAGE((std::is_base_of<oneapi::tbb::flow::graph_node, oneapi::tbb::flow::join_node<my_input_tuple>>::value), "join_node should be derived from graph_node"); CHECK_MESSAGE((std::is_base_of<oneapi::tbb::flow::sender<my_input_tuple>, oneapi::tbb::flow::join_node<my_input_tuple>>::value), "join_node should be derived from sender<input_tuple>"); } //! Test join_node<queueing> behavior and broadcast property //! \brief \ref requirement TEST_CASE("join_node queueing policy and broadcast property") { oneapi::tbb::flow::graph g; oneapi::tbb::flow::function_node<int, int> f1( g, oneapi::tbb::flow::unlimited, [](const int &i) { return i; } ); oneapi::tbb::flow::function_node<float, float> f2( g, oneapi::tbb::flow::unlimited, [](const float &f) { return f; } ); oneapi::tbb::flow::continue_node<input_msg> c1( g, [](oneapi::tbb::flow::continue_msg) { return input_msg(1); } ); oneapi::tbb::flow::join_node<my_input_tuple, oneapi::tbb::flow::queueing> testing_node(g); conformance::test_push_receiver<my_input_tuple> q_node(g); std::atomic<int> number{1}; oneapi::tbb::flow::function_node<my_input_tuple, my_input_tuple> f3( g, oneapi::tbb::flow::unlimited, [&]( const my_input_tuple &t ) { CHECK_MESSAGE((std::get<0>(t) == number), "Messages must be in first-in first-out order" ); CHECK_MESSAGE((std::get<1>(t) == static_cast<float>(number) + 0.5f), "Messages must be in first-in first-out order" ); CHECK_MESSAGE((std::get<2>(t) == 1), "Messages must be in first-in first-out order" ); ++number; return t; } ); oneapi::tbb::flow::make_edge(f1, oneapi::tbb::flow::input_port<0>(testing_node)); oneapi::tbb::flow::make_edge(f2, oneapi::tbb::flow::input_port<1>(testing_node)); oneapi::tbb::flow::make_edge(c1, oneapi::tbb::flow::input_port<2>(testing_node)); make_edge(testing_node, f3); make_edge(f3, q_node); f1.try_put(1); g.wait_for_all(); CHECK_MESSAGE((get_values(q_node).size() == 0), "join_node must broadcast when there is at 
least one message at each input port"); f1.try_put(2); f2.try_put(1.5f); g.wait_for_all(); CHECK_MESSAGE((get_values(q_node).size() == 0), "join_node must broadcast when there is at least one message at each input port"); f1.try_put(3); f2.try_put(2.5f); c1.try_put(oneapi::tbb::flow::continue_msg()); g.wait_for_all(); CHECK_MESSAGE((get_values(q_node).size() == 1), "join_node must broadcast when there is at least one message at each input port"); f2.try_put(3.5f); c1.try_put(oneapi::tbb::flow::continue_msg()); g.wait_for_all(); CHECK_MESSAGE((get_values(q_node).size() == 1), "If at least one successor accepts the tuple, the head of each input port’s queue is removed"); c1.try_put(oneapi::tbb::flow::continue_msg()); g.wait_for_all(); CHECK_MESSAGE((get_values(q_node).size() == 1), "If at least one successor accepts the tuple, the head of each input port’s queue is removed"); c1.try_put(oneapi::tbb::flow::continue_msg()); g.wait_for_all(); CHECK_MESSAGE((get_values(q_node).size() == 0), "join_node must broadcast when there is at least one message at each input port"); oneapi::tbb::flow::remove_edge(testing_node, f3); f1.try_put(1); f2.try_put(1); c1.try_put(oneapi::tbb::flow::continue_msg()); g.wait_for_all(); my_input_tuple tmp(0, 0.f, input_msg(0)); CHECK_MESSAGE((testing_node.try_get(tmp)), "If no one successor accepts the tuple the messages\ must remain in their respective input port queues"); CHECK_MESSAGE((tmp == my_input_tuple(1, 1.f, input_msg(1))), "If no one successor accepts the tuple\ the messages must remain in their respective input port queues"); } //! Test join_node<reserving> behavior //! \brief \ref requirement TEST_CASE("join_node reserving policy") { conformance::test_with_reserving_join_node_class<oneapi::tbb::flow::write_once_node<int>>(); } template<typename KeyType> struct MyHash{ std::size_t hash(const KeyType &k) const { return k * 2000 + 3; } bool equal(const KeyType &k1, const KeyType &k2) const{ return hash(k1) == hash(k2); } }; //! 
Test join_node<key_matching> behavior //! \brief \ref requirement TEST_CASE("join_node key_matching policy"){ oneapi::tbb::flow::graph g; auto body1 = [](const oneapi::tbb::flow::continue_msg &) -> int { return 1; }; auto body2 = [](const float &val) -> int { return static_cast<int>(val); }; oneapi::tbb::flow::join_node<std::tuple<oneapi::tbb::flow::continue_msg, float>, oneapi::tbb::flow::key_matching<int, MyHash<int>>> testing_node(g, body1, body2); oneapi::tbb::flow::input_port<0>(testing_node).try_put(oneapi::tbb::flow::continue_msg()); oneapi::tbb::flow::input_port<1>(testing_node).try_put(1.3f); g.wait_for_all(); std::tuple<oneapi::tbb::flow::continue_msg, float> tmp; CHECK_MESSAGE((testing_node.try_get(tmp)), "Mapped keys should match.\ If no successor accepts the tuple, it is must been saved and will be forwarded on a subsequent try_get"); CHECK_MESSAGE((!testing_node.try_get(tmp)), "Message should not exist after item is consumed"); } //! Test join_node<tag_matching> behavior //! \brief \ref requirement TEST_CASE("join_node tag_matching policy"){ oneapi::tbb::flow::graph g; auto body1 = [](const oneapi::tbb::flow::continue_msg &) -> oneapi::tbb::flow::tag_value { return 1; }; auto body2 = [](const float &val) -> oneapi::tbb::flow::tag_value { return static_cast<oneapi::tbb::flow::tag_value>(val); }; oneapi::tbb::flow::join_node<std::tuple<oneapi::tbb::flow::continue_msg, float>, oneapi::tbb::flow::tag_matching> testing_node(g, body1, body2); oneapi::tbb::flow::input_port<0>(testing_node).try_put(oneapi::tbb::flow::continue_msg()); oneapi::tbb::flow::input_port<1>(testing_node).try_put(1.3f); g.wait_for_all(); std::tuple<oneapi::tbb::flow::continue_msg, float> tmp; CHECK_MESSAGE((testing_node.try_get(tmp) == true), "Mapped keys should match"); } #if __TBB_CPP17_DEDUCTION_GUIDES_PRESENT //! Test deduction guides //! \brief \ref requirement TEST_CASE("Deduction guides test"){ test_deduction_guides(); } #endif //! 
Test join_node input_ports() returns a tuple of input ports. //! \brief \ref interface \ref requirement TEST_CASE("join_node output_ports") { oneapi::tbb::flow::graph g; oneapi::tbb::flow::join_node<std::tuple<int>> node(g); CHECK_MESSAGE((std::is_same<oneapi::tbb::flow::join_node<std::tuple<int>>::input_ports_type&, decltype(node.input_ports())>::value), "join_node input_ports should returns a tuple of input ports"); }
4,582
319
<filename>src/NativeScript/ManualInstrumentation.h // // Created by <NAME> on 26/05/2017. // #ifndef MANUALINSTRUMENTATION_H #define MANUALINSTRUMENTATION_H #include <WTF/Assertions.h> #import <chrono> #include <string> namespace tns { namespace instrumentation { enum Mode { Disabled = 0, Uninitialized = 1, Enabled = 2 }; class Frame { public: inline Frame() : Frame("") {} inline Frame(std::string name) : start(mode == Mode::Disabled ? disabled_time : std::chrono::steady_clock::now()) , name(name) {} inline Frame(const Frame& copy) : start(copy.start) , name(copy.name) {} inline ~Frame() { if (!name.empty() && check()) { log(name); } } inline bool check() const { if (mode == Mode::Disabled) { return false; } std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); auto duration = std::chrono::duration_cast<std::chrono::microseconds>(std::chrono::operator-(end, start)).count(); return duration >= 16000; } inline void log(const char* message) const { if (mode == Mode::Disabled) { return; } std::chrono::steady_clock::time_point end = std::chrono::steady_clock::now(); auto startMilis = std::chrono::time_point_cast<std::chrono::microseconds>(start).time_since_epoch().count() / 1000.0; auto endMilis = std::chrono::time_point_cast<std::chrono::microseconds>(end).time_since_epoch().count() / 1000.0; WTFLogAlways("CONSOLE LOG Timeline: Runtime: %s (%.3fms - %.3fms)\n", message, startMilis, endMilis); } inline void log(const std::string& message) const { log(message.c_str()); } static inline void enable() { mode = Mode::Enabled; } static inline void disable() { mode = Mode::Disabled; } /** * Use enable() disable() instead. 
*/ static Mode mode; static const std::chrono::steady_clock::time_point disabled_time; // Couldn't find reasonable constant private: const std::chrono::steady_clock::time_point start; const std::string name; Frame& operator=(const Frame&) = delete; }; }; // namespace instrumentation }; // namespace tns /** * Place at the start of a method. Will log to android using the "JS" tag methods that execute relatively slow. */ #define TNSPERF() tns::instrumentation::Frame __tns_manual_instrumentation(__func__) #endif //MANUALINSTRUMENTATION_H
1,228
403
<filename>Lib/Source/Translate.cpp #include "Source/Syntax.h" #include "Target/Syntax.h" #include "Target/SmallLiteral.h" #include "Target/LoadStore.h" #include "Common/Seq.h" // ============================================================================ // Opcodes and operands // ============================================================================ // Translate source operator to target opcode ALUOp opcode(Op op) { if (op.type == FLOAT) { switch (op.op) { case ADD: return A_FADD; case SUB: return A_FSUB; case MUL: return M_FMUL; case MIN: return A_FMIN; case MAX: return A_FMAX; case ItoF: return A_ItoF; case ROTATE: return M_ROTATE; default: assert(false); } } else { switch (op.op) { case ADD: return A_ADD; case SUB: return A_SUB; case MUL: return M_MUL24; case MIN: return A_MIN; case MAX: return A_MAX; case FtoI: return A_FtoI; case SHL: return A_SHL; case SHR: return A_ASR; case USHR: return A_SHR; case ROR: return A_ROR; case BAND: return A_BAND; case BOR: return A_BOR; case BXOR: return A_BXOR; case BNOT: return A_BNOT; case ROTATE: return M_ROTATE; default: assert(false); } } } // Translate variable to source register. Reg srcReg(Var v) { Reg r; switch (v.tag) { case UNIFORM: r.tag = SPECIAL; r.regId = SPECIAL_UNIFORM; return r; case QPU_NUM: r.tag = SPECIAL; r.regId = SPECIAL_QPU_NUM; return r; case ELEM_NUM: r.tag = SPECIAL; r.regId = SPECIAL_ELEM_NUM; return r; case STANDARD: r.tag = REG_A; r.regId = v.id; return r; } // Not reachable assert(false); } // Translate variable to target register. Reg dstReg(Var v) { Reg r; switch (v.tag) { case UNIFORM: case QPU_NUM: case ELEM_NUM: printf("QPULib: writing to read-only special register is forbidden\n"); assert(false); case STANDARD: r.tag = REG_A; r.regId = v.id; return r; case TMU0_ADDR: r.tag = SPECIAL; r.regId = SPECIAL_TMU0_S; return r; } // Not reachable assert(false); } // Translate the argument of an operator (either a variable or a small // literal) to a target operand. 
RegOrImm operand(Expr* e) { RegOrImm x; if (e->tag == VAR) { x.tag = REG; x.reg = srcReg(e->var); return x; } int enc = encodeSmallLit(e); assert(enc >= 0); x.tag = IMM; x.smallImm.tag = SMALL_IMM; x.smallImm.val = enc; return x; } // ============================================================================ // 'Simple' expressions // ============================================================================ // An expression is 'simple' if it is a small literal (see // Target/SmallLiteral.cpp) or a variable. bool isSimple(Expr* e) { return (e->tag == VAR) || isSmallLit(e); } // Translate an expression to a simple expressions, generating // instructions along the way. (Prototype here, implementation below, // since this and 'varAssign' are mutually recursive.) Expr* simplify(Seq<Instr>* seq, Expr* e); // Similar to 'simplify' but ensure that the result is a variable. Expr* putInVar(Seq<Instr>* seq, Expr* e); // ============================================================================ // Variable assignments // ============================================================================ // Translate the conditional assignment of a variable to an expression. 
void varAssign( Seq<Instr>* seq // Target instruction sequence to extend , AssignCond cond // Condition on assignment , Var v // Variable on LHS , Expr* expr // Expression on RHS ) { Expr e = *expr; // ----------------------------------------- // Case: v := w, where v and w are variables // ----------------------------------------- if (e.tag == VAR) { Var w = e.var; Instr instr; instr.tag = ALU; instr.ALU.setFlags = false; instr.ALU.cond = cond; instr.ALU.dest = dstReg(v); instr.ALU.srcA.tag = REG; instr.ALU.srcA.reg = srcReg(w); instr.ALU.op = A_BOR; instr.ALU.srcB.tag = REG; instr.ALU.srcB.reg = instr.ALU.srcA.reg; seq->append(instr); return; } // ------------------------------------------- // Case: v := i, where i is an integer literal // ------------------------------------------- if (e.tag == INT_LIT) { int i = e.intLit; Instr instr; instr.tag = LI; instr.LI.setFlags = false; instr.LI.cond = cond; instr.LI.dest = dstReg(v); instr.LI.imm.tag = IMM_INT32; instr.LI.imm.intVal = i; seq->append(instr); return; } // ---------------------------------------- // Case: v := f, where f is a float literal // ---------------------------------------- if (e.tag == FLOAT_LIT) { float f = e.floatLit; Instr instr; instr.tag = LI; instr.LI.setFlags = false; instr.LI.cond = cond; instr.LI.dest = dstReg(v); instr.LI.imm.tag = IMM_FLOAT32; instr.LI.imm.floatVal = f; seq->append(instr); return; } // ---------------------------------------------- // Case: v := x op y, where x or y are not simple // ---------------------------------------------- if (e.tag == APPLY && (!isSimple(e.apply.lhs) || !isSimple(e.apply.rhs))) { e.apply.lhs = simplify(seq, e.apply.lhs); e.apply.rhs = simplify(seq, e.apply.rhs); } // -------------------------------------------------- // Case: v := x op y, where x and y are both literals // -------------------------------------------------- if (e.tag == APPLY && isLit(e.apply.lhs) && isLit(e.apply.rhs)) { Var tmpVar = freshVar(); varAssign(seq, cond, tmpVar, 
e.apply.lhs); e.apply.lhs = mkVar(tmpVar); } // ------------------------------------------- // Case: v := x op y, where x and y are simple // ------------------------------------------- if (e.tag == APPLY) { Instr instr; instr.tag = ALU; instr.ALU.setFlags = false; instr.ALU.cond = cond; instr.ALU.dest = dstReg(v); instr.ALU.srcA = operand(e.apply.lhs); instr.ALU.op = opcode(e.apply.op); instr.ALU.srcB = operand(e.apply.rhs); seq->append(instr); return; } // --------------------------------------- // Case: v := *w where w is not a variable // --------------------------------------- if (e.tag == DEREF && e.deref.ptr->tag != VAR) { assert(!isLit(e.deref.ptr)); e.deref.ptr = simplify(seq, e.deref.ptr); } // ----------------------------------- // Case: v := *w where w is a variable // ----------------------------------- // // Restriction: we disallow dereferencing in conditional ('where') // assignments for simplicity. In most (all?) cases it should be // trivial to lift these outside the 'where'. // if (e.tag == DEREF) { if (cond.tag != ALWAYS) { printf("QPULib: dereferencing not yet supported inside 'where'\n"); assert(false); } Instr instr; instr.tag = LD1; instr.LD1.addr = srcReg(e.deref.ptr->var); instr.LD1.buffer = A; seq->append(instr); instr.tag = LD2; seq->append(instr); instr.tag = LD3; instr.LD3.buffer = A; seq->append(instr); instr.tag = LD4; instr.LD4.dest = dstReg(v); seq->append(instr); return; } // This case should not be reachable assert(false); } // Translate an expression to a simple expression, generating // instructions along the way. Expr* simplify(Seq<Instr>* seq, Expr* e) { if (!isSimple(e)) { AssignCond always; always.tag = ALWAYS; Var tmp = freshVar(); varAssign(seq, always, tmp, e); return mkVar(tmp); } else return e; } // Similar to 'simplify' but ensure that the result is a variable. 
Expr* putInVar(Seq<Instr>* seq, Expr* e) { if (e->tag != VAR) { AssignCond always; always.tag = ALWAYS; Var tmp = freshVar(); varAssign(seq, always, tmp, e); return mkVar(tmp); } else return e; } // ============================================================================ // Assignment statements // ============================================================================ void assign( Seq<Instr>* seq // Target instruction sequence to extend , Expr *lhsExpr // Expression on left-hand side , Expr *rhs // Expression on right-hand side ) { Expr lhs = *lhsExpr; AssignCond always; always.tag = ALWAYS; // ----------------------------------------------------------- // Case: v := rhs, where v is a variable and rhs an expression // ----------------------------------------------------------- if (lhs.tag == VAR) { varAssign(seq, always, lhs.var, rhs); return; } // --------------------------------------------------------- // Case: *lhs := rhs where lhs is not a var or rhs not a var // --------------------------------------------------------- if (lhs.tag == DEREF && (lhs.deref.ptr->tag != VAR || rhs->tag != VAR)) { assert(!isLit(lhs.deref.ptr)); lhs.deref.ptr = simplify(seq, lhs.deref.ptr); rhs = putInVar(seq, rhs); } // ------------------------------------------------- // Case: *v := rhs where v is a var and rhs is a var // ------------------------------------------------- if (lhs.tag == DEREF) { Instr instr; instr.tag = ST1; instr.ST1.data = srcReg(rhs->var); instr.ST1.buffer = A; seq->append(instr); instr.tag = ST2; instr.ST2.addr = srcReg(lhs.deref.ptr->var); instr.ST2.buffer = A; seq->append(instr); instr.tag = ST3; seq->append(instr); return; } // This case should not be reachable assert(false); } // ============================================================================ // Condition flag helpers // ============================================================================ // Each QPU contains an implicit condition vector which can answer // various questions 
about each element of a vector: // // * ZS - is zero? // * ZC - is non-zero? // * NS - is negative? // * NC - is non-negative? // // The condition vector is modified when the 'setFlags' field of // an ALU instruction is 'true'. The condition vector can be read // from an assignment condition or in a branch condition. // Function to negate a condition flag Flag negFlag(Flag flag) { switch(flag) { case ZS: return ZC; case ZC: return ZS; case NS: return NC; case NC: return NS; } // Not reachable assert(false); } // Function to negate an assignment condition. AssignCond negAssignCond(AssignCond cond) { switch (cond.tag) { case NEVER: cond.tag = ALWAYS; return cond; case ALWAYS: cond.tag = NEVER; return cond; case FLAG: cond.flag = negFlag(cond.flag); return cond; } // Not reachable assert(false); } // Function to negate a branch condition. BranchCond negBranchCond(BranchCond cond) { switch (cond.tag) { case COND_NEVER: cond.tag = COND_ALWAYS; return cond; case COND_ALWAYS: cond.tag = COND_NEVER; return cond; case COND_ANY: cond.tag = COND_ALL; cond.flag = negFlag(cond.flag); return cond; case COND_ALL: cond.tag = COND_ANY; cond.flag = negFlag(cond.flag); return cond; } // Not reachable assert(false); } // Return a value that will cause the specified flag bit to be set in // the condition vector. int setFlag(Flag f) { switch (f) { case ZS: return 0; case ZC: return 1; case NS: return -1; case NC: return 0; } // Not reachable assert(false); } // Set the condition vector using given variable. Instr setCond(Var v) { AssignCond always; always.tag = ALWAYS; Reg r; r.tag = NONE; Instr instr; instr.tag = ALU; instr.ALU.setFlags = true; instr.ALU.cond = always; instr.ALU.dest = r; instr.ALU.srcA.tag = REG; instr.ALU.srcA.reg = srcReg(v); instr.ALU.op = A_BOR; instr.ALU.srcB.tag = REG; instr.ALU.srcB.reg = instr.ALU.srcA.reg; return instr; } // A shorthand 'move' instruction is handy later. 
Instr move(Var dst, Var src, bool setFlags) { AssignCond always; always.tag = ALWAYS; Instr instr; instr.tag = ALU; instr.ALU.setFlags = setFlags; instr.ALU.cond = always; instr.ALU.dest = dstReg(dst); instr.ALU.srcA.tag = REG; instr.ALU.srcA.reg = srcReg(src); instr.ALU.op = A_BOR; instr.ALU.srcB.tag = REG; instr.ALU.srcB.reg = instr.ALU.srcA.reg; return instr; } // ============================================================================ // Boolean expressions // ============================================================================ // Evaluating a vector boolean expression results in a condition // pair <condVar,condFlag> where // // * condVar is a variable containing a vector of values // // * condFlag is a condition flag in set {ZS,ZC,NS,NC} (see above) // // If 'condVar' is assigned to a register and the 'setFlags' field of // the assignment is 'true', the implicit condition vector is updated. // The implicit condition vector can then be queried using 'condFlag' // to determine the truth of elements in the boolean vector. // // For example, assuming vectors of size 4 for simplicity, the result // of evaluating // // [1,2,3,4] <= [4,1,3,6] // // might be the condition pair <[-3,1,0,-2], NC>. // // Given two condition pairs <condVarA, condFlagA> and <condVarB, // condFlagB> we would like to compute the logical disjunction. // // Pre-condition: we are in a state where the implicit condition // vector has been set using the value of condVarB, hence we don't // need the value of condVarB as an argument. // // The 'modify' flag defines whether or not to update the implicit // condition vector with the final result. // // The value of condVarA will be overwritten with the 'condVar' of the // disjunction, and the corresponding condFlag will be returned as a // result. 
AssignCond boolOr( Seq<Instr>* seq , AssignCond condA , Var condVarA , AssignCond condB , bool modify ) { if (condA.tag == ALWAYS) return condA; else if (condB.tag == ALWAYS) return condB; else if (condB.tag == NEVER) { if (modify) seq->append(setCond(condVarA)); return condA; } else if (condA.tag == NEVER) { Instr instr; instr.tag = LI; instr.LI.setFlags = false; instr.LI.cond = condB; instr.LI.dest = dstReg(condVarA); instr.LI.imm.tag = IMM_INT32; instr.LI.imm.intVal = setFlag(condB.flag); seq->append(instr); return condB; } else { Instr instr; instr.tag = LI; instr.LI.setFlags = false; instr.LI.cond = condB; instr.LI.dest = dstReg(condVarA); instr.LI.imm.tag = IMM_INT32; instr.LI.imm.intVal = setFlag(condA.flag); seq->append(instr); if (modify) seq->append(setCond(condVarA)); return condA; } } // Conjunction is now easy thanks to De Morgan's law: AssignCond boolAnd( Seq<Instr>* seq , AssignCond condA , Var condVarA , AssignCond condB , bool modify ) { return negAssignCond( boolOr(seq, negAssignCond(condA), condVarA, negAssignCond(condB), modify)); } // Now the translation scheme for general boolean expressions. // The interface is: // // * a boolean expression to evaluate; // // * a condVar 'v' to which the evaluated expression will be written // to; the return value will contain the corresponding condFlag. // // * if the modify-bit is true, then the implicit condition vector // will be set using with the result of the expression. (This is a // one-way 'if': you cannot rely on the condition vector not // being mutated even if this bit is false.) // // * instructions to evaluate the expression are appended to the // given instruction sequence. 
AssignCond boolExp( Seq<Instr>* seq , BExpr* bexpr , Var v , bool modify ) { BExpr b = *bexpr; AssignCond always; always.tag = ALWAYS; // ------------------------------- // Case: x > y, replace with y < x // ------------------------------- if (b.tag == CMP && b.cmp.op.op == GT) { Expr* e = b.cmp.lhs; b.cmp.lhs = b.cmp.rhs; b.cmp.rhs = e; b.cmp.op.op = LT; } // --------------------------------- // Case: x <= y, replace with y >= x // --------------------------------- if (b.tag == CMP && b.cmp.op.op == LE) { Expr* e = b.cmp.lhs; b.cmp.lhs = b.cmp.rhs; b.cmp.rhs = e; b.cmp.op.op = GE; } // ----------------------------------- // Case: x op y, where x is not simple // ----------------------------------- if (b.tag == CMP && !isSimple(b.cmp.lhs)) { b.cmp.lhs = simplify(seq, b.cmp.lhs); } // ----------------------------------- // Case: x op y, where y is not simple // ----------------------------------- if (b.tag == CMP && !isSimple(b.cmp.rhs)) { b.cmp.rhs = simplify(seq, b.cmp.rhs); } // --------------------------------------------- // Case: x op y, where x and y are both literals // --------------------------------------------- if (b.tag == CMP && isLit(b.cmp.lhs) && isLit(b.cmp.rhs)) { Var tmpVar = freshVar(); varAssign(seq, always, tmpVar, b.cmp.lhs); b.cmp.lhs = mkVar(tmpVar); } // -------------------------------------- // Case: x op y, where x and y are simple // -------------------------------------- if (b.tag == CMP) { // Compute condition flag AssignCond cond; cond.tag = FLAG; switch(b.cmp.op.op) { case EQ: cond.flag = ZS; break; case NEQ: cond.flag = ZC; break; case LT: cond.flag = NS; break; case GE: cond.flag = NC; break; default: assert(false); } // Implement comparison using subtraction instruction Op op; op.type = b.cmp.op.type; op.op = SUB; Instr instr; instr.tag = ALU; instr.ALU.setFlags = true; instr.ALU.cond = always; instr.ALU.dest = dstReg(v); instr.ALU.srcA = operand(b.cmp.lhs); instr.ALU.op = opcode(op); instr.ALU.srcB = operand(b.cmp.rhs); 
seq->append(instr); return cond; } // ----------------------------------------- // Case: !b, where b is a boolean expression // ----------------------------------------- if (b.tag == NOT) { AssignCond cond = boolExp(seq, b.neg, v, modify); return negAssignCond(cond); } // ------------------------------------------------ // Case: a || b, where a, b are boolean expressions // ------------------------------------------------ if (b.tag == OR) { Var w = freshVar(); AssignCond condA = boolExp(seq, b.disj.lhs, v, false); AssignCond condB = boolExp(seq, b.disj.rhs, w, true); return boolOr(seq, condA, v, condB, true); } // ------------------------------------------------ // Case: a && b, where a, b are boolean expressions // ------------------------------------------------ if (b.tag == AND) { // Use De Morgan's law BExpr* demorgan = mkNot(mkOr(mkNot(b.conj.lhs), mkNot(b.conj.rhs))); return boolExp(seq, demorgan, v, modify); } // Not reachable assert(false); } // ============================================================================ // Conditional expressions // ============================================================================ BranchCond condExp(Seq<Instr>* seq, CExpr* c) { Var v = freshVar(); AssignCond cond = boolExp(seq, c->bexpr, v, true); BranchCond bcond; if (cond.tag == ALWAYS) { bcond.tag = COND_ALWAYS; return bcond; } if (cond.tag == NEVER) { bcond.tag = COND_NEVER; return bcond; } assert(cond.tag == FLAG); bcond.flag = cond.flag; if (c->tag == ANY) { bcond.tag = COND_ANY; return bcond; } else if (c->tag == ALL) { bcond.tag = COND_ALL; return bcond; } // Not reachable assert(false); } // ============================================================================ // Where statements // ============================================================================ void whereStmt( Seq<Instr>* seq , Stmt* s , Var condVar , AssignCond cond , bool saveRestore ) { if (s == NULL) return; // ---------- // Case: skip // ---------- if (s->tag == SKIP) return; 
// ------------------------------------------------------ // Case: v = e, where v is a variable and e an expression // ------------------------------------------------------ if (s->tag == ASSIGN && s->assign.lhs->tag == VAR) { varAssign(seq, cond, s->assign.lhs->var, s->assign.rhs); return; } // --------------------------------------------- // Case: s0 ; s1, where s0 and s1 are statements // --------------------------------------------- if (s->tag == SEQ) { whereStmt(seq, s->seq.s0, condVar, cond, true); whereStmt(seq, s->seq.s1, condVar, cond, saveRestore); return; } // ---------------------------------------------------------- // Case: where (b) s0 s1, where b is a boolean expression and // s0 and s1 are statements. // ---------------------------------------------------------- if (s->tag == WHERE) { if (cond.tag == ALWAYS) { // This case has a cheaper implementation // Compile new boolean expression AssignCond newCond = boolExp(seq, s->where.cond, condVar, true); // Compile 'then' statement if (s->where.thenStmt != NULL) whereStmt(seq, s->where.thenStmt, condVar, newCond, s->where.elseStmt != NULL); // Compile 'else' statement if (s->where.elseStmt != NULL) whereStmt(seq, s->where.elseStmt, condVar, negAssignCond(newCond), false); } else { // Save condVar Var savedCondVar = freshVar(); if (saveRestore || s->where.elseStmt != NULL) seq->append(move(savedCondVar, condVar, false)); // Compile new boolean expression Var newCondVar = freshVar(); AssignCond newCond = boolExp(seq, s->where.cond, newCondVar, true); if (s->where.thenStmt != NULL) { // AND new boolean expression with original condition AssignCond andCond = boolAnd(seq, cond, condVar, newCond, true); // Compile 'then' statement whereStmt(seq, s->where.thenStmt, condVar, andCond, false); } if (saveRestore || s->where.elseStmt != NULL) seq->append(move(condVar, savedCondVar, true)); if (s->where.elseStmt != NULL) { // AND negation of new boolean expression with original condition AssignCond andCond = 
boolAnd(seq, negAssignCond(newCond), newCondVar, cond, true); // Compile 'else' statement whereStmt(seq, s->where.elseStmt, newCondVar, andCond, false); // Restore condVar and implicit condition vector if (saveRestore) seq->append(move(condVar, savedCondVar, true)); } } return; } printf("QPULib: only assignments and nested 'where' \ statements can occur in a 'where' statement\n"); assert(false); } // ============================================================================ // Print statements // ============================================================================ void printStmt(Seq<Instr>* seq, PrintStmt s) { Instr instr; switch (s.tag) { case PRINT_INT: case PRINT_FLOAT: { AssignCond always; always.tag = ALWAYS; Var tmpVar = freshVar(); varAssign(seq, always, tmpVar, s.expr); if (s.tag == PRINT_INT) { instr.tag = PRI; instr.PRI = srcReg(tmpVar); } else { instr.tag = PRF; instr.PRF = srcReg(tmpVar); } seq->append(instr); return; } case PRINT_STR: instr.tag = PRS; instr.PRS = s.str; seq->append(instr); return; } assert(false); } // ============================================================================ // Set-stride statements // ============================================================================ void setStrideStmt(Seq<Instr>* seq, StmtTag tag, Expr* e) { if (e->tag == INT_LIT) { if (tag == SET_READ_STRIDE) genSetReadStride(seq, e->intLit); else genSetWriteStride(seq, e->intLit); } else if (e->tag == VAR) { if (tag == SET_READ_STRIDE) genSetReadStride(seq, srcReg(e->var)); else genSetWriteStride(seq, srcReg(e->var)); } else { AssignCond always; always.tag = ALWAYS; Var v = freshVar(); varAssign(seq, always, v, e); if (tag == SET_READ_STRIDE) genSetReadStride(seq, srcReg(v)); else genSetWriteStride(seq, srcReg(v)); } } // ============================================================================ // Load receive statements // ============================================================================ void loadReceive(Seq<Instr>* seq, Expr* 
dest) { assert(dest->tag == VAR); Instr instr; instr.tag = RECV; instr.RECV.dest = dstReg(dest->var); seq->append(instr); } // ============================================================================ // Store request // ============================================================================ // A 'store' operation of data to addr is almost the same as // *addr = data. The difference is that a 'store' waits until // outstanding DMAs have completed before performing a write rather // than after a write. This enables other operations to happen in // parallel with the write. void storeRequest(Seq<Instr>* seq, Expr* data, Expr* addr) { if (data->tag != VAR || addr->tag != VAR) { data = putInVar(seq, data); addr = putInVar(seq, addr); } Instr instr; instr.tag = ST3; seq->append(instr); instr.tag = ST1; instr.ST1.data = srcReg(data->var); instr.ST1.buffer = A; seq->append(instr); instr.tag = ST2; instr.ST2.addr = srcReg(addr->var); instr.ST2.buffer = A; seq->append(instr); } // ============================================================================ // Semaphores // ============================================================================ void semaphore(Seq<Instr>* seq, StmtTag tag, int semaId) { Instr instr; instr.tag = tag == SEMA_INC ? 
SINC : SDEC; instr.semaId = semaId; seq->append(instr); } // ============================================================================ // Host IRQ // ============================================================================ void sendIRQToHost(Seq<Instr>* seq) { Instr instr; instr.tag = IRQ; seq->append(instr); } // ============================================================================ // Statements // ============================================================================ void stmt(Seq<Instr>* seq, Stmt* s) { if (s == NULL) return; // ---------- // Case: skip // ---------- if (s->tag == SKIP) return; // -------------------------------------------------- // Case: lhs = rhs, where lhs and rhs are expressions // -------------------------------------------------- if (s->tag == ASSIGN) { assign(seq, s->assign.lhs, s->assign.rhs); return; } // --------------------------------------------- // Case: s0 ; s1, where s1 and s2 are statements // --------------------------------------------- if (s->tag == SEQ) { stmt(seq, s->seq.s0); stmt(seq, s->seq.s1); return; } // ------------------------------------------------------------------- // Case: if (c) s0 s1, where c is a condition, and lhs,rhs expressions // ------------------------------------------------------------------- if (s->tag == IF) { Instr instr; Label elseLabel = freshLabel(); Label endifLabel = freshLabel(); // Compile condition BranchCond cond = condExp(seq, s->ifElse.cond); // Branch to 'else' statement Instr branchElse; instr.tag = BRL; instr.BRL.cond = negBranchCond(cond); instr.BRL.label = elseLabel; seq->append(instr); // Compile 'then' statement stmt(seq, s->ifElse.thenStmt); // Branch to endif instr.tag = BRL; instr.BRL.cond.tag = COND_ALWAYS; instr.BRL.label = endifLabel; if (s->ifElse.elseStmt != NULL) seq->append(instr); // Label for 'else' statement instr.tag = LAB; instr.label = elseLabel; seq->append(instr); // Compile 'else' statement stmt(seq, s->ifElse.elseStmt); // Label for endif 
instr.tag = LAB; instr.label = endifLabel; seq->append(instr); return; } // ----------------------------------------------------------- // Case: while (c) s where c is a condition, and s a statement // ----------------------------------------------------------- if (s->tag == WHILE) { Instr instr; Label startLabel = freshLabel(); Label endLabel = freshLabel(); // Compile condition BranchCond cond = condExp(seq, s->loop.cond); // Branch over loop body Instr branchEnd; instr.tag = BRL; instr.BRL.cond = negBranchCond(cond); instr.BRL.label = endLabel; seq->append(instr); // Start label instr.tag = LAB; instr.label = startLabel; seq->append(instr); // Compile body if (s->loop.body != NULL) stmt(seq, s->loop.body); // Compute condition again condExp(seq, s->loop.cond); // Branch to start instr.tag = BRL; instr.BRL.cond = cond; instr.BRL.label = startLabel; seq->append(instr); // End label instr.tag = LAB; instr.label = endLabel; seq->append(instr); return; } // ---------------------------------------------------------------------- // Case: where (b) s0 s1 where c is a boolean expr, and s0, s1 statements // ---------------------------------------------------------------------- if (s->tag == WHERE) { Var condVar = freshVar(); AssignCond always; always.tag = ALWAYS; whereStmt(seq, s, condVar, always, false); return; } // --------------------------------------------- // Case: print(e) where e is an expr or a string // --------------------------------------------- if (s->tag == PRINT) { printStmt(seq, s->print); return; } // -------------------------------------------------------------- // Case: setReadStride(e) or setWriteStride(e) where e is an expr // -------------------------------------------------------------- if (s->tag == SET_READ_STRIDE || s->tag == SET_WRITE_STRIDE) { setStrideStmt(seq, s->tag, s->stride); return; } // ----------------------------------- // Case: receive(e) where e is an expr // ----------------------------------- if (s->tag == LOAD_RECEIVE) { 
loadReceive(seq, s->loadDest); return; } // --------------------------------------------- // Case: store(e0, e1) where e1 and e2 are exprs // --------------------------------------------- if (s->tag == STORE_REQUEST) { storeRequest(seq, s->storeReq.data, s->storeReq.addr); return; } // ------------- // Case: flush() // ------------- if (s->tag == FLUSH) { // Flush outstanding stores Instr instr; instr.tag = ST3; seq->append(instr); return; } // --------------------------------------------------------------- // Case: semaInc(n) or semaDec(n) where n is an int (semaphore id) // --------------------------------------------------------------- if (s->tag == SEMA_INC || s->tag == SEMA_DEC) { semaphore(seq, s->tag, s->semaId); return; } // --------------- // Case: hostIRQ() // --------------- if (s->tag == SEND_IRQ_TO_HOST) { sendIRQToHost(seq); return; } // Not reachable assert(false); } // ============================================================================ // End code // ============================================================================ void insertEndCode(Seq<Instr>* seq) { Instr instr; // Insert 'end' instruction instr.tag = END; seq->append(instr); } // ============================================================================ // Interface // ============================================================================ // Top-level translation function for statements. void translateStmt(Seq<Instr>* seq, Stmt* s) { stmt(seq, s); insertEndCode(seq); }
12,534
2,151
// Copyright 2014 PDFium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Original code copyright 2014 Foxit Software Inc. http://www.foxitsoftware.com // Original code is licensed as follows: /* * Copyright 2007 ZXing authors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "fxbarcode/qrcode/BC_QRCoderMode.h" #include <utility> #include "fxbarcode/utils.h" CBC_QRCoderMode* CBC_QRCoderMode::sBYTE = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sNUMERIC = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sALPHANUMERIC = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sKANJI = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sECI = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sGBK = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sTERMINATOR = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sFNC1_FIRST_POSITION = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sFNC1_SECOND_POSITION = nullptr; CBC_QRCoderMode* CBC_QRCoderMode::sSTRUCTURED_APPEND = nullptr; CBC_QRCoderMode::CBC_QRCoderMode(std::vector<int32_t> charCountBits, int32_t bits, ByteString name) : m_characterCountBitsForVersions(std::move(charCountBits)), m_bits(bits), m_name(name) {} CBC_QRCoderMode::~CBC_QRCoderMode() {} void CBC_QRCoderMode::Initialize() { sBYTE = new CBC_QRCoderMode({8, 16, 16}, 0x4, "BYTE"); sALPHANUMERIC = new CBC_QRCoderMode({9, 11, 13}, 0x2, "ALPHANUMERIC"); sECI = new CBC_QRCoderMode(std::vector<int32_t>(), 0x7, "ECI"); sKANJI = new 
CBC_QRCoderMode({8, 10, 12}, 0x8, "KANJI"); sNUMERIC = new CBC_QRCoderMode({10, 12, 14}, 0x1, "NUMERIC"); sGBK = new CBC_QRCoderMode({8, 10, 12}, 0x0D, "GBK"); sTERMINATOR = new CBC_QRCoderMode(std::vector<int32_t>(), 0x00, "TERMINATOR"); sFNC1_FIRST_POSITION = new CBC_QRCoderMode(std::vector<int32_t>(), 0x05, "FNC1_FIRST_POSITION"); sFNC1_SECOND_POSITION = new CBC_QRCoderMode(std::vector<int32_t>(), 0x09, "FNC1_SECOND_POSITION"); sSTRUCTURED_APPEND = new CBC_QRCoderMode(std::vector<int32_t>(), 0x03, "STRUCTURED_APPEND"); } void CBC_QRCoderMode::Finalize() { delete sBYTE; delete sALPHANUMERIC; delete sECI; delete sKANJI; delete sNUMERIC; delete sGBK; delete sTERMINATOR; delete sFNC1_FIRST_POSITION; delete sFNC1_SECOND_POSITION; delete sSTRUCTURED_APPEND; } CBC_QRCoderMode* CBC_QRCoderMode::ForBits(int32_t bits, int32_t& e) { switch (bits) { case 0x0: return sTERMINATOR; case 0x1: return sNUMERIC; case 0x2: return sALPHANUMERIC; case 0x3: return sSTRUCTURED_APPEND; case 0x4: return sBYTE; case 0x5: return sFNC1_FIRST_POSITION; case 0x7: return sECI; case 0x8: return sKANJI; case 0x9: return sFNC1_SECOND_POSITION; case 0x0D: return sGBK; default: e = BCExceptionUnsupportedMode; return nullptr; } } int32_t CBC_QRCoderMode::GetBits() const { return m_bits; } ByteString CBC_QRCoderMode::GetName() const { return m_name; } int32_t CBC_QRCoderMode::GetCharacterCountBits(int32_t number, int32_t& e) const { if (m_characterCountBitsForVersions.empty()) { e = BCExceptionCharacterNotThisMode; return 0; } int32_t offset; if (number <= 9) { offset = 0; } else if (number <= 26) { offset = 1; } else { offset = 2; } return m_characterCountBitsForVersions[offset]; } void CBC_QRCoderMode::Destroy() { if (sBYTE) { delete CBC_QRCoderMode::sBYTE; sBYTE = nullptr; } if (sNUMERIC) { delete CBC_QRCoderMode::sNUMERIC; sNUMERIC = nullptr; } if (sALPHANUMERIC) { delete CBC_QRCoderMode::sALPHANUMERIC; sALPHANUMERIC = nullptr; } if (sKANJI) { delete CBC_QRCoderMode::sKANJI; sKANJI = nullptr; } 
if (sECI) { delete CBC_QRCoderMode::sECI; sECI = nullptr; } if (sGBK) { delete CBC_QRCoderMode::sGBK; sGBK = nullptr; } if (sTERMINATOR) { delete CBC_QRCoderMode::sTERMINATOR; sTERMINATOR = nullptr; } if (sFNC1_FIRST_POSITION) { delete CBC_QRCoderMode::sFNC1_FIRST_POSITION; sFNC1_FIRST_POSITION = nullptr; } if (sFNC1_SECOND_POSITION) { delete CBC_QRCoderMode::sFNC1_SECOND_POSITION; sFNC1_SECOND_POSITION = nullptr; } if (sSTRUCTURED_APPEND) { delete CBC_QRCoderMode::sSTRUCTURED_APPEND; sSTRUCTURED_APPEND = nullptr; } }
2,223
493
<filename>HtmlNativeAndroid/htmlnative-lib/src/main/java/com/mozz/htmlnative/exception/AttrApplyException.java package com.mozz.htmlnative.exception; /** * @author <NAME>, 17/3/3. */ public class AttrApplyException extends Exception { public AttrApplyException() { super(); } public AttrApplyException(Throwable cause){ super(cause); } public AttrApplyException(String msg, Throwable cause){ super(msg, cause); } public AttrApplyException(String msg) { super(msg); } }
204
49,076
/** * Classes adapting Spring's WebSocket API to and from WebSocket providers. */ @NonNullApi @NonNullFields package org.springframework.web.socket.adapter; import org.springframework.lang.NonNullApi; import org.springframework.lang.NonNullFields;
75
703
#include <Texture/TexturePCH.h> #include <Texture/Image/ImageUtils.h> #include <Texture/TexConv/TexConvProcessor.h> ezResult ezTexConvProcessor::Assemble3DTexture(ezImage& dst) const { const auto& images = m_Descriptor.m_InputImages; return ezImageUtils::CreateVolumeTextureFromSingleFile(dst, images[0]); } EZ_STATICLINK_FILE(Texture, Texture_TexConv_Implementation_Texture3D);
141
2,542
<filename>src/prod/src/Common/asyncfile.test.cpp // ------------------------------------------------------------ // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License (MIT). See License.txt in the repo root for license information. // ------------------------------------------------------------ #include "stdafx.h" using namespace Common; using namespace std; #include <boost/test/unit_test.hpp> #include "Common/boost-taef.h" #include "Common/Common.h" #include "Common/AssertWF.h" Common::StringLiteral ReadFileAsyncTestTraceType("ReadFileAsyncOperationTest"); namespace Common { class AsyncFileTest : public TextTraceComponent<TraceTaskCodes::Common> { public: wstring GetPath() { wchar_t tempPath[MAX_PATH]; DWORD dwRetVal = ::GetCurrentDirectory(MAX_PATH, tempPath); ASSERT_IF(dwRetVal <= 0, "GetCurrentDirectory returned {0}", dwRetVal); return wstring(tempPath); } DWORD WriteTestFile(std::wstring const & filePath, BOOL isLargeFileTest) { // This is synchronous because FILE_FLAG_OVERLAPPED within the dwFlagsAndAttributes parameter is not set. HANDLE fileHandle = CreateFile( filePath.c_str(), GENERIC_READ | GENERIC_WRITE, FILE_SHARE_DELETE, NULL, CREATE_ALWAYS, FILE_ATTRIBUTE_NORMAL, NULL); HRESULT error = S_OK; if (fileHandle == INVALID_HANDLE_VALUE) { error = GetLastError(); error = HRESULT_FROM_WIN32(error); // Return from the test with error VERIFY_FAIL(L"File handle is invalid. Last error: {0}", error); } BOOL success = true; std::wstring DataBuffer = L"This is some test data to write to the file."; DWORD dwBytesToWrite = (DWORD)wcslen(DataBuffer.c_str())* sizeof(wchar_t); DWORD dwBytesWritten = 0; DWORD bytes = 0; DWORD totalBytes = 0; if (isLargeFileTest) { totalBytes = 1024 * 1024 * 10; } else { totalBytes = dwBytesToWrite; } while (bytes < totalBytes) { // Reset this to 0 so we don't get false readings to bytesWritten dwBytesWritten = 0; // fileHandle is synchronous, which means that WriteFile is synchronous. 
success = WriteFile(fileHandle, DataBuffer.c_str(), dwBytesToWrite, &dwBytesWritten, NULL); if (!success) { // Gets the last error on the thread. error = GetLastError(); WriteWarning(ReadFileAsyncTestTraceType, "Unable to write to file {0} with error: {1}", filePath, error); // Retry writing if we were unable to write. continue; } // This will make sure that the file is actually writing something. VERIFY_ARE_EQUAL(dwBytesWritten, dwBytesToWrite); bytes += dwBytesWritten; } WriteInfo(ReadFileAsyncTestTraceType, "Bytes Written : {0}", bytes); VERIFY_IS_TRUE(success); success = CloseHandle(fileHandle); VERIFY_IS_TRUE(success); return bytes; } void DeleteTestFile(std::wstring const & filePath) { BOOL success = DeleteFile(filePath.c_str()); WriteInfo(ReadFileAsyncTestTraceType,"Deleting from path {0}", filePath.c_str()); if (!success) { DWORD error = GetLastError(); WriteError(ReadFileAsyncTestTraceType, "Writing Error:{0}", error); } VERIFY_IS_TRUE(success); } void ReadTestFile(std::wstring const & filePath, DWORD bytesWritten, TimeSpan timeout) { VERIFY_IS_FALSE(filePath.size() == 0); ByteBuffer buffer; ManualResetEvent requestCompleted(false); AsyncFile::BeginReadFile( filePath, timeout, [this, filePath, bytesWritten, &buffer, &requestCompleted](AsyncOperationSPtr const& operation) { WriteInfo(ReadFileAsyncTestTraceType, "File Path:{0}", filePath); auto error = AsyncFile::EndReadFile(operation, buffer); VERIFY_IS_TRUE(error.IsSuccess()); requestCompleted.Set(); }, AsyncOperationSPtr()); VERIFY_IS_TRUE(requestCompleted.WaitOne(timeout)); // Verification and test clean up DWORD bytesRead = static_cast<DWORD>(buffer.size()); WriteInfo(ReadFileAsyncTestTraceType, "Bytes Read :{0}", bytesRead); // BytesWritten is already printed in WriteTestFile VERIFY_ARE_EQUAL(bytesWritten, bytesRead); DeleteTestFile(filePath); } }; BOOST_FIXTURE_TEST_SUITE(AsyncFileTestSuite, AsyncFileTest) BOOST_AUTO_TEST_CASE(ReadFileTest) { // Get Path wstring tempPath = AsyncFileTest::GetPath(); // 
Concatenating Path with folder Name wstring path = Path::Combine(tempPath, L"ReadFileTest"); VERIFY_IS_TRUE(Directory::Create2(path).IsSuccess()); // Concatenating Temp Path with file Name path = Path::Combine(path, L"TestFile.txt"); DWORD bytesWritten = WriteTestFile(path, false); ReadTestFile(path, bytesWritten, TimeSpan::MaxValue); } // BOOST_AUTO_TEST_CASE(ReadLargeFileTest) // { // // Get Path // wstring tempPath = AsyncFileTest::GetPath(); // // Concatenating Path with folder Name // wstring path = Path::Combine(tempPath, L"ReadLargeFileTest"); // VERIFY_IS_TRUE(Directory::Create2(path).IsSuccess()); // // Concatenating Path with file Name // path = Path::Combine(path, L"TestLargeFile.txt"); // DWORD bytesWritten = WriteTestFile(path, true); // ReadTestFile(path, bytesWritten, TimeSpan::MaxValue); // } BOOST_AUTO_TEST_SUITE_END() }
2,985
1,168
<reponame>wcalandro/kythe<filename>kythe/go/extractors/gcp/config/testdata/guava-mvn.json { "repo": "https://github.com/google/guava", "extractions": [ { "build_system": "MAVEN", "corpus": "github.com/google/guava" } ] }
118
526
/* SPDX-License-Identifier: Apache-2.0 */ /* Copyright Contributors to the ODPi Egeria project. */ package org.odpi.openmetadata.accessservices.assetmanager.properties; import com.fasterxml.jackson.annotation.JsonAutoDetect; import com.fasterxml.jackson.annotation.JsonIgnoreProperties; import com.fasterxml.jackson.annotation.JsonInclude; import java.io.Serializable; import static com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility.NONE; import static com.fasterxml.jackson.annotation.JsonAutoDetect.Visibility.PUBLIC_ONLY; /** * A GlossaryTermRelationshipStatus defines the status of a relationship with a glossary term. */ @JsonAutoDetect(getterVisibility=PUBLIC_ONLY, setterVisibility=PUBLIC_ONLY, fieldVisibility=NONE) @JsonInclude(JsonInclude.Include.NON_NULL) @JsonIgnoreProperties(ignoreUnknown=true) public enum GlossaryTermRelationshipStatus implements Serializable { DRAFT (0, 0, "Draft", "The term relationship is in development."), ACTIVE (1, 1, "Active", "The term relationship is approved and in use."), DEPRECATED (2, 2, "Deprecated", "The term relationship should no longer be used."), OBSOLETE (3, 3, "Obsolete", "The term relationship must no longer be used."), OTHER (99, 99, "Other", "Another term relationship status."); public static final String ENUM_TYPE_GUID = "42282652-7d60-435e-ad3e-7cfe5291bcc7"; public static final String ENUM_TYPE_NAME = "TermRelationshipStatus"; private int openTypeOrdinal; private int ordinal; private String name; private String description; private static final long serialVersionUID = 1L; /** * Constructor to set up the instance of this enum. 
* * @param ordinal code number * @param openTypeOrdinal code number from the equivalent Enum Type * @param name default name * @param description default description */ GlossaryTermRelationshipStatus(int ordinal, int openTypeOrdinal, String name, String description) { this.ordinal = ordinal; this.openTypeOrdinal = openTypeOrdinal; this.name = name; this.description = description; } /** * Return the code for this enum instance * * @return int key pattern code */ public int getOrdinal() { return ordinal; } /** * Return the default name for this enum instance. * * @return String default name */ public String getName() { return name; } /** * Return the default description for the key pattern for this enum instance. * * @return String default description */ public String getDescription() { return description; } /** * Return the code for this enum that comes from the Open Metadata Type that this enum represents. * * @return int code number */ public int getOpenTypeOrdinal() { return openTypeOrdinal; } /** * Return the unique identifier for the open metadata enum type that this enum class represents. * * @return string guid */ public String getOpenTypeGUID() { return ENUM_TYPE_GUID; } /** * Return the unique name for the open metadata enum type that this enum class represents. * * @return string name */ public String getOpenTypeName() { return ENUM_TYPE_NAME; } /** * Standard toString method. * * @return print out of variables in a JSON-style */ @Override public String toString() { return "GlossaryTermRelationshipStatus{" + "openTypeOrdinal=" + openTypeOrdinal + ", ordinal=" + ordinal + ", name='" + name + '\'' + ", description='" + description + '\'' + ", openTypeGUID='" + getOpenTypeGUID() + '\'' + ", openTypeName='" + getOpenTypeName() + '\'' + '}'; } }
1,695
6,717
<filename>deps/3rdparty/cassowary-0.60/c++/ClC.cc<gh_stars>1000+ /* $Id: ClC.cc,v 1.22 1999/08/27 00:06:27 gjb Exp $ * Cassowary Incremental Constraint Solver * Original Smalltalk Implementation by <NAME> * This C++ Implementation by <NAME>, <<EMAIL>> * http://www.cs.washington.edu/homes/gjb * (C) 1998, 1999 <NAME> and <NAME> * See ../LICENSE for legal details regarding this software * * ClC.c * C wrapper for most important Cassowary functionality * (Originally developed for the Amaya CCSS extensions) */ #define CLC_IMPL #include "ClC.h" #include "Cl.h" #include <strstream.h> #include <stdio.h> #include <limits.h> #ifdef HAVE_CONFIG_H #include <cassowary/config.h> #define CONFIG_H_INCLUDED #endif #define FDN_EOL LONG_MIN #define boolean int extern "C" { typedef StringToVarMap *CL_VarMap; CL_VarMap varmap = NULL; /* Functions unique to the C interface */ void CL_Init() { } void CL_Shutdown() { /* empty */ } CL_VarMap CL_VarMapNew() { return new StringToVarMap(); } CL_VarMap CL_SetVarMap(CL_VarMap varmap_) { if (varmap_ == NULL) varmap_ = CL_VarMapNew(); varmap = varmap_; ClVariable::SetVarMap(varmap); return varmap; } CL_VarMap CL_GetVarMap() { return varmap; } /* return true iff the variable did exist in the current global varmap, else return false */ boolean CL_VarMapDelete(const char *sz) { const string s(sz); StringToVarMap::iterator it = varmap->find(s); if (it != varmap->end()) { varmap->erase(it); return true; } return false; } /* Return a new ClVariable with name and initial Value */ CLV CL_ClvNew(const char *szName, double Value, CL_SimplexSolver solver) { ClVariable *pclv = new ClVariable(szName,Value); if (solver) solver->AddStay(*pclv); #if 0 fprintf(stderr,"Created var %s @ %p\n",szName,pclv->get_pclv()); #endif return pclv; } void CL_VariableSetPv(CLV var, void *pv) { var->SetPv(pv); } void *CL_VariablePv(CLV var) { return var->Pv(); } const char * CL_VariableName(CLV var) { return var->Name().c_str(); } /* Return a new ClSimplexSolver object */ 
CL_SimplexSolver CL_SimplexSolverNew() { ClSimplexSolver *psolver = new ClSimplexSolver(); return psolver; } /* Print the ClVariable out to the given FILE * */ void CL_ClvPrint(CLV var, FILE *out) { strstream xo; xo << *var << ends; fprintf(out,"%s",xo.str()); } /* Print the ClSolver object out to the given FILE * */ void CL_SolverPrint(CL_Solver solver, FILE *out) { strstream xo; xo << *solver << ends; fprintf(out,"%s",xo.str()); } /* Print the constraint object out to the given FILE * */ void CL_ConstraintPrint(CL_Constraint pcn, FILE *out) { strstream xo; xo << *pcn << ends; fprintf(out,"%s",xo.str()); } void CL_TableauPrintExternalVariables(CL_Tableau tableau, FILE *out) { strstream xo; tableau->printExternalVariablesTo(xo); xo << ends; fprintf(out,"%s",xo.str()); } void CL_SolverSetChangeClvCallback(CL_Solver solver, PfnChangeClvCallback pfn) { solver->SetChangeClvCallback(pfn); } void CL_SimplexSolverAddStrongStay(CL_SimplexSolver solver, CLV var, double weight) { solver->AddStay(*var,ClsMedium(),weight); } void CL_SimplexSolverAddStay(CL_SimplexSolver solver, CLV var, double weight) { solver->AddStay(*var,ClsWeak(),weight); } /* Return a clvariable with the given name, or NULL if not found; be forgiving about leading/trailing whitespace in szNameConst, and also stopping once a non-id character is seen.*/ CLV CL_ClvLookupTrim(const char *szNameConst) { char *szName = const_cast<char *>(szNameConst); // skip leading ws while (szName && *szName && *szName == ' ' || *szName == '\t') ++szName; char *pchEnd = szName + strcspn(szName," \t\n;:<>,./?~!@#$%^&*()+=|\\{}[]\"`'"); char ch = '\0'; if (pchEnd) { ch = *pchEnd; // save the character *pchEnd = '\0'; // and terminate the string } CLV answer = CL_ClvLookup(szName); if (pchEnd) *pchEnd = ch; // restore the character return answer; } /* Return a clvariable with the given name, or NULL if not found */ CLV CL_ClvLookup(const char *szName) { if (!varmap) return NULL; StringToVarMap::iterator it = 
varmap->find(szName); if (it == varmap->end()) return NULL; ClVariable *pclv = new ClVariable(it->second); return pclv; } /* Return the Value of clv */ double CL_ClvValue(const CLV var) { return var->Value(); } boolean CL_ClvIsNil(const CLV var) { return var->IsNil(); } /* Return a new constraint (or NULL) from parsing the strings */ CL_Constraint CL_ParseConstraint(const char *szConstraintRule, const char *szConstraintStrength) { try { istrstream xiLine(szConstraintRule); ClConstraint *pcn = PcnParseConstraint(xiLine,ClVarLookupInMap(varmap,false), ClsFromSz(szConstraintStrength)); return pcn; } catch (ExCLError &e) { fprintf(stderr,"%s\n",e.description().c_str()); return NULL; } } boolean CL_FIsSatisfied(ClConstraint *pcn) { return pcn->FIsSatisfied(); } /* Add a constraint to the solver; return 1 on success, 0 on failure */ boolean CL_AddConstraint(CL_Solver solver, CL_Constraint cn) { try { return (solver->AddConstraintNoException(cn)?1:0); } catch (...) { return 0; } } /* Add a constraint to the solver; return 1 on success, 0 on failure */ boolean CL_RemoveConstraint(CL_Solver solver, CL_Constraint cn) { try { return (solver->RemoveConstraintNoException(cn)?1:0); } catch (...) { return 0; } } void CL_Solve(CL_Solver solver) { solver->Solve(); } void CL_Resolve(CL_Solver solver) { solver->Resolve(); } void CL_SimplexSolverSetEditedValue(CL_SimplexSolver solver, CLV var, double n) { solver->SetEditedValue(*var,n); } #if defined(CL_HAVE_GTL) && defined(CL_BUILD_FD_SOLVER) #include <stdarg.h> /* Return a new ClVariable containing an FD variable with name and varargs domain_values as its initial domain, terminated with FDN_EOL */ CLV CL_CldvNew(const char *szName, ...) 
{ va_list ap; va_start(ap, szName); list<FDNumber> l; FDNumber n; while ( (n = va_arg(ap, FDNumber)) != FDN_EOL) { l.push_back(n); } va_end(ap); ClVariable *pclv = new ClVariable(new ClFDVariable(szName,0.0,l)); #if 0 fprintf(stderr,"Created fd var %s @ %p\n",szName,pclv->get_pclv()); #endif return pclv; } CL_FDSolver CL_FDSolverNew() { ClFDSolver *psolver = new ClFDSolver(); return psolver; } boolean CL_ClvIsFD(const CLV var) { return var->IsFDVariable(); } boolean CL_FDCanConvertCn(CL_Constraint cn) { return ClFDBinaryOneWayConstraint::FCanConvertCn(*cn); } CL_Constraint CL_FDCnFromCn(CL_Constraint cn) { try { return new ClFDBinaryOneWayConstraint(*cn); } catch (...) { return NULL; } } boolean CL_FCnOkayForSimplexSolver(CL_Constraint cn) { return cn->FIsOkayForSimplexSolver(); } #endif } /* extern "C" */
2,823
955
<filename>tslearn/metrics/dtw_variants.py<gh_stars>100-1000 import warnings import numpy from numba import njit, prange from sklearn.metrics.pairwise import pairwise_distances from tslearn.utils import to_time_series from .utils import _cdist_generic __author__ = '<NAME> <EMAIL>[<EMAIL>' GLOBAL_CONSTRAINT_CODE = {None: 0, "": 0, "itakura": 1, "sakoe_chiba": 2} @njit() def _local_squared_dist(x, y): dist = 0. for di in range(x.shape[0]): diff = (x[di] - y[di]) dist += diff * diff return dist @njit() def njit_accumulated_matrix(s1, s2, mask): """Compute the accumulated cost matrix score between two time series. Parameters ---------- s1 : array, shape = (sz1,) First time series. s2 : array, shape = (sz2,) Second time series mask : array, shape = (sz1, sz2) Mask. Unconsidered cells must have infinite values. Returns ------- mat : array, shape = (sz1, sz2) Accumulated cost matrix. """ l1 = s1.shape[0] l2 = s2.shape[0] cum_sum = numpy.full((l1 + 1, l2 + 1), numpy.inf) cum_sum[0, 0] = 0. for i in range(l1): for j in range(l2): if numpy.isfinite(mask[i, j]): cum_sum[i + 1, j + 1] = _local_squared_dist(s1[i], s2[j]) cum_sum[i + 1, j + 1] += min(cum_sum[i, j + 1], cum_sum[i + 1, j], cum_sum[i, j]) return cum_sum[1:, 1:] @njit(nogil=True) def njit_dtw(s1, s2, mask): """Compute the dynamic time warping score between two time series. Parameters ---------- s1 : array, shape = (sz1,) First time series. s2 : array, shape = (sz2,) Second time series mask : array, shape = (sz1, sz2) Mask. Unconsidered cells must have infinite values. Returns ------- dtw_score : float Dynamic Time Warping score between both time series. 
""" cum_sum = njit_accumulated_matrix(s1, s2, mask) return numpy.sqrt(cum_sum[-1, -1]) @njit() def _return_path(acc_cost_mat): sz1, sz2 = acc_cost_mat.shape path = [(sz1 - 1, sz2 - 1)] while path[-1] != (0, 0): i, j = path[-1] if i == 0: path.append((0, j - 1)) elif j == 0: path.append((i - 1, 0)) else: arr = numpy.array([acc_cost_mat[i - 1][j - 1], acc_cost_mat[i - 1][j], acc_cost_mat[i][j - 1]]) argmin = numpy.argmin(arr) if argmin == 0: path.append((i - 1, j - 1)) elif argmin == 1: path.append((i - 1, j)) else: path.append((i, j - 1)) return path[::-1] def dtw_path(s1, s2, global_constraint=None, sakoe_chiba_radius=None, itakura_max_slope=None): r"""Compute Dynamic Time Warping (DTW) similarity measure between (possibly multidimensional) time series and return both the path and the similarity. DTW is computed as the Euclidean distance between aligned time series, i.e., if :math:`\pi` is the alignment path: .. math:: DTW(X, Y) = \sqrt{\sum_{(i, j) \in \pi} (X_{i} - Y_{j})^2} It is not required that both time series share the same size, but they must be the same dimension. DTW was originally presented in [1]_ and is discussed in more details in our :ref:`dedicated user-guide page <dtw>`. Parameters ---------- s1 A time series. s2 Another time series. global_constraint : {"itakura", "sakoe_chiba"} or None (default: None) Global constraint to restrict admissible paths for DTW. sakoe_chiba_radius : int or None (default: None) Radius to be used for Sakoe-Chiba band global constraint. If None and `global_constraint` is set to "sakoe_chiba", a radius of 1 is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. In this case, if `global_constraint` corresponds to no global constraint, a `RuntimeWarning` is raised and no global constraint is used. itakura_max_slope : float or None (default: None) Maximum slope for the Itakura parallelogram constraint. 
If None and `global_constraint` is set to "itakura", a maximum slope of 2. is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. In this case, if `global_constraint` corresponds to no global constraint, a `RuntimeWarning` is raised and no global constraint is used. Returns ------- list of integer pairs Matching path represented as a list of index pairs. In each pair, the first index corresponds to s1 and the second one corresponds to s2 float Similarity score Examples -------- >>> path, dist = dtw_path([1, 2, 3], [1., 2., 2., 3.]) >>> path [(0, 0), (1, 1), (1, 2), (2, 3)] >>> dist 0.0 >>> dtw_path([1, 2, 3], [1., 2., 2., 3., 4.])[1] 1.0 See Also -------- dtw : Get only the similarity score for DTW cdist_dtw : Cross similarity matrix between time series datasets dtw_path_from_metric : Compute a DTW using a user-defined distance metric References ---------- .. [1] <NAME>, <NAME>, "Dynamic programming algorithm optimization for spoken word recognition," IEEE Transactions on Acoustics, Speech and Signal Processing, vol. 26(1), pp. 43--49, 1978. """ s1 = to_time_series(s1, remove_nans=True) s2 = to_time_series(s2, remove_nans=True) if len(s1) == 0 or len(s2) == 0: raise ValueError( "One of the input time series contains only nans or has zero length.") if s1.shape[1] != s2.shape[1]: raise ValueError("All input time series must have the same feature size.") mask = compute_mask( s1, s2, GLOBAL_CONSTRAINT_CODE[global_constraint], sakoe_chiba_radius, itakura_max_slope ) acc_cost_mat = njit_accumulated_matrix(s1, s2, mask=mask) path = _return_path(acc_cost_mat) return path, numpy.sqrt(acc_cost_mat[-1, -1]) @njit() def njit_accumulated_matrix_from_dist_matrix(dist_matrix, mask): """Compute the accumulated cost matrix score between two time series using a precomputed distance matrix. Parameters ---------- dist_matrix : array, shape = (sz1, sz2) Array containing the pairwise distances. 
mask : array, shape = (sz1, sz2) Mask. Unconsidered cells must have infinite values. Returns ------- mat : array, shape = (sz1, sz2) Accumulated cost matrix. """ l1, l2 = dist_matrix.shape cum_sum = numpy.full((l1 + 1, l2 + 1), numpy.inf) cum_sum[0, 0] = 0. for i in prange(l1): for j in prange(l2): if numpy.isfinite(mask[i, j]): cum_sum[i + 1, j + 1] = dist_matrix[i, j] cum_sum[i + 1, j + 1] += min(cum_sum[i, j + 1], cum_sum[i + 1, j], cum_sum[i, j]) return cum_sum[1:, 1:] def dtw_path_from_metric(s1, s2=None, metric="euclidean", global_constraint=None, sakoe_chiba_radius=None, itakura_max_slope=None, **kwds): r"""Compute Dynamic Time Warping (DTW) similarity measure between (possibly multidimensional) time series using a distance metric defined by the user and return both the path and the similarity. Similarity is computed as the cumulative cost along the aligned time series. It is not required that both time series share the same size, but they must be the same dimension. DTW was originally presented in [1]_. Valid values for metric are the same as for scikit-learn `pairwise_distances`_ function i.e. a string (e.g. "euclidean", "sqeuclidean", "hamming") or a function that is used to compute the pairwise distances. See `scikit`_ and `scipy`_ documentations for more information about the available metrics. Parameters ---------- s1 : array, shape = (sz1, d) if metric!="precomputed", (sz1, sz2) otherwise A time series or an array of pairwise distances between samples. s2 : array, shape = (sz2, d), optional (default: None) A second time series, only allowed if metric != "precomputed". metric : string or callable (default: "euclidean") Function used to compute the pairwise distances between each points of `s1` and `s2`. If metric is "precomputed", `s1` is assumed to be a distance matrix. If metric is an other string, it must be one of the options compatible with sklearn.metrics.pairwise_distances. 
Alternatively, if metric is a callable function, it is called on pairs of rows of `s1` and `s2`. The callable should take two 1 dimensional arrays as input and return a value indicating the distance between them. global_constraint : {"itakura", "sakoe_chiba"} or None (default: None) Global constraint to restrict admissible paths for DTW. sakoe_chiba_radius : int or None (default: None) Radius to be used for Sakoe-Chiba band global constraint. If None and `global_constraint` is set to "sakoe_chiba", a radius of 1 is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. In this case, if `global_constraint` corresponds to no global constraint, a `RuntimeWarning` is raised and no global constraint is used. itakura_max_slope : float or None (default: None) Maximum slope for the Itakura parallelogram constraint. If None and `global_constraint` is set to "itakura", a maximum slope of 2. is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. In this case, if `global_constraint` corresponds to no global constraint, a `RuntimeWarning` is raised and no global constraint is used. **kwds Additional arguments to pass to sklearn pairwise_distances to compute the pairwise distances. Returns ------- list of integer pairs Matching path represented as a list of index pairs. In each pair, the first index corresponds to s1 and the second one corresponds to s2. float Similarity score (sum of metric along the wrapped time series). Examples -------- Lets create 2 numpy arrays to wrap: >>> import numpy as np >>> rng = np.random.RandomState(0) >>> s1, s2 = rng.rand(5, 2), rng.rand(6, 2) The wrapping can be done by passing a string indicating the metric to pass to scikit-learn pairwise_distances: >>> dtw_path_from_metric(s1, s2, ... 
metric="sqeuclidean") # doctest: +ELLIPSIS ([(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], 1.117...) Or by defining a custom distance function: >>> sqeuclidean = lambda x, y: np.sum((x-y)**2) >>> dtw_path_from_metric(s1, s2, metric=sqeuclidean) # doctest: +ELLIPSIS ([(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], 1.117...) Or by using a precomputed distance matrix as input: >>> from sklearn.metrics.pairwise import pairwise_distances >>> dist_matrix = pairwise_distances(s1, s2, metric="sqeuclidean") >>> dtw_path_from_metric(dist_matrix, ... metric="precomputed") # doctest: +ELLIPSIS ([(0, 0), (0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], 1.117...) Notes -------- By using a squared euclidean distance metric as shown above, the output path is the same as the one obtained by using dtw_path but the similarity score is the sum of squared distances instead of the euclidean distance. See Also -------- dtw_path : Get both the matching path and the similarity score for DTW References ---------- .. [1] <NAME>, <NAME>, "Dynamic programming algorithm optimization for spoken word recognition," IEEE Transactions on Acoustics, Speech and Signal Processing, vol. 26(1), pp. 43--49, 1978. .. _pairwise_distances: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html .. _scikit: https://scikit-learn.org/stable/modules/metrics.html .. 
_scipy: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html """ # noqa: E501 if metric == "precomputed": # Pairwise distance given as input sz1, sz2 = s1.shape mask = compute_mask( sz1, sz2, GLOBAL_CONSTRAINT_CODE[global_constraint], sakoe_chiba_radius, itakura_max_slope ) dist_mat = s1 else: s1 = to_time_series(s1, remove_nans=True) s2 = to_time_series(s2, remove_nans=True) mask = compute_mask( s1, s2, GLOBAL_CONSTRAINT_CODE[global_constraint], sakoe_chiba_radius, itakura_max_slope ) dist_mat = pairwise_distances(s1, s2, metric=metric, **kwds) acc_cost_mat = njit_accumulated_matrix_from_dist_matrix(dist_mat, mask) path = _return_path(acc_cost_mat) return path, acc_cost_mat[-1, -1] def dtw(s1, s2, global_constraint=None, sakoe_chiba_radius=None, itakura_max_slope=None): r"""Compute Dynamic Time Warping (DTW) similarity measure between (possibly multidimensional) time series and return it. DTW is computed as the Euclidean distance between aligned time series, i.e., if :math:`\pi` is the optimal alignment path: .. math:: DTW(X, Y) = \sqrt{\sum_{(i, j) \in \pi} \|X_{i} - Y_{j}\|^2} Note that this formula is still valid for the multivariate case. It is not required that both time series share the same size, but they must be the same dimension. DTW was originally presented in [1]_ and is discussed in more details in our :ref:`dedicated user-guide page <dtw>`. Parameters ---------- s1 A time series. s2 Another time series. global_constraint : {"itakura", "sakoe_chiba"} or None (default: None) Global constraint to restrict admissible paths for DTW. sakoe_chiba_radius : int or None (default: None) Radius to be used for Sakoe-Chiba band global constraint. If None and `global_constraint` is set to "sakoe_chiba", a radius of 1 is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. 
In this case, if `global_constraint` corresponds to no global
        constraint, a `RuntimeWarning` is raised and no global constraint is
        used.

    itakura_max_slope : float or None (default: None)
        Maximum slope for the Itakura parallelogram constraint.
        If None and `global_constraint` is set to "itakura", a maximum slope
        of 2. is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among the
        two. In this case, if `global_constraint` corresponds to no global
        constraint, a `RuntimeWarning` is raised and no global constraint is
        used.

    Returns
    -------
    float
        Similarity score

    Examples
    --------
    >>> dtw([1, 2, 3], [1., 2., 2., 3.])
    0.0
    >>> dtw([1, 2, 3], [1., 2., 2., 3., 4.])
    1.0

    See Also
    --------
    dtw_path : Get both the matching path and the similarity score for DTW
    cdist_dtw : Cross similarity matrix between time series datasets

    References
    ----------
    .. [1] <NAME>, <NAME>, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech
           and Signal Processing, vol. 26(1), pp. 43--49, 1978.

    """
    s1 = to_time_series(s1, remove_nans=True)
    s2 = to_time_series(s2, remove_nans=True)

    if len(s1) == 0 or len(s2) == 0:
        raise ValueError(
            "One of the input time series contains only nans or has zero length.")

    if s1.shape[1] != s2.shape[1]:
        raise ValueError("All input time series must have the same feature size.")

    mask = compute_mask(
        s1, s2, GLOBAL_CONSTRAINT_CODE[global_constraint],
        sakoe_chiba_radius=sakoe_chiba_radius,
        itakura_max_slope=itakura_max_slope)
    return njit_dtw(s1, s2, mask=mask)


def _max_steps(i, j, max_length, length_1, length_2):
    """Maximum number of steps required in an L-DTW process to reach a given
    cell.

    Parameters
    ----------
    i : int
        Cell row index

    j : int
        Cell column index

    max_length : int
        Maximum allowed length

    length_1 : int
        Length of the first time series

    length_2 : int
        Length of the second time series

    Returns
    -------
    int
        Number of steps
    """
    candidate_1 = i + j
    candidate_2 = max_length - max(length_1 - i - 1, length_2 - j - 1)
    return min(candidate_1, candidate_2)


def _limited_warping_length_cost(s1, s2, max_length):
    r"""Compute accumulated scores necessary for L-DTW.

    Parameters
    ----------
    s1
        A time series.

    s2
        Another time series.

    max_length : int
        Maximum allowed warping path length. Callers enforce
        max_length >= max(len(s1), len(s2)); values above
        len(s1) + len(s2) make the constraint inactive.

    Returns
    -------
    dict
        Accumulated scores. This dict associates (i, j) pairs (keys) to
        dictionaries with desired length as key and associated score as value.
    """
    dict_costs = {}
    for i in range(s1.shape[0]):
        for j in range(s2.shape[0]):
            dict_costs[i, j] = {}

    # Init
    dict_costs[0, 0][0] = _local_squared_dist(s1[0], s2[0])
    for i in range(1, s1.shape[0]):
        pred = dict_costs[i - 1, 0][i - 1]
        dict_costs[i, 0][i] = pred + _local_squared_dist(s1[i], s2[0])
    for j in range(1, s2.shape[0]):
        pred = dict_costs[0, j - 1][j - 1]
        dict_costs[0, j][j] = pred + _local_squared_dist(s1[0], s2[j])

    # Main loop
    for i in range(1, s1.shape[0]):
        for j in range(1, s2.shape[0]):
            min_s = max(i, j)
            max_s = _max_steps(i, j, max_length - 1,
                               s1.shape[0], s2.shape[0])
            for s in range(min_s, max_s + 1):
                dict_costs[i, j][s] = _local_squared_dist(s1[i], s2[j])
                dict_costs[i, j][s] += min(
                    dict_costs[i, j - 1].get(s - 1, numpy.inf),
                    dict_costs[i - 1, j].get(s - 1, numpy.inf),
                    dict_costs[i - 1, j - 1].get(s - 1, numpy.inf)
                )
    return dict_costs


def dtw_limited_warping_length(s1, s2, max_length):
    r"""Compute Dynamic Time Warping (DTW) similarity measure between
    (possibly multidimensional) time series under an upper bound constraint
    on the resulting path length and return the similarity cost.
DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`\pi` is the optimal alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in \pi} \|X_{i} - Y_{j}\|^2}

    Note that this formula is still valid for the multivariate case.

    It is not required that both time series share the same size, but they
    must be the same dimension. DTW was originally presented in [1]_.
    This constrained-length variant was introduced in [2]_.
    Both variants are discussed in more details in our
    :ref:`dedicated user-guide page <dtw>`

    Parameters
    ----------
    s1
        A time series.

    s2
        Another time series.

    max_length : int
        Maximum allowed warping path length.
        If greater than len(s1) + len(s2), then it is equivalent to
        unconstrained DTW.
        If lower than max(len(s1), len(s2)), no path can be found and a
        ValueError is raised.

    Returns
    -------
    float
        Similarity score

    Examples
    --------
    >>> dtw_limited_warping_length([1, 2, 3], [1., 2., 2., 3.], 5)
    0.0
    >>> dtw_limited_warping_length([1, 2, 3], [1., 2., 2., 3., 4.], 5)
    1.0

    See Also
    --------
    dtw : Get the similarity score for DTW
    dtw_path_limited_warping_length : Get both the warping path and the
        similarity score for DTW with limited warping path length

    References
    ----------
    .. [1] <NAME>, <NAME>, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech
           and Signal Processing, vol. 26(1), pp. 43--49, 1978.
    .. [2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
           Dynamic time warping under limited warping path length.
           Information Sciences, vol. 393, pp. 91--107, 2017.

    """
    s1 = to_time_series(s1, remove_nans=True)
    s2 = to_time_series(s2, remove_nans=True)

    if max_length < max(s1.shape[0], s2.shape[0]):
        raise ValueError("Cannot find a path of length {} to align given "
                         "time series.".format(max_length))

    accumulated_costs = _limited_warping_length_cost(s1, s2, max_length)
    idx_pair = (s1.shape[0] - 1, s2.shape[0] - 1)
    optimal_cost = min(accumulated_costs[idx_pair].values())
    return numpy.sqrt(optimal_cost)


def _return_path_limited_warping_length(accum_costs, target_indices,
                                        optimal_length):
    # Walk back from the target cell; at each step follow the predecessor
    # whose accumulated cost for (current length - 1) is smallest.
    path = [target_indices]
    cur_length = optimal_length
    while path[-1] != (0, 0):
        i, j = path[-1]
        if i == 0:
            path.append((0, j - 1))
        elif j == 0:
            path.append((i - 1, 0))
        else:
            arr = numpy.array(
                [accum_costs[i - 1, j - 1].get(cur_length - 1, numpy.inf),
                 accum_costs[i - 1, j].get(cur_length - 1, numpy.inf),
                 accum_costs[i, j - 1].get(cur_length - 1, numpy.inf)]
            )
            argmin = numpy.argmin(arr)
            if argmin == 0:
                path.append((i - 1, j - 1))
            elif argmin == 1:
                path.append((i - 1, j))
            else:
                path.append((i, j - 1))
        cur_length -= 1
    return path[::-1]


def dtw_path_limited_warping_length(s1, s2, max_length):
    r"""Compute Dynamic Time Warping (DTW) similarity measure between
    (possibly multidimensional) time series under an upper bound constraint
    on the resulting path length and return the path as well as the
    similarity cost.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`\pi` is the optimal alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in \pi} \|X_{i} - Y_{j}\|^2}

    Note that this formula is still valid for the multivariate case.

    It is not required that both time series share the same size, but they
    must be the same dimension. DTW was originally presented in [1]_.
    This constrained-length variant was introduced in [2]_.
    Both variants are discussed in more details in our
    :ref:`dedicated user-guide page <dtw>`

    Parameters
    ----------
    s1
        A time series.

    s2
        Another time series.

    max_length : int
        Maximum allowed warping path length.
If greater than len(s1) + len(s2), then it is equivalent to
        unconstrained DTW.
        If lower than max(len(s1), len(s2)), no path can be found and a
        ValueError is raised.

    Returns
    -------
    list of integer pairs
        Optimal path

    float
        Similarity score

    Examples
    --------
    >>> path, cost = dtw_path_limited_warping_length([1, 2, 3],
    ...                                              [1., 2., 2., 3.], 5)
    >>> cost
    0.0
    >>> path
    [(0, 0), (1, 1), (1, 2), (2, 3)]
    >>> path, cost = dtw_path_limited_warping_length([1, 2, 3],
    ...                                              [1., 2., 2., 3., 4.], 5)
    >>> cost
    1.0
    >>> path
    [(0, 0), (1, 1), (1, 2), (2, 3), (2, 4)]

    See Also
    --------
    dtw_limited_warping_length : Get the similarity score for DTW
        with limited warping path length
    dtw_path : Get both the matching path and the similarity score for DTW

    References
    ----------
    .. [1] <NAME>, <NAME>, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech
           and Signal Processing, vol. 26(1), pp. 43--49, 1978.
    .. [2] <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>
           Dynamic time warping under limited warping path length.
           Information Sciences, vol. 393, pp. 91--107, 2017.

    """
    s1 = to_time_series(s1, remove_nans=True)
    s2 = to_time_series(s2, remove_nans=True)

    if max_length < max(s1.shape[0], s2.shape[0]):
        raise ValueError("Cannot find a path of length {} to align given "
                         "time series.".format(max_length))

    accumulated_costs = _limited_warping_length_cost(s1, s2, max_length)
    idx_pair = (s1.shape[0] - 1, s2.shape[0] - 1)
    # Among all admissible path lengths for the last cell, keep the cheapest.
    optimal_length = -1
    optimal_cost = numpy.inf
    for k, v in accumulated_costs[idx_pair].items():
        if v < optimal_cost:
            optimal_cost = v
            optimal_length = k
    path = _return_path_limited_warping_length(accumulated_costs,
                                               idx_pair,
                                               optimal_length)
    return path, numpy.sqrt(optimal_cost)


@njit()
def _subsequence_cost_matrix(subseq, longseq):
    # First row initialized to 0 so the subsequence may start anywhere
    # inside the long sequence (relaxed border condition).
    l1 = subseq.shape[0]
    l2 = longseq.shape[0]
    cum_sum = numpy.full((l1 + 1, l2 + 1), numpy.inf)
    cum_sum[0, :] = 0.

    for i in range(l1):
        for j in range(l2):
            cum_sum[i + 1, j + 1] = _local_squared_dist(subseq[i],
                                                        longseq[j])
            cum_sum[i + 1, j + 1] += min(cum_sum[i, j + 1],
                                         cum_sum[i + 1, j],
                                         cum_sum[i, j])
    return cum_sum[1:, 1:]


def subsequence_cost_matrix(subseq, longseq):
    """Compute the accumulated cost matrix score between a subsequence and
    a reference time series.

    Parameters
    ----------
    subseq : array, shape = (sz1, d)
        Subsequence time series.

    longseq : array, shape = (sz2, d)
        Reference time series

    Returns
    -------
    mat : array, shape = (sz1, sz2)
        Accumulated cost matrix.
    """
    return _subsequence_cost_matrix(subseq, longseq)


@njit()
def _subsequence_path(acc_cost_mat, idx_path_end):
    # Backtrack from (last row, idx_path_end) until the first row of the
    # accumulated cost matrix is reached.
    sz1, sz2 = acc_cost_mat.shape
    path = [(sz1 - 1, idx_path_end)]
    while path[-1][0] != 0:
        i, j = path[-1]
        if i == 0:
            path.append((0, j - 1))
        elif j == 0:
            path.append((i - 1, 0))
        else:
            arr = numpy.array([acc_cost_mat[i - 1][j - 1],
                               acc_cost_mat[i - 1][j],
                               acc_cost_mat[i][j - 1]])
            argmin = numpy.argmin(arr)
            if argmin == 0:
                path.append((i - 1, j - 1))
            elif argmin == 1:
                path.append((i - 1, j))
            else:
                path.append((i, j - 1))
    return path[::-1]


def subsequence_path(acc_cost_mat, idx_path_end):
    r"""Compute the optimal path through an accumulated cost matrix given the
    endpoint of the sequence.

    Parameters
    ----------
    acc_cost_mat: array, shape = (sz1, sz2)
        The accumulated cost matrix comparing subsequence from a longer
        sequence.

    idx_path_end: int
        The end position of the matched subsequence in the longer sequence.

    Returns
    -------
    path: list of tuples of integer pairs
        Matching path represented as a list of index pairs. In each pair, the
        first index corresponds to `subseq` and the second one corresponds to
        `longseq`. The startpoint of the Path is :math:`P_0 = (0, ?)` and it
        ends at :math:`P_L = (len(subseq)-1, idx\_path\_end)`

    Examples
    --------
    >>> acc_cost_mat = numpy.array([[1., 0., 0., 1., 4.],
    ...                             [5., 1., 1., 0., 1.]])
    >>> # calculate the globally optimal path
    >>> optimal_end_point = numpy.argmin(acc_cost_mat[-1, :])
    >>> path = subsequence_path(acc_cost_mat, optimal_end_point)
    >>> path
    [(0, 2), (1, 3)]

    See Also
    --------
    dtw_subsequence_path : Get the similarity score for DTW
    subsequence_cost_matrix: Calculate the required cost matrix

    """
    return _subsequence_path(acc_cost_mat, idx_path_end)


def dtw_subsequence_path(subseq, longseq):
    r"""Compute sub-sequence Dynamic Time Warping (DTW) similarity measure
    between a (possibly multidimensional) query and a long time series and
    return both the path and the similarity.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`\pi` is the alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in \pi} \|X_{i} - Y_{j}\|^2}

    Compared to traditional DTW, here, border constraints on admissible
    paths :math:`\pi` are relaxed such that :math:`\pi_0 = (0, ?)` and
    :math:`\pi_L = (N-1, ?)` where :math:`L` is the length of the considered
    path and :math:`N` is the length of the subsequence time series.

    It is not required that both time series share the same size, but they
    must be the same dimension. This implementation finds the best matching
    starting and ending positions for `subseq` inside `longseq`.

    Parameters
    ----------
    subseq : array, shape = (sz1, d)
        A query time series.

    longseq : array, shape = (sz2, d)
        A reference (supposed to be longer than `subseq`) time series.

    Returns
    -------
    list of integer pairs
        Matching path represented as a list of index pairs. In each pair, the
        first index corresponds to `subseq` and the second one corresponds to
        `longseq`.

    float
        Similarity score

    Examples
    --------
    >>> path, dist = dtw_subsequence_path([2., 3.], [1., 2., 2., 3., 4.])
    >>> path
    [(0, 2), (1, 3)]
    >>> dist
    0.0

    See Also
    --------
    dtw : Get the similarity score for DTW
    subsequence_cost_matrix: Calculate the required cost matrix
    subsequence_path: Calculate a matching path manually

    """
    subseq = to_time_series(subseq)
    longseq = to_time_series(longseq)
    acc_cost_mat = subsequence_cost_matrix(subseq=subseq,
                                           longseq=longseq)
    # Best ending position is the cheapest cell of the last row.
    global_optimal_match = numpy.argmin(acc_cost_mat[-1, :])
    path = subsequence_path(acc_cost_mat, global_optimal_match)
    return path, numpy.sqrt(acc_cost_mat[-1, :][global_optimal_match])


@njit()
def sakoe_chiba_mask(sz1, sz2, radius=1):
    """Compute the Sakoe-Chiba mask.

    Parameters
    ----------
    sz1 : int
        The size of the first time series

    sz2 : int
        The size of the second time series.

    radius : int
        The radius of the band.

    Returns
    -------
    mask : array, shape = (sz1, sz2)
        Sakoe-Chiba mask.

    Examples
    --------
    >>> sakoe_chiba_mask(4, 4, 1)
    array([[ 0.,  0., inf, inf],
           [ 0.,  0.,  0., inf],
           [inf,  0.,  0.,  0.],
           [inf, inf,  0.,  0.]])
    >>> sakoe_chiba_mask(7, 3, 1)
    array([[ 0.,  0., inf],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  0.],
           [ 0.,  0.,  0.],
           [inf,  0.,  0.]])
    """
    mask = numpy.full((sz1, sz2), numpy.inf)
    # The band is widened along the longer axis so that the corners
    # (0, 0) and (sz1 - 1, sz2 - 1) remain admissible.
    if sz1 > sz2:
        width = sz1 - sz2 + radius
        for i in prange(sz2):
            lower = max(0, i - radius)
            upper = min(sz1, i + width) + 1
            mask[lower:upper, i] = 0.
    else:
        width = sz2 - sz1 + radius
        for i in prange(sz1):
            lower = max(0, i - radius)
            upper = min(sz2, i + width) + 1
            mask[i, lower:upper] = 0.
    return mask


@njit()
def _njit_itakura_mask(sz1, sz2, max_slope=2.):
    """Compute the Itakura mask without checking that the constraints
    are feasible. In most cases, you should use itakura_mask instead.

    Parameters
    ----------
    sz1 : int
        The size of the first time series

    sz2 : int
        The size of the second time series.

    max_slope : float (default = 2)
        The maximum slope of the parallelogram.
Returns
    -------
    mask : array, shape = (sz1, sz2)
        Itakura mask.
    """
    min_slope = 1 / float(max_slope)
    # Rescale the slopes to account for non-square matrices.
    max_slope *= (float(sz1) / float(sz2))
    min_slope *= (float(sz1) / float(sz2))

    lower_bound = numpy.empty((2, sz2))
    lower_bound[0] = min_slope * numpy.arange(sz2)
    lower_bound[1] = ((sz1 - 1) - max_slope * (sz2 - 1)
                      + max_slope * numpy.arange(sz2))
    lower_bound_ = numpy.empty(sz2)
    for i in prange(sz2):
        # round(..., 2) guards against floating-point jitter before ceil.
        lower_bound_[i] = max(round(lower_bound[0, i], 2),
                              round(lower_bound[1, i], 2))
    lower_bound_ = numpy.ceil(lower_bound_)

    upper_bound = numpy.empty((2, sz2))
    upper_bound[0] = max_slope * numpy.arange(sz2)
    upper_bound[1] = ((sz1 - 1) - min_slope * (sz2 - 1)
                      + min_slope * numpy.arange(sz2))
    upper_bound_ = numpy.empty(sz2)
    for i in prange(sz2):
        upper_bound_[i] = min(round(upper_bound[0, i], 2),
                              round(upper_bound[1, i], 2))
    upper_bound_ = numpy.floor(upper_bound_ + 1)

    mask = numpy.full((sz1, sz2), numpy.inf)
    for i in prange(sz2):
        mask[int(lower_bound_[i]):int(upper_bound_[i]), i] = 0.
    return mask


def itakura_mask(sz1, sz2, max_slope=2.):
    """Compute the Itakura mask.

    Parameters
    ----------
    sz1 : int
        The size of the first time series

    sz2 : int
        The size of the second time series.

    max_slope : float (default = 2)
        The maximum slope of the parallelogram.

    Returns
    -------
    mask : array, shape = (sz1, sz2)
        Itakura mask.

    Examples
    --------
    >>> itakura_mask(6, 6)
    array([[ 0., inf, inf, inf, inf, inf],
           [inf,  0.,  0., inf, inf, inf],
           [inf,  0.,  0.,  0., inf, inf],
           [inf, inf,  0.,  0.,  0., inf],
           [inf, inf, inf,  0.,  0., inf],
           [inf, inf, inf, inf, inf,  0.]])
    """
    mask = _njit_itakura_mask(sz1, sz2, max_slope=max_slope)

    # Post-check: every row and every column must contain at least one
    # admissible (finite) cell, otherwise no path can exist.
    raise_warning = False
    for i in prange(sz1):
        if not numpy.any(numpy.isfinite(mask[i])):
            raise_warning = True
            break
    if not raise_warning:
        for j in prange(sz2):
            if not numpy.any(numpy.isfinite(mask[:, j])):
                raise_warning = True
                break
    if raise_warning:
        warnings.warn("'itakura_max_slope' constraint is unfeasible "
                      "(ie. leads to no admissible path) for the "
                      "provided time series sizes",
                      RuntimeWarning)

    return mask


def compute_mask(s1, s2, global_constraint=0,
                 sakoe_chiba_radius=None, itakura_max_slope=None):
    """Compute the mask (region constraint).

    Parameters
    ----------
    s1 : array
        A time series or integer.

    s2: array
        Another time series or integer.

    global_constraint : {0, 1, 2} (default: 0)
        Global constraint to restrict admissible paths for DTW:
        - "itakura" if 1
        - "sakoe_chiba" if 2
        - no constraint otherwise

    sakoe_chiba_radius : int or None (default: None)
        Radius to be used for Sakoe-Chiba band global constraint.
        If None and `global_constraint` is set to 2 (sakoe-chiba), a radius of
        1 is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among the
        two. In this case, if `global_constraint` corresponds to no global
        constraint, a `RuntimeWarning` is raised and no global constraint is
        used.

    itakura_max_slope : float or None (default: None)
        Maximum slope for the Itakura parallelogram constraint.
        If None and `global_constraint` is set to 1 (itakura), a maximum slope
        of 2. is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among the
        two. In this case, if `global_constraint` corresponds to no global
        constraint, a `RuntimeWarning` is raised and no global constraint is
        used.

    Returns
    -------
    mask : array
        Constraint region.
"""
    # The output mask will be of shape (sz1, sz2)
    if isinstance(s1, int) and isinstance(s2, int):
        sz1, sz2 = s1, s2
    else:
        sz1 = s1.shape[0]
        sz2 = s2.shape[0]
    if (global_constraint == 0 and sakoe_chiba_radius is not None
            and itakura_max_slope is not None):
        # NOTE(review): a RuntimeWarning is *raised* here (not warned) --
        # this matches the "is raised" wording in callers' docstrings.
        raise RuntimeWarning("global_constraint is not set for DTW, but both "
                             "sakoe_chiba_radius and itakura_max_slope are "
                             "set, hence global_constraint cannot be inferred "
                             "and no global constraint will be used.")
    if global_constraint == 2 or (global_constraint == 0
                                  and sakoe_chiba_radius is not None):
        if sakoe_chiba_radius is None:
            sakoe_chiba_radius = 1
        mask = sakoe_chiba_mask(sz1, sz2, radius=sakoe_chiba_radius)
    elif global_constraint == 1 or (global_constraint == 0
                                    and itakura_max_slope is not None):
        if itakura_max_slope is None:
            itakura_max_slope = 2.
        mask = itakura_mask(sz1, sz2, max_slope=itakura_max_slope)
    else:
        # No constraint: every cell is admissible.
        mask = numpy.zeros((sz1, sz2))
    return mask


def cdist_dtw(dataset1, dataset2=None, global_constraint=None,
              sakoe_chiba_radius=None, itakura_max_slope=None, n_jobs=None,
              verbose=0):
    r"""Compute cross-similarity matrix using Dynamic Time Warping (DTW)
    similarity measure.

    DTW is computed as the Euclidean distance between aligned time series,
    i.e., if :math:`\pi` is the alignment path:

    .. math::

        DTW(X, Y) = \sqrt{\sum_{(i, j) \in \pi} \|X_{i} - Y_{j}\|^2}

    Note that this formula is still valid for the multivariate case.

    It is not required that time series share the same size, but they must be
    the same dimension. DTW was originally presented in [1]_ and is
    discussed in more details in our :ref:`dedicated user-guide page <dtw>`.

    Parameters
    ----------
    dataset1 : array-like
        A dataset of time series

    dataset2 : array-like (default: None)
        Another dataset of time series. If `None`, self-similarity of
        `dataset1` is returned.

    global_constraint : {"itakura", "sakoe_chiba"} or None (default: None)
        Global constraint to restrict admissible paths for DTW.

    sakoe_chiba_radius : int or None (default: None)
        Radius to be used for Sakoe-Chiba band global constraint.
        If None and `global_constraint` is set to "sakoe_chiba", a radius of 1
        is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among the
        two. In this case, if `global_constraint` corresponds to no global
        constraint, a `RuntimeWarning` is raised and no global constraint is
        used.

    itakura_max_slope : float or None (default: None)
        Maximum slope for the Itakura parallelogram constraint.
        If None and `global_constraint` is set to "itakura", a maximum slope
        of 2. is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among the
        two. In this case, if `global_constraint` corresponds to no global
        constraint, a `RuntimeWarning` is raised and no global constraint is
        used.

    n_jobs : int or None, optional (default=None)
        The number of jobs to run in parallel.
        ``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
        ``-1`` means using all processors. See scikit-learns'
        `Glossary <https://scikit-learn.org/stable/glossary.html#term-n-jobs>`__
        for more details.

    verbose : int, optional (default=0)
        The verbosity level: if non zero, progress messages are printed.
        Above 50, the output is sent to stdout.
        The frequency of the messages increases with the verbosity level.
        If it more than 10, all iterations are reported.
        `Glossary <https://joblib.readthedocs.io/en/latest/parallel.html#parallel-reference-documentation>`__
        for more details.

    Returns
    -------
    cdist : numpy.ndarray
        Cross-similarity matrix

    Examples
    --------
    >>> cdist_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]])
    array([[0., 1.],
           [1., 0.]])
    >>> cdist_dtw([[1, 2, 2, 3], [1., 2., 3., 4.]], [[1, 2, 3], [2, 3, 4, 5]])
    array([[0.        , 2.44948974],
           [1.        , 1.41421356]])

    See Also
    --------
    dtw : Get DTW similarity score

    References
    ----------
    .. [1] <NAME>, <NAME>, "Dynamic programming algorithm optimization for
           spoken word recognition," IEEE Transactions on Acoustics, Speech
           and Signal Processing, vol. 26(1), pp. 43--49, 1978.

    """  # noqa: E501
    return _cdist_generic(dist_fun=dtw, dataset1=dataset1, dataset2=dataset2,
                          n_jobs=n_jobs, verbose=verbose,
                          compute_diagonal=False,
                          global_constraint=global_constraint,
                          sakoe_chiba_radius=sakoe_chiba_radius,
                          itakura_max_slope=itakura_max_slope)


def lb_keogh(ts_query, ts_candidate=None, radius=1, envelope_candidate=None):
    r"""Compute LB_Keogh.

    LB_Keogh was originally presented in [1]_.

    Parameters
    ----------
    ts_query : array-like
        Query time-series to compare to the envelope of the candidate.

    ts_candidate : array-like or None (default: None)
        Candidate time-series. None means the envelope is provided via
        `envelope_candidate` parameter and hence does not need to be computed
        again.

    radius : int (default: 1)
        Radius to be used for the envelope generation (the envelope at time
        index i will be generated based on all observations from the candidate
        time series at indices comprised between i-radius and i+radius). Not
        used if `ts_candidate` is None.

    envelope_candidate: pair of array-like (envelope_down, envelope_up) or None (default: None)
        Pre-computed envelope of the candidate time series. If set to None, it
        is computed based on `ts_candidate`.

    Notes
    -----
    This method requires a `ts_query` and `ts_candidate` (or
    `envelope_candidate`, depending on the call) to be of equal size.

    Returns
    -------
    float
        Distance between the query time series and the envelope of the
        candidate time series.

    Examples
    --------
    >>> ts1 = [1, 2, 3, 2, 1]
    >>> ts2 = [0, 0, 0, 0, 0]
    >>> env_low, env_up = lb_envelope(ts1, radius=1)
    >>> lb_keogh(ts_query=ts2,
    ...          envelope_candidate=(env_low, env_up))  # doctest: +ELLIPSIS
    2.8284...
    >>> lb_keogh(ts_query=ts2,
    ...          ts_candidate=ts1,
    ...          radius=1)  # doctest: +ELLIPSIS
    2.8284...

    See also
    --------
    lb_envelope : Compute LB_Keogh-related envelope

    References
    ----------
    .. [1] <NAME>. Exact indexing of dynamic time warping. In International
       Conference on Very Large Data Bases, 2002. pp 406-417.

    """
    if ts_candidate is None:
        envelope_down, envelope_up = envelope_candidate
    else:
        ts_candidate = to_time_series(ts_candidate)
        # NOTE(review): assert is stripped under `python -O`; a ValueError
        # would be a more robust guard here.
        assert ts_candidate.shape[1] == 1, \
            "LB_Keogh is available only for monodimensional time series"
        envelope_down, envelope_up = lb_envelope(ts_candidate, radius)
    ts_query = to_time_series(ts_query)
    assert ts_query.shape[1] == 1, \
        "LB_Keogh is available only for monodimensional time series"
    # Only the portions of the query outside the envelope contribute.
    indices_up = ts_query[:, 0] > envelope_up[:, 0]
    indices_down = ts_query[:, 0] < envelope_down[:, 0]
    return numpy.sqrt(numpy.linalg.norm(ts_query[indices_up, 0]
                                        - envelope_up[indices_up, 0]) ** 2
                      + numpy.linalg.norm(ts_query[indices_down, 0]
                                          - envelope_down[indices_down,
                                                          0]) ** 2)


@njit()
def njit_lb_envelope(time_series, radius):
    # Running min/max of the series over a window of size 2 * radius + 1,
    # clipped at the series boundaries.
    sz, d = time_series.shape
    enveloppe_up = numpy.empty((sz, d))
    enveloppe_down = numpy.empty((sz, d))

    for i in prange(sz):
        min_idx = i - radius
        max_idx = i + radius + 1
        if min_idx < 0:
            min_idx = 0
        if max_idx > sz:
            max_idx = sz
        for di in prange(d):
            enveloppe_down[i, di] = numpy.min(time_series[min_idx:max_idx,
                                                          di])
            enveloppe_up[i, di] = numpy.max(time_series[min_idx:max_idx, di])

    return enveloppe_down, enveloppe_up


def lb_envelope(ts, radius=1):
    r"""Compute time-series envelope as required by LB_Keogh.

    LB_Keogh was originally presented in [1]_.

    Parameters
    ----------
    ts : array-like
        Time-series for which the envelope should be computed.

    radius : int (default: 1)
        Radius to be used for the envelope generation (the envelope at time
        index i will be generated based on all observations from the time
        series at indices comprised between i-radius and i+radius).

    Returns
    -------
    array-like
        Lower-side of the envelope.

    array-like
        Upper-side of the envelope.
Examples
    --------
    >>> ts1 = [1, 2, 3, 2, 1]
    >>> env_low, env_up = lb_envelope(ts1, radius=1)
    >>> env_low
    array([[1.],
           [1.],
           [2.],
           [1.],
           [1.]])
    >>> env_up
    array([[2.],
           [3.],
           [3.],
           [3.],
           [2.]])

    See also
    --------
    lb_keogh : Compute LB_Keogh similarity

    References
    ----------
    .. [1] <NAME>. Exact indexing of dynamic time warping. In International
       Conference on Very Large Data Bases, 2002. pp 406-417.

    """
    return njit_lb_envelope(to_time_series(ts), radius=radius)


@njit(nogil=True)
def njit_lcss_accumulated_matrix(s1, s2, eps, mask):
    """Compute the accumulated cost matrix used for the Longest Common
    Subsequence (LCSS) similarity measure between two time series.

    Parameters
    ----------
    s1 : array, shape = (sz1,)
        First time series.

    s2 : array, shape = (sz2,)
        Second time series.

    eps : float
        Matching threshold.

    mask : array, shape = (sz1, sz2)
        Mask. Unconsidered cells must have infinite values.

    Returns
    -------
    acc_cost_mat : array, shape = (sz1 + 1, sz2 + 1)
        Accumulated cost matrix: cell (i, j) holds the LCSS length of the
        prefixes s1[:i] and s2[:j].
    """
    l1 = s1.shape[0]
    l2 = s2.shape[0]
    acc_cost_mat = numpy.full((l1 + 1, l2 + 1), 0)

    for i in range(1, l1 + 1):
        for j in range(1, l2 + 1):
            if numpy.isfinite(mask[i - 1, j - 1]):
                # Classical LCSS recurrence: extend on a match (within eps),
                # otherwise carry the best neighbouring prefix score.
                if numpy.sqrt(_local_squared_dist(s1[i - 1],
                                                  s2[j - 1])) <= eps:
                    acc_cost_mat[i][j] = 1 + acc_cost_mat[i - 1][j - 1]
                else:
                    acc_cost_mat[i][j] = max(acc_cost_mat[i][j - 1],
                                             acc_cost_mat[i - 1][j])

    return acc_cost_mat


@njit(nogil=True)
def njit_lcss(s1, s2, eps, mask):
    """Compute the longest common subsequence score between two time series.

    Parameters
    ----------
    s1 : array, shape = (sz1,)
        First time series.

    s2 : array, shape = (sz2,)
        Second time series

    eps : float (default: 1.)
        Maximum matching distance threshold.

    mask : array, shape = (sz1, sz2)
        Mask. Unconsidered cells must have infinite values.

    Returns
    -------
    lcss_score : float
        Longest Common Subsequence score between both time series,
        normalized by the length of the shorter series.
    """
    l1 = s1.shape[0]
    l2 = s2.shape[0]
    acc_cost_mat = njit_lcss_accumulated_matrix(s1, s2, eps, mask)
    return float(acc_cost_mat[-1][-1]) / min([l1, l2])


def lcss(s1, s2, eps=1., global_constraint=None, sakoe_chiba_radius=None,
         itakura_max_slope=None):
    r"""Compute the Longest Common Subsequence (LCSS) similarity measure
    between (possibly multidimensional) time series and return the
    similarity.

    LCSS is computed by matching indexes that are met up until the eps
    threshold, so it leaves some points unmatched and focuses on the
    similar parts of two sequences. The matching can occur even if the time
    indexes are different, regulated through the delta parameter that
    defines how far it can go.

    To retrieve a meaningful similarity value from the length of the longest
    common subsequence, the percentage of that value regarding the length of
    the shortest time series is returned.

    According to this definition, the values returned by LCSS range from 0 to
    1, the highest value taken when two time series fully match, and
    vice-versa. It is not required that both time series share the same size,
    but they must be the same dimension. LCSS was originally presented in
    [1]_ and is discussed in more details in our :ref:`dedicated user-guide
    page <lcss>`.

    Note
    ----
    Contrary to Dynamic Time Warping and variants, an LCSS path does not need
    to be contiguous.

    Parameters
    ----------
    s1
        A time series.

    s2
        Another time series.

    eps : float (default: 1.)
        Maximum matching distance threshold.

    global_constraint : {"itakura", "sakoe_chiba"} or None (default: None)
        Global constraint to restrict admissible paths for LCSS.

    sakoe_chiba_radius : int or None (default: None)
        Radius to be used for Sakoe-Chiba band global constraint.
        If None and `global_constraint` is set to "sakoe_chiba", a radius of 1
        is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among the
        two.
In this case, if `global_constraint` corresponds to no global
        constraint, a `RuntimeWarning` is raised and no global constraint is
        used.

    itakura_max_slope : float or None (default: None)
        Maximum slope for the Itakura parallelogram constraint.
        If None and `global_constraint` is set to "itakura", a maximum slope
        of 2. is used.
        If both `sakoe_chiba_radius` and `itakura_max_slope` are set,
        `global_constraint` is used to infer which constraint to use among the
        two. In this case, if `global_constraint` corresponds to no global
        constraint, a `RuntimeWarning` is raised and no global constraint is
        used.

    Returns
    -------
    float
        Similarity score

    Examples
    --------
    >>> lcss([1, 2, 3], [1., 2., 2., 3.])
    1.0
    >>> lcss([1, 2, 3], [1., 2., 2., 4., 7.])
    1.0
    >>> lcss([1, 2, 3], [1., 2., 2., 2., 3.], eps=0)
    1.0
    >>> lcss([1, 2, 3], [-2., 5., 7.], eps=3)
    0.6666666666666666

    See Also
    --------
    lcss_path: Get both the matching path and the similarity score for LCSS

    References
    ----------
    .. [1] <NAME>, <NAME>, and <NAME>. 2002. "Discovering
           Similar Multidimensional Trajectories", In Proceedings of the
           18th International Conference on Data Engineering (ICDE '02).
           IEEE Computer Society, USA, 673.

    """
    s1 = to_time_series(s1, remove_nans=True)
    s2 = to_time_series(s2, remove_nans=True)

    mask = compute_mask(
        s1, s2,
        GLOBAL_CONSTRAINT_CODE[global_constraint],
        sakoe_chiba_radius=sakoe_chiba_radius,
        itakura_max_slope=itakura_max_slope)

    return njit_lcss(s1, s2, eps, mask)


@njit()
def _return_lcss_path(s1, s2, eps, mask, acc_cost_mat, sz1, sz2):
    # Backtrack through the accumulated matrix: a diagonal move records a
    # matched pair (within eps); otherwise follow the larger neighbour.
    i, j = (sz1, sz2)
    path = []

    while i > 0 and j > 0:
        if numpy.isfinite(mask[i - 1, j - 1]):
            if numpy.sqrt(_local_squared_dist(s1[i - 1],
                                              s2[j - 1])) <= eps:
                path.append((i - 1, j - 1))
                i, j = (i - 1, j - 1)
            elif acc_cost_mat[i - 1][j] > acc_cost_mat[i][j-1]:
                i = i - 1
            else:
                j = j - 1
    return path[::-1]


@njit()
def _return_lcss_path_from_dist_matrix(dist_matrix, eps, mask, acc_cost_mat,
                                       sz1, sz2):
    # Same backtracking as _return_lcss_path, but pairwise distances are
    # provided as a precomputed matrix.
    i, j = (sz1, sz2)
    path = []

    while i > 0 and j > 0:
        if numpy.isfinite(mask[i - 1, j - 1]):
            if dist_matrix[i - 1, j - 1] <= eps:
                path.append((i - 1, j - 1))
                i, j = (i - 1, j - 1)
            elif acc_cost_mat[i - 1][j] > acc_cost_mat[i][j-1]:
                i = i - 1
            else:
                j = j - 1
    return path[::-1]


def lcss_path(s1, s2, eps=1, global_constraint=None, sakoe_chiba_radius=None,
              itakura_max_slope=None):
    r"""Compute the Longest Common Subsequence (LCSS) similarity measure
    between (possibly multidimensional) time series and return both the path
    and the similarity.

    LCSS is computed by matching indexes that are met up until the eps
    threshold, so it leaves some points unmatched and focuses on the similar
    parts of two sequences. The matching can occur even if the time indexes
    are different, which can be regulated through the sakoe chiba radius
    parameter that defines how far it can go.

    To retrieve a meaningful similarity value from the length of the longest
    common subsequence, the percentage of that value regarding the length of
    the shortest time series is returned.

    According to this definition, the values returned by LCSS range from 0 to
    1, the highest value taken when two time series fully match, and
    vice-versa.
It is not required that both time series share the same size, but they must be the same dimension. LCSS was originally presented in [1]_ and is discussed in more details in our :ref:`dedicated user-guide page <lcss>`. Note ---- Contrary to Dynamic Time Warping and variants, an LCSS path does not need to be contiguous. Parameters ---------- s1 A time series. s2 Another time series. eps : float (default: 1.) Maximum matching distance threshold. global_constraint : {"itakura", "sakoe_chiba"} or None (default: None) Global constraint to restrict admissible paths for LCSS. sakoe_chiba_radius : int or None (default: None) Radius to be used for Sakoe-Chiba band global constraint. If None and `global_constraint` is set to "sakoe_chiba", a radius of 1 is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. In this case, if `global_constraint` corresponds to no global constraint, a `RuntimeWarning` is raised and no global constraint is used. itakura_max_slope : float or None (default: None) Maximum slope for the Itakura parallelogram constraint. If None and `global_constraint` is set to "itakura", a maximum slope of 2. is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. In this case, if `global_constraint` corresponds to no global constraint, a `RuntimeWarning` is raised and no global constraint is used. Returns ------- list of integer pairs Matching path represented as a list of index pairs. 
In each pair, the first index corresponds to s1 and the second one corresponds to s2 float Similarity score Examples -------- >>> path, sim = lcss_path([1., 2., 3.], [1., 2., 2., 3.]) >>> path [(0, 1), (1, 2), (2, 3)] >>> sim 1.0 >>> lcss_path([1., 2., 3.], [1., 2., 2., 4.])[1] 1.0 See Also -------- lcss : Get only the similarity score for LCSS lcss_path_from_metric: Compute LCSS using a user-defined distance metric References ---------- .. [1] <NAME>, <NAME>, and <NAME>. 2002. "Discovering Similar Multidimensional Trajectories", In Proceedings of the 18th International Conference on Data Engineering (ICDE '02). IEEE Computer Society, USA, 673. """ s1 = to_time_series(s1, remove_nans=True) s2 = to_time_series(s2, remove_nans=True) mask = compute_mask( s1, s2, GLOBAL_CONSTRAINT_CODE[global_constraint], sakoe_chiba_radius, itakura_max_slope) l1 = s1.shape[0] l2 = s2.shape[0] acc_cost_mat = njit_lcss_accumulated_matrix(s1, s2, eps, mask) path = _return_lcss_path(s1, s2, eps, mask, acc_cost_mat, l1, l2) return path, float(acc_cost_mat[-1][-1]) / min([l1, l2]) def njit_lcss_accumulated_matrix_from_dist_matrix(dist_matrix, eps, mask): """Compute the accumulated cost matrix score between two time series using a precomputed distance matrix. Parameters ---------- dist_matrix : array, shape = (sz1, sz2) Array containing the pairwise distances. eps : float (default: 1.) Maximum matching distance threshold. mask : array, shape = (sz1, sz2) Mask. Unconsidered cells must have infinite values. Returns ------- mat : array, shape = (sz1, sz2) Accumulated cost matrix. 
""" l1, l2 = dist_matrix.shape acc_cost_mat = numpy.full((l1 + 1, l2 + 1), 0) for i in range(1, l1 + 1): for j in range(1, l2 + 1): if numpy.isfinite(mask[i - 1, j - 1]): if dist_matrix[i - 1, j - 1] <= eps: acc_cost_mat[i][j] = 1 + acc_cost_mat[i - 1][j - 1] else: acc_cost_mat[i][j] = max(acc_cost_mat[i][j - 1], acc_cost_mat[i - 1][j]) return acc_cost_mat def lcss_path_from_metric(s1, s2=None, eps=1, metric="euclidean", global_constraint=None, sakoe_chiba_radius=None, itakura_max_slope=None, **kwds): r"""Compute the Longest Common Subsequence (LCSS) similarity measure between (possibly multidimensional) time series using a distance metric defined by the user and return both the path and the similarity. Having the length of the longest commom subsequence between two time-series, the similarity is computed as the percentage of that value regarding the length of the shortest time series. It is not required that both time series share the same size, but they must be the same dimension. LCSS was originally presented in [1]_. Valid values for metric are the same as for scikit-learn `pairwise_distances`_ function i.e. a string (e.g. "euclidean", "sqeuclidean", "hamming") or a function that is used to compute the pairwise distances. See `scikit`_ and `scipy`_ documentations for more information about the available metrics. Parameters ---------- s1 : array, shape = (sz1, d) if metric!="precomputed", (sz1, sz2) otherwise A time series or an array of pairwise distances between samples. s2 : array, shape = (sz2, d), optional (default: None) A second time series, only allowed if metric != "precomputed". eps : float (default: 1.) Maximum matching distance threshold. metric : string or callable (default: "euclidean") Function used to compute the pairwise distances between each points of `s1` and `s2`. If metric is "precomputed", `s1` is assumed to be a distance matrix. If metric is an other string, it must be one of the options compatible with sklearn.metrics.pairwise_distances. 
Alternatively, if metric is a callable function, it is called on pairs of rows of `s1` and `s2`. The callable should take two 1 dimensional arrays as input and return a value indicating the distance between them. global_constraint : {"itakura", "sakoe_chiba"} or None (default: None) Global constraint to restrict admissible paths for LCSS. sakoe_chiba_radius : int or None (default: None) Radius to be used for Sakoe-Chiba band global constraint. If None and `global_constraint` is set to "sakoe_chiba", a radius of 1 is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. In this case, if `global_constraint` corresponds to no global constraint, a `RuntimeWarning` is raised and no global constraint is used. itakura_max_slope : float or None (default: None) Maximum slope for the Itakura parallelogram constraint. If None and `global_constraint` is set to "itakura", a maximum slope of 2. is used. If both `sakoe_chiba_radius` and `itakura_max_slope` are set, `global_constraint` is used to infer which constraint to use among the two. In this case, if `global_constraint` corresponds to no global constraint, a `RuntimeWarning` is raised and no global constraint is used. **kwds Additional arguments to pass to sklearn pairwise_distances to compute the pairwise distances. Returns ------- list of integer pairs Matching path represented as a list of index pairs. In each pair, the first index corresponds to s1 and the second one corresponds to s2. float Similarity score. Examples -------- Lets create 2 numpy arrays to wrap: >>> import numpy as np >>> rng = np.random.RandomState(0) >>> s1, s2 = rng.rand(5, 2), rng.rand(6, 2) The wrapping can be done by passing a string indicating the metric to pass to scikit-learn pairwise_distances: >>> lcss_path_from_metric(s1, s2, ... 
metric="sqeuclidean") # doctest: +ELLIPSIS ([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], 1.0) Or by defining a custom distance function: >>> sqeuclidean = lambda x, y: np.sum((x-y)**2) >>> lcss_path_from_metric(s1, s2, metric=sqeuclidean) # doctest: +ELLIPSIS ([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], 1.0) Or by using a precomputed distance matrix as input: >>> from sklearn.metrics.pairwise import pairwise_distances >>> dist_matrix = pairwise_distances(s1, s2, metric="sqeuclidean") >>> lcss_path_from_metric(dist_matrix, ... metric="precomputed") # doctest: +ELLIPSIS ([(0, 1), (1, 2), (2, 3), (3, 4), (4, 5)], 1.0) Notes -------- By using a squared euclidean distance metric as shown above, the output path and similarity is the same as the one obtained by using lcss_path (which uses the euclidean distance) simply because with the sum of squared distances the matching threshold is still not reached. Also, contrary to Dynamic Time Warping and variants, an LCSS path does not need to be contiguous. See Also -------- lcss: Get only the similarity score for LCSS lcss_path : Get both the matching path and the similarity score for LCSS References ---------- .. [1] <NAME>, <NAME>, and <NAME>. 2002. "Discovering Similar Multidimensional Trajectories", In Proceedings of the 18th International Conference on Data Engineering (ICDE '02). IEEE Computer Society, USA, 673. .. _pairwise_distances: https://scikit-learn.org/stable/modules/generated/sklearn.metrics.pairwise_distances.html .. _scikit: https://scikit-learn.org/stable/modules/metrics.html .. 
_scipy: https://docs.scipy.org/doc/scipy/reference/generated/scipy.spatial.distance.pdist.html """ # noqa: E501 if metric == "precomputed": # Pairwise distance given as input sz1, sz2 = s1.shape mask = compute_mask( sz1, sz2, GLOBAL_CONSTRAINT_CODE[global_constraint], sakoe_chiba_radius, itakura_max_slope ) dist_mat = s1 else: s1 = to_time_series(s1, remove_nans=True) s2 = to_time_series(s2, remove_nans=True) sz1 = s1.shape[0] sz2 = s2.shape[0] mask = compute_mask( s1, s2, GLOBAL_CONSTRAINT_CODE[global_constraint], sakoe_chiba_radius, itakura_max_slope ) dist_mat = pairwise_distances(s1, s2, metric=metric, **kwds) acc_cost_mat = njit_lcss_accumulated_matrix_from_dist_matrix( dist_mat, eps, mask) path = _return_lcss_path_from_dist_matrix( dist_mat, eps, acc_cost_mat, mask, sz1, sz2) return path, float(acc_cost_mat[-1][-1]) / min([sz1, sz2])
28,302
319
<reponame>Celebrate-future/openimaj /** * Copyright (c) 2011, The University of Southampton and the individual contributors. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of the University of Southampton nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package org.openimaj.ml.clustering.assignment.hard; import org.openimaj.citation.annotation.Reference; import org.openimaj.citation.annotation.ReferenceType; import org.openimaj.ml.clustering.assignment.HardAssigner; import org.openimaj.util.pair.IntFloatPair; /** * An assigner that wraps another hard assigner and only produces valid * assignments if the closest cluster is within (or outside) of a given * threshold distance. * <p> * Invalid assignments are marked by a cluster id of -1, and (if applicable) * distance of {@link Float#NaN}. Users of this class must check the assignments * and filter as necessary. * * @author <NAME> (<EMAIL>) * @param <DATATYPE> * the primitive array datatype which represents a centroid of this * cluster. */ @Reference( author = { "<NAME>", "<NAME>", "<NAME>", "<NAME>" }, title = "Constrained Keypoint Quantization: Towards Better Bag-of-Words Model for Large-scale Multimedia Retrieval", type = ReferenceType.Inproceedings, year = "2012", booktitle = "ACM International Conference on Multimedia Retrieval", customData = { "location", "Hong Kong, China" }) public class ConstrainedFloatAssigner<DATATYPE> implements HardAssigner<DATATYPE, float[], IntFloatPair> { HardAssigner<DATATYPE, float[], IntFloatPair> internalAssigner; boolean allowIfGreater = false; float threshold; /** * Construct the ConstrainedFloatAssigner with the given assigner and * threshold. Assignments will be rejected if the distance given by the * internal assigner are greater than the threshold. * * @param internalAssigner * the internal assigner for computing distances. * @param threshold * the threshold at which assignments are rejected. */ public ConstrainedFloatAssigner(HardAssigner<DATATYPE, float[], IntFloatPair> internalAssigner, float threshold) { this.threshold = threshold; } /** * Construct the ConstrainedFloatAssigner with the given assigner and * threshold. 
The greater flag determines if assignments should be rejected * if the distance generated by the internal assigner is greater than the * threshold (false) or less than the threshold (true). * * @param internalAssigner * the internal assigner for computing distances. * @param threshold * the threshold at which assignments are rejected. * @param greater * if true distances less than the threshold are rejected; if * false then distances greater than the threshold are rejected. */ public ConstrainedFloatAssigner(HardAssigner<DATATYPE, float[], IntFloatPair> internalAssigner, float threshold, boolean greater) { this.allowIfGreater = greater; this.threshold = threshold; } private boolean allow(float distance) { if (allowIfGreater) { return distance > threshold; } return distance < threshold; } @Override public int[] assign(DATATYPE[] data) { final int[] indices = new int[data.length]; final float[] distances = new float[data.length]; assignDistance(data, indices, distances); return indices; } @Override public int assign(DATATYPE data) { return assignDistance(data).first; } @Override public void assignDistance(DATATYPE[] data, int[] indices, float[] distances) { internalAssigner.assignDistance(data, indices, distances); for (int i = 0; i < data.length; i++) { if (!allow(distances[i])) { distances[i] = Float.NaN; indices[i] = -1; } } } @Override public IntFloatPair assignDistance(DATATYPE data) { final IntFloatPair res = internalAssigner.assignDistance(data); if (!allow(res.second)) { res.second = Float.NaN; res.first = -1; } return res; } @Override public int size() { return this.internalAssigner.size(); } @Override public int numDimensions() { return this.internalAssigner.numDimensions(); } }
1,757
322
package com.imooc.lib_common_ui.delegate.web.event;

import android.util.Log;

/**
 * Fallback {@code Event} implementation. {@link #execute(String)} ignores its
 * parameters and returns {@code null}.
 */
public class UndefinedEvent extends Event {

    @Override
    public String execute(String params) {
        // NOTE(review): logs an empty tag/message — presumably a leftover
        // debugging hook; confirm whether a meaningful warning (e.g. the
        // unhandled event name) should be logged here instead.
        Log.d("", "");
        return null;
    }
}
73
373
<filename>bundle/src/test/java/com/adobe/acs/commons/workflow/synthetic/impl/cq/SyntheticWorkflowSessionTest.java
/*
 * #%L
 * ACS AEM Commons Bundle
 * %%
 * Copyright (C) 2016 Adobe
 * %%
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 * #L%
 */
package com.adobe.acs.commons.workflow.synthetic.impl.cq;

import com.adobe.acs.commons.workflow.synthetic.cq.WrappedSyntheticWorkItem;
import com.adobe.acs.commons.workflow.synthetic.impl.SyntheticWorkflowData;
import com.adobe.acs.commons.workflow.synthetic.impl.SyntheticWorkflowRunnerImpl;
import com.adobe.acs.commons.workflow.synthetic.impl.cq.exceptions.SyntheticRestartWorkflowException;
import com.adobe.acs.commons.workflow.synthetic.impl.cq.exceptions.SyntheticTerminateWorkflowException;
import com.day.cq.workflow.exec.Route;
import com.day.cq.workflow.exec.WorkItem;
import com.day.cq.workflow.exec.filter.WorkItemFilter;
import com.day.cq.workflow.model.WorkflowModel;
import com.day.cq.workflow.model.WorkflowModelFilter;
import org.apache.jackrabbit.api.security.user.Authorizable;
import org.apache.sling.api.resource.NonExistingResource;
import org.apache.sling.api.resource.Resource;
import org.apache.sling.api.resource.ResourceResolver;
import org.junit.Assert;
import org.junit.Before;
import org.junit.Test;
import org.junit.runner.RunWith;
import org.mockito.Mock;
import org.mockito.junit.MockitoJUnitRunner;

import javax.jcr.Session;
import java.lang.reflect.Proxy;
import java.util.Dictionary;
import java.util.HashMap;
import java.util.Hashtable;
import java.util.Map;

/**
 * Unit tests for {@code SyntheticWorkflowSession}: verifies the handful of
 * supported operations and that every unsupported CQ WorkflowSession method
 * throws {@link UnsupportedOperationException} (or the synthetic control-flow
 * exceptions for terminate/restart).
 */
@RunWith(MockitoJUnitRunner.class)
public class SyntheticWorkflowSessionTest {

    private static final String UNSUPPORTED_EXCEPTION = "Operation not supported by Synthetic Workflow";

    @Mock
    ResourceResolver resourceResolver;

    // NOTE(review): the fields below are annotated @Mock but are all
    // re-assigned with real instances in setUp(), so the generated mocks are
    // never used — presumably the annotations could be dropped; confirm.
    @Mock
    SyntheticWorkflowSession session;

    @Mock
    SyntheticWorkflowData workflowData;

    @Mock
    SyntheticWorkflow syntheticWorkflow;

    @Mock
    WorkItem wrappedWorkItem;

    @Mock
    WorkItemFilter workItemFilter;

    @Mock
    Route route;

    @Mock
    WorkflowModelFilter workflowModelFilter;

    @Before
    public void setUp() {
        // resourceResolver is a Mockito mock, so adaptTo(...) returns null;
        // the session under test is constructed with a null JCR session.
        session = new SyntheticWorkflowSession(new SyntheticWorkflowRunnerImpl(), resourceResolver.adaptTo(Session.class));
        workflowData = new SyntheticWorkflowData("JCR_PATH", "/content/test");

        workflowData.getMetaDataMap().put("cat", "meow");
        workflowData.getMetaDataMap().put("bird", "ka-kaw");

        syntheticWorkflow = new SyntheticWorkflow("test", workflowData);
        SyntheticWorkItem syntheticWorkItem = SyntheticWorkItem.createSyntheticWorkItem(syntheticWorkflow.getWorkflowData());
        // Wrap the synthetic work item in a dynamic proxy exposing both the
        // CQ WorkItem and the WrappedSyntheticWorkItem interfaces.
        wrappedWorkItem = (WorkItem) Proxy.newProxyInstance(WrappedSyntheticWorkItem.class.getClassLoader(), new Class[] { WorkItem.class, WrappedSyntheticWorkItem.class }, syntheticWorkItem);

        SyntheticRoute syntheticRoute = new SyntheticRoute(false);
        route = (Route) syntheticRoute;

        workItemFilter = new WorkItemFilter() {
            @Override
            public boolean doInclude(WorkItem workItem) {
                return false;
            }
        };

        workflowModelFilter = new WorkflowModelFilter() {
            @Override
            public boolean doInclude(WorkflowModel workflowModel) {
                return false;
            }
        };
    }

    @Test
    public void test_updateWorkflowData() throws Exception {
        workflowData.getMetaDataMap().put("dog", "woof");

        session.updateWorkflowData(syntheticWorkflow, workflowData);

        // This test is a bit strange since the maps should always be in sync; this updateWorkflowData simply updates itself with itself
        // This is to mimic CQ Workflow behavior which has to manage this persistence via JCR nodes
        Assert.assertEquals(3, workflowData.getMetaDataMap().size());
        Assert.assertEquals("woof", workflowData.getMetaDataMap().get("dog"));
    }

    @Test
    public void test_getWorkflowService() throws Exception {
        Assert.assertNotNull(session.getWorkflowService());
    }

    @Test
    public void test_getSession() throws Exception {
        // Null because setUp() constructed the session with a null JCR session.
        Assert.assertNull(session.getSession());
    }

    @Test
    public void test_getUser() throws Exception {
        Assert.assertNull(session.getUser());
    }

    @Test
    public void test_isSuperuser() throws Exception {
        Assert.assertTrue(session.isSuperuser());
    }

    @Test
    public void test_getRoutes() throws Exception {
        Assert.assertNotNull(session.getRoutes(wrappedWorkItem));
    }

    @Test
    public void test_getRoutesWithBoolean() throws Exception {
        Assert.assertNotNull(session.getRoutes(wrappedWorkItem, false));
    }

    @Test
    public void test_getBackRoutes() throws Exception {
        Assert.assertNotNull(session.getBackRoutes(wrappedWorkItem));
    }

    @Test
    public void test_getBackRoutesWithBoolean() throws Exception {
        Assert.assertNotNull(session.getBackRoutes(wrappedWorkItem, false));
    }

    @Test
    public void test_newWorkflowData() throws Exception {
        Assert.assertNotNull(session.newWorkflowData("test", ""));
    }

    @Test
    public void test_getDelegatees() throws Exception {
        Assert.assertNotNull(session.getDelegatees(wrappedWorkItem));
    }

    @Test(expected = SyntheticTerminateWorkflowException.class)
    public void test_terminateWorkflow() throws Exception {
        final SyntheticWorkflowData workflowData = new SyntheticWorkflowData("JCR_PATH", "/content/test");

        workflowData.getMetaDataMap().put("cat", "meow");
        workflowData.getMetaDataMap().put("bird", "ka-kaw");

        SyntheticWorkflow workflow = new SyntheticWorkflow("test", workflowData);

        session.terminateWorkflow(workflow);
    }

    // unsupported operations tests
    //
    // NOTE(review): in the tests below the Assert.assertSame(...) wrapped
    // around the call can never execute — the method under test throws
    // before the assertion runs, and the @Test(expected = ...) annotation is
    // what actually verifies the behavior. The asserts are dead code;
    // presumably kept for documentation, but consider removing them.

    @Test(expected = UnsupportedOperationException.class)
    public void test_evaluate() throws Exception {
        final SyntheticWorkflowData workflowData = new SyntheticWorkflowData("JCR_PATH", "/content/test");
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.evaluate(workflowData, "test"));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_createNewModel() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.createNewModel("test"));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_createNewModelTwoStrings() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.createNewModel("test", "test"));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_complete() throws Exception {
        session.complete(wrappedWorkItem, route);
    }

    @Test(expected = SyntheticRestartWorkflowException.class)
    public void test_restartWorkflow() throws Exception {
        session.restartWorkflow(syntheticWorkflow);
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_deployModel() throws Exception {
        session.deployModel(syntheticWorkflow.getWorkflowModel());
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_deleteModel() throws Exception {
        session.deleteModel("empty");
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getModels() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getModels());
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getModelsWithFilter() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getModels(workflowModelFilter));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getModelsLongLong() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getModels(1L, 1L));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getModelsLongLongWithFilter() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getModels(1L, 1L, workflowModelFilter));
    }

    @Test(expected = NullPointerException.class)
    public void test_getModel() throws Exception {
        Assert.assertNotNull(session.getModel("test"));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getModelTwoStrings() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getModel("test", "test"));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getActiveWorkItems() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getActiveWorkItems());
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getActiveWorkItemsTwoLongs() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getActiveWorkItems(1L, 1L));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getActiveWorkItemsTwoLongsWithFilter() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getActiveWorkItems(1L, 1L, workItemFilter));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getAllWorkItems() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getAllWorkItems());
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getAllWorkItemsTwoLongs() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getAllWorkItems(1L, 1L));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getWorkItem() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getWorkItem("test"));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getWorkflows() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getWorkflows(new String[]{}));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getWorkflowsLongLong() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getWorkflows(new String[]{}, 1L, 1L));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getAllWorkflows() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getAllWorkflows());
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getWorkflow() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getWorkflow("test"));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_delegateWorkItem() throws Exception {
        // NOTE(review): resourceResolver is a mock, so getResource() returns
        // null, the if-branch is skipped, and the test passes only because of
        // the manual throw below — delegateWorkItem itself is never exercised.
        Resource resource = resourceResolver.getResource("/content/test");
        if (resource != null) {
            Authorizable authorizable = resource.adaptTo(Authorizable.class);
            session.delegateWorkItem(wrappedWorkItem, authorizable);
        }
        throw new UnsupportedOperationException("Unsupported & null resource");
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_getHistory() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.getHistory(syntheticWorkflow));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_startWorkflow() throws Exception {
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.startWorkflow(syntheticWorkflow.getWorkflowModel(), workflowData));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_startWorkflowWithDictionary() throws Exception {
        Dictionary<String, String> stringStringDictionary = new Hashtable<>();
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.startWorkflow(syntheticWorkflow.getWorkflowModel(), workflowData, stringStringDictionary));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_startWorkflowWithMap() throws Exception {
        Map<String, Object> stringObjectMap = new HashMap<>();
        Assert.assertSame(UNSUPPORTED_EXCEPTION, session.startWorkflow(syntheticWorkflow.getWorkflowModel(), workflowData, stringObjectMap));
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_resumeWorkflow() throws Exception {
        session.resumeWorkflow(syntheticWorkflow);
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_suspendWorkflow() throws Exception {
        session.suspendWorkflow(syntheticWorkflow);
    }

    @Test(expected = UnsupportedOperationException.class)
    public void test_logout() throws Exception {
        session.logout();
    }
}
530
package org.carlspring.strongbox.users.service; import java.util.Arrays; import java.util.HashSet; import java.util.stream.Collectors; import javax.inject.Inject; import org.carlspring.strongbox.config.DataServiceConfig; import org.carlspring.strongbox.config.UsersConfig; import org.carlspring.strongbox.users.dto.User; import org.carlspring.strongbox.users.dto.UserDto; import org.carlspring.strongbox.users.service.impl.EncodedPasswordUser; import org.carlspring.strongbox.users.service.impl.YamlUserService.Yaml; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import org.springframework.boot.test.context.SpringBootTest; import org.springframework.security.crypto.password.PasswordEncoder; import org.springframework.test.context.ActiveProfiles; import org.springframework.test.context.ContextConfiguration; import static org.assertj.core.api.Assertions.assertThat; @SpringBootTest @ActiveProfiles(profiles = "test") @ContextConfiguration(classes = { DataServiceConfig.class, UsersConfig.class }) public class YamlUserServiceTest { private static final Logger logger = LoggerFactory.getLogger(YamlUserServiceTest.class); @Inject @Yaml UserService userService; @Inject private PasswordEncoder passwordEncoder; @BeforeEach public void setup() { assertThat(userService).isNotNull(); } @Test public void testFindByUsername() { // Load the user User user = userService.findByUsername("deployer"); assertThat(user).as("Unable to find user by name test-user").isNotNull(); User nullUser = userService.findByUsername(null); assertThat(nullUser).as("User should have been null").isNull(); } @Test public void testCreate() throws Exception { String testUserName = "test-user"; UserDto user = new UserDto(); user.setEnabled(true); user.setUsername(testUserName); user.setPassword("<PASSWORD>"); user.setSecurityTokenKey("some-security-token"); userService.save(new EncodedPasswordUser(user, passwordEncoder)); User 
foundEntity = userService.findByUsername(testUserName); assertThat(foundEntity).as("Unable to locate user " + testUserName + ". Save operation failed!").isNotNull(); logger.debug("Found stored user\n\t{}\n", foundEntity); assertThat(foundEntity.getUsername()).isEqualTo(testUserName); assertThat(foundEntity.getPassword()) .as("Expected a hashed password, received plain-text!") .isNotEqualTo("test-password"); // password should NOT be saved as "plain" assertThat(foundEntity.getPassword()).as("User contains empty password!").isNotNull(); assertThat(foundEntity.isEnabled()).isTrue(); assertThat(foundEntity.getSecurityTokenKey()).isEqualTo("some-security-token"); } @Test public void testUpdateUser() throws Exception { String testUserName = "test-update-user"; UserDto userAdd = new UserDto(); userAdd.setEnabled(true); userAdd.setUsername(testUserName); userAdd.setPassword("<PASSWORD>"); userAdd.setSecurityTokenKey("before"); userService.save(userAdd); User addedEntity = userService.findByUsername(testUserName); assertThat(addedEntity).as("Unable to locate user " + testUserName + ". Save operation failed!").isNotNull(); logger.debug("Found stored user\n\t{}\n", addedEntity); logger.debug("Updating user..."); UserDto userUpdate = new UserDto(); userUpdate.setUsername(testUserName); userUpdate.setPassword("<PASSWORD>"); userUpdate.setSecurityTokenKey("after"); userUpdate.setEnabled(false); userService.save(userUpdate); User updatedEntity = userService.findByUsername(testUserName); assertThat(updatedEntity).as("Unable to locate updated user " + testUserName + ". 
Update operation failed!").isNotNull(); logger.debug("Found stored updated user\n\t{}\n", updatedEntity); assertThat(updatedEntity.getUsername()).isEqualTo(testUserName); assertThat(updatedEntity.getPassword()) .as("Expected current password to have changed.") .isNotEqualTo(addedEntity.getPassword()); assertThat(updatedEntity.getPassword()).as("Expected password to be other than null").isNotNull(); assertThat(updatedEntity.isEnabled()).as("User should have been disabled, but is still enabled!").isFalse(); assertThat(updatedEntity.getSecurityTokenKey()).isEqualTo("after"); } @Test public void testUpdatingUserWithEmptyAndBlankPasswordShouldNotUpdatePasswordField() throws Exception { // 1. add initial user String testUserName = "test-update-user-empty-blank-pass"; UserDto userAdd = new UserDto(); userAdd.setEnabled(true); userAdd.setUsername(testUserName); userAdd.setPassword("<PASSWORD>"); userService.save(userAdd); User addedEntity = userService.findByUsername(testUserName); assertThat(addedEntity).as("Unable to locate user " + testUserName + ". Save operation failed!").isNotNull(); logger.debug("Found stored initial user\n\t{}\n", addedEntity); // 2. Update the user with empty/null password logger.debug("Updating user with empty/null pass..."); UserDto userNullPassUpdate = new UserDto(); userNullPassUpdate.setUsername(testUserName); userNullPassUpdate.setPassword(<PASSWORD>); userService.save(userNullPassUpdate); User updatedEntity = userService.findByUsername(testUserName); assertThat(updatedEntity).as("Unable to locate updated user " + testUserName + ". Update operation failed!").isNotNull(); logger.debug("Found stored updated with empty pass user\n\t{}\n", updatedEntity); assertThat(updatedEntity.getPassword()).as("User password has changed!").isEqualTo(addedEntity.getPassword()); // 3. Update the user with blank password (i.e. 
contains only whitespace) logger.debug("Updating user with blank pass..."); UserDto userBlankPassUpdate = new UserDto(); userBlankPassUpdate.setUsername(testUserName); userBlankPassUpdate.setPassword(<PASSWORD>); userService.save(userBlankPassUpdate); updatedEntity = userService.findByUsername(testUserName); assertThat(updatedEntity).as("Unable to locate updated user " + testUserName + ". Update operation failed!").isNotNull(); logger.debug("Found stored updated with empty pass user\n\t{}\n", updatedEntity); assertThat(updatedEntity.getPassword()).as("User password has changed!").isEqualTo(addedEntity.getPassword()); } @Test public void testUpdateUserAccountDetails() throws Exception { String testUserName = "test-update-user-account"; UserDto userAdd = new UserDto(); userAdd.setEnabled(true); userAdd.setUsername(testUserName); userAdd.setPassword("<PASSWORD>"); userAdd.setSecurityTokenKey("before"); userService.save(userAdd); User addedEntity = userService.findByUsername(testUserName); assertThat(addedEntity).as("Unable to locate user " + testUserName + ". Save operation failed!").isNotNull(); logger.debug("Found stored user\n\t{}\n", addedEntity); logger.debug("Updating user..."); UserDto userUpdate = new UserDto(); userUpdate.setUsername(testUserName); userUpdate.setPassword("<PASSWORD>"); userUpdate.setSecurityTokenKey("after"); userUpdate.setEnabled(false); userUpdate.setRoles(new HashSet<>(Arrays.asList("a", "b"))); userService.updateAccountDetailsByUsername(userUpdate); User updatedEntity = userService.findByUsername(testUserName); assertThat(updatedEntity).as("Unable to locate updated user " + testUserName + ". 
Update operation failed!").isNotNull(); logger.debug("Updated user found: \n\t{}\n", updatedEntity); assertThat(updatedEntity.getPassword()) .as("User password should have been encrypted!") .isNotEqualTo(addedEntity.getPassword()); assertThat(updatedEntity.getPassword()).as("User password was updated to null").isNotNull(); assertThat(updatedEntity.isEnabled()).isTrue(); assertThat(updatedEntity.getRoles()).as("Expected no user roles to have been updated!").isEmpty(); assertThat(updatedEntity.getSecurityTokenKey()).isEqualTo("after"); } @Test public void testThatUserNameIsUnique() { assertThat(userService.getUsers().getUsers().stream() .filter(u -> "admin".equals(u.getUsername())) .count()) .isEqualTo(1); UserDto user = new UserDto(); user.setUsername("admin"); userService.save(user); assertThat(userService.getUsers().getUsers().stream() .filter(u -> "admin".equals(u.getUsername())) .count()) .isEqualTo(1); } @Test public void testPrivilegesProcessingForAccessModel() { String testUserName = "test-user"; UserDto userAdd = new UserDto(); userAdd.setEnabled(true); userAdd.setUsername(testUserName); userAdd.setPassword("<PASSWORD>"); userAdd.setSecurityTokenKey("before"); userService.save(userAdd); // Load the user User user = userService.findByUsername("test-user"); assertThat(user).as("Unable to find user by name test-user").isNotNull(); } @Test public void testDeleteUser() throws Exception { String testUserName = "test-delete-user"; UserDto userAdd = new UserDto(); userAdd.setEnabled(true); userAdd.setUsername(testUserName); userAdd.setPassword("<PASSWORD>"); userAdd.setSecurityTokenKey("before"); userService.save(userAdd); User addedEntity = userService.findByUsername(testUserName); assertThat(addedEntity).as("Unable to locate user " + testUserName + ". 
Delete operation failed!").isNotNull(); logger.debug("Found stored user\n\t{}\n", addedEntity); logger.debug("Deleting user..."); userService.deleteByUsername(testUserName); User deletedEntity = userService.findByUsername(testUserName); assertThat(deletedEntity) .as("User " + testUserName + " is still present in the database. Delete operation failed!") .isNull(); } }
4,203
335
<filename>S/Subsection_noun.json { "word": "Subsection", "definitions": [ "A division of a section." ], "parts-of-speech": "Noun" }
71
3,055
/* Fontname: open_iconic_gui_8x Copyright: https://github.com/iconic/open-iconic, SIL OPEN FONT LICENSE Glyphs: 30/30 BBX Build Mode: 0 */ const uint8_t u8g2_font_open_iconic_gui_8x_t[3257] U8G2_FONT_SECTION("u8g2_font_open_iconic_gui_8x_t") = "\36\0\6\6\7\7\5\6\10@@\0\0@\0@\0\0R\0\0\14\234@R@\34D\201\61\364\17" "G\376\300\300\205\0\5\26\2\34T\10\260\60!@\203\204\0\17\21\2L<\10P\341\314\232\332bS" "n\311-\261)\265e\326 @\205\203\0\23\17\2<D\10\320 !\300\302\204\0\7\25\2\24X\10" "\60p!p\344\17C\377\0AZ\77\134D\201/\364\357F\376\234\0\212h\4(\202\31\240\10\245\200" "\42\22\3\212@\16(\302H\240\210B\201\42\210\25\202\20Z\10\42p!w\10\42p!\10\241\205 " "\210\25\202(\24(\302H\240\10\344\200\42\22\3\212P\12(\202\31\240\210F\200\42\234\0\212\270\221\277" "\13\375\3BY\276WL\201-\5,I\61$'\364x\242\215\224\231.\4\202\251\12Pe\42P\224" "\210`Q \201&y\4\234\244\21x\22&\0F\26\1\30Y\2b\304\205$\1\62\342Bs#." "\364wzB\316\250)\61\247%\245\254\222\26\362:b\306\254\250\21\267!'\354\2\7\0C\134\276W" "L\201=\5,I\61$'\364x\242\215\224\231.\14\202\250*Pa\242P\204\210b\21 \211&x" "$\234\240\221x\2&\2\6\26\11\30X\42b@\201\264\21\62\200@\134\250\66\322B\134\350wzB" "\316\250)\61\247%\245\254\222\26\362:b\306\254\250\21\267!'\354\2\27\0D\60@ \4\201\201\377" "\377\17\200\374\300_\346\31\71\245\346\304\244\226\225\322J^H\354\230\31\265\342F\344\206\235\320\13~\340" "\377\37\374\3\376\37\200\0Er@ \4\201\1\4\212\375\377\373\2\10\306\23 \230n\200`\70\2\202" "\331\12\10F\63 \230\354\200`\60\4\202\271\22\10\306R \230j\201`(\6\202\231\32\10Fr " "\230\350\201`\242\7\202\221\34\10fj \30\212\201`\252\5\202\261\24\10\346J \30\14\201`\262\3" "\202\321\14\10f+ \30\216\200`\272\1\202\361\4\10\346\13 \30\24\373\377C!Fr@ \4\201" "\1\10\212\375\377C\2\37\6\21x\30d\320a\20\2\207A\12\33\6\61h\30\344\220a\20\4\206A" "\22\27\6QX\30dQa\20\6\205A\32\23\6qH\30\344\21a\220G\204A\34\22\6iL\30" "\204Aa\220E\205A\24\26\6I\134\30\4\201a\220C\206A\14\32\6)l\30\204\300a\220A\207" "A\4\36\6\11|\30\24\373\377C\21G\60@ 
\4\201\201\377\17@\374\300\377\77X\320\23vC\216" "\270\25\65cv\304\220W\322RVK\212\71\65%g\364\374\300_\366\3\376\377\37\0\1H\277@ " "\4\201\21\4\216X\5\13B%(\210\324\220 \320C\202\70\17\12\302<,\210\362@\354\12\235\221;" "p\10~\20\202c@\210\255\1!\64\7\204\310\36\20\2;\4\10\10q\35\4\4\204\260\16\3\2B" "T\7\2\1!\250C\201\200\20\323\301@@\10\351p Dt@\20\20\2:$\10\10\361\34\24" "\4\204p\16\13\2B\64\7\6\1!\230C\203\200\20\313\301A@\10\345\360 Dr\200\220\220C" "\204\200\20\307AB@\10\343\60! Dq\240\20\20\202\70T\10\10\61\34,\4\204\20\16\27\2B" "\4\7\14\1=\62\4\344\320\20PcC@\14\16\1\341\20\374\7#p\344\207\0\0I\354@ \4" "\201\65\6LQ\61eG\4y\244\315\204\251\252\210\22\231\36\320#jX\215\247\201\65\234\5\267h\26" "\336bY\200\13E!*\22\5D\200\24\207\2\62@\211#\1!\240\204\221\200\20P\242HD\210(" "A $\244!\61\31\246\204\220\240\10TB@$\4\212\220 \4\4b\220\14\10\42H\34\10\10q" "D\220\304\203@ \210\302A \20d\321 \20H\262h\14\11DY\64\4\5\262@*\10\23\231 " "MD\202\70\221\10\362@\242\310\3y\22\1\342\201<\10\1\361@\36\204\220p NBH\70\20'" "\61 \34\210\203 \22\216\303I\24\211\306\321$\214\204\303p\22G\302Q\70\11D\361\30\36E\242\20" "\25\312\2\134,\13o\321,\270\205\323\300\30P\303jD\17\350\61E\224H\225\312L\33\311\3\211\34" "\23\243R`\206\6\0JV._\16\201-\301JH\15'\242T\214\214\320\361A:\311F\311,\27" "\246\322L\234\310\363\200\32\321BJL\7\252P\15\254\300\365\3\377[]\1kP\25\250\303\224\220\26" "Q\3z<\21g\322T\230\313\222Q\66I\7\371\34!cT\224\210\323\220\22V\201\5\0K\60@" " \4\201\201\377\17@\374\300_\346\31\71\245\346\304\244\226\225\322J^H\354\230\31\265\342F\344\206\235" "\320\13~\340\377\37\374\3\376\377\37\0\1Lr@ \4\201\1\4\212\375\377C\2\37\6\21x\30d" "\320a\20\2\207A\12\33\6\61h\30\344\220a\20\4\206A\22\27\6QX\30dQa\20\6\205A" "\32\23\6qH\30\344\21a\220G\204A\34\22\6iL\30\204Aa\220E\205A\24\26\6I\134\30" "\4\201a\220C\206A\14\32\6)l\30\204\300a\220A\207A\4\36\6\11|\30\24\373\377C!M" "r@ \4\201\1\10\212\375\377\373\2\10\306\23 \230n\200`\70\2\202\331\12\10F\63 \230\354\200" 
"`\60\4\202\271\22\10\306R \230j\201`(\6\202\231\32\10Fr \230\350\201`\242\7\202\221\34" "\10fj \30\212\201`\252\5\202\261\24\10\346J \30\14\201`\262\3\202\321\14\10f+ \30\216" "\200`\272\1\202\361\4\10\346\13 \30\24\373\377C\21N\60@ \4\201\201\377\377\17\200\374\300\377\77" "X\320\23vC\216\270\25\65cv\304\220W\322RVK\212\71\65%g\364\374\300_\366\3\376\37\200" "\0O\217@ \4\201\201O\350\21;#\207\334\224\32\63sb\320KjQ++\205\245\260\225\325\242" "^R\14\232\71\65\346\246\134\10\17RA<\306\204\361\20\21\310#<(\216\320\300\60\304\202\243\30\11" "\20\202\34H\206R@\21\314\200%\370\201E\2+`\22(Ai\220\6$\2\42\214\12\247 \62\30" "C\350P\16\341\3A\210\17#\61>\10\5\371\20\26\345\306\324\234\31\24\223^T\313Za\251\326\212" "jI/(\346\314\230\232rCr\306\216\350\11\77P\217>_\14\201\15\201\25 \11\224`\64H\3" "\21\1\21B\205S\0\31\214\341s(\207\357\201 \300\207\221\10\37\204B|\10\213qCj\312\214\211" "\71/\250%\255\250Ti\5\265\234\27\23SfH\315\270\21\71a\7\364|\37\377\363y\300N\310\21" "\67\243\206\314\224\30\363rZ\320JJQ)j%\265\240\227\23cfJ\15\271\31\27\302CT\20\217" "\60a<@\4\362\370\16\212\343\63\60\14\260\340(B\2\204\20\7\222a\24P\4\62`\11\14\0Q" "w@ \4\201\1\20\10\376\377!\10\10\204 \20\202\200@\10\2\2!\10\10\204 \20\202\200" "@\10\2\2!\10\10\204 \20\202\200@\10\2\2!\10\10\204 \20\202\200@\10\2G \10" "\24\201 P\4\202@\21\10\2E \10\24\201 P\4\202@\21\10\2\2!\10\10\204 \20\202" "\200@\10\2\2!\10\10\204 \20\202\200@\10\2G\376@\360\17RP@\34D\201\5\202\2\13" "\14\11\16\301\207\300\2C\2\23\10\12\354\7\376\24\202\2\13\14\11\16\301\207\300\2C\2\23\10\12\354" "\7\376\24\202\2\13\14\11\16\301\207\300\2C\2\23\10\12\354\7\376\24\202\2\13\14\11\16\301\207\300\2" "C\2\23\10\12\14S\37@\34D\201\1\14\202\377\241\330\37\202\375\377~\340\17\302 \370\37\212\375!" 
"\330\377\17\1\0T\35@\34\4\201\201\377\17@\374\300\377\37\364\3\376\37\200\370\201\377\77\350\7\374\77" "\0\1U\204\66[L\201\65\301L\210\15/\242U\254\214\324qBJ\311H\11-'\302E.\14\60" "\10&\60\11\30\4#\210\6\14\202\31<\4\6\301\20\232\2\203`\12\213\201A\60\206\344\300 \230\303" "\1+d\3-\370\1\255\200\66\310\12\330q`\20\314!\61\60\10\306\260\24\30\4Sh\10\14\202!" "<\3\6\301\14\42\1\203`\4S\200A\60\201\20\341\42wZB\312()!\247#e\254\212\26\361" "\32b\302,\320\0V\67@ \4\201!\20\10\376\377~\340\17\373\1\377\17\200\300\320\37\2B \10" "\10\201 \4\202\200\20\10\2B \10\10\201 \4\202\200\20\4\4\2\301\377\37\4\0W\227@" " \4\201\303\217\350\31;$\247\334\230\232\63\203b\322\213jY+,\245\235b)kE\265\244\27\24" "s.\4\206\271 \26\345\302P\220\13$\61.\24\204\270`\16\341\302\61\200\13\250\360-$\202\267\240" "\6\335\302\22p\13,\260-\264@\266\330\4\327\202\33T\213\216\60-<\202\264\370\12\320\2\64\70\213" "\360\300,D\204\262\30\23\310\202T\30\213rA,L\206\260\70\61\350%\265\250\225\225\302N\332IK" "a+\253E\275\244\30\64sj\314M\311!;\243G\374\204\0XX(\37\16\201'AI\30\15!" "\342Sx\212\216\261\71\62\310%\251(\24e\262D\230G\323p\26\217\342I@\207\250 \15\246\0\5" "!@\344\377\7\241\2\323@*D\7(\361(\236\205\323h\36Ld\231(\24\245\222\134\220\314\261\61" ":\205\247\370\20\241a$\224\202\11\0Y}>T\204\201\255\0\24X\5\220 \35@\203\223\0\21\312" "\2T\20\17\320\341C@\210.\1%\66\5\244\310\26\320\302j@\215\311\1\71\242\7\364x \20\244" "\211@\21e\2M\16\134\241\33x\301\17\350\5\274AW\340\216\11\64Q\42P\244\201@\220\347\1=" "\42\7\344\230\32P\303Z@\213L\1)\266\4\224\350\20\20\342;@\7\261\0\25J\2D\70\7\320" " \25@\202\65\0\5,\0Zy\77`\4\201\37\4\2\371\20\10\344C \220\17\201@>\4\2\371" "\20\10\344C \220\17\201@>\304\215\374!\230\13\201`.\4\202\271\20\10\346B \230\13\201`." 
"\4\202\271\20\10\346B\334\310\37\2z\20\10\350A \240\7\201\200\36\4\2z\20\10\350A \240\7" "\201\200\36\304\215\374P\223F\342D\36(\344\220\32\23\203ZV\211\13\201\35\62\243F\340\6\235\300\13" "V\0[y\77`\4\201\37\4\2z\20\10\350A \240\7\201\200\36\4\2z\20\10\350A \240\7" "\201\200\36\304\215\374!\230\13\201`.\4\202\271\20\10\346B \230\13\201`.\4\202\271\20\10\346B" "\334\310\37\2\371\20\10\344C \220\17\201@>\4\2\371\20\10\344C \220\17\201@>\304\215\374P" "\223F\342D\36(\344\220\32\23\203ZV\211\13\201\35\62\243F\340\6\235\300\13V\0\134\302@ \4" "\201/\205,Y\61&g\364\200\242\216\264\231\262G\2\271\32P\243bP\214\211a\61$\205\246\210\24" "\234\342Qx\12'\1&\32\5\203`\22M\242A\64\11\6\341 -\11\7\341$\24\304\203\244 \36" "$\5\361 %\10\10\361$\20\4\202A\220\20\14\202\204`\20$\4\203 !\30\4\11\301 H\10" "\6AB\60\10\2I<\210'\221 \36$\5\361 )\210\7II\70\10'\261 \34$&\321 " "\232\4\243`\20L\302I\200\11G\321-\36\5\307x\24\232\303aX\21\207AM\34\207sq \310" "\306\231\367\310E\226\301\62I\214\312\245X(\355\304\225\274\21\30\22;\346\6\275\260\21\4\0]\264@" " \4\201/\205,Y\61&g\364\200\242\216\264\231\262G\2\271\32P\243bP\214\211a\61$\205\246" "\210\24\234\342Qx\12'\1&\32\5\230h\22b\202A\220\210%A&\24\204\211P\20&BA\230" "\210\4q&\20\4\202A\220\20\14\202\204`\20$\4\203 !\30\4\11\301 H\10\6AB\60\10" "\2I\230\211\4a\42\24\204\211P\20&BI\220\211\5A\42\230\204\230`\24`\302I\200\11G\321" "-\36\5\307x\24\232\303aX\21\207AM\34\207sq \310\306\231\367\310E\226\301\62I\214\312\245" "X(\355\304\225\274\21\30\22;\346\6\275\260\21\4\0\0\0\0\4\377\377\0";
5,743
1,350
// Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. // Code generated by Microsoft (R) AutoRest Code Generator. package com.azure.resourcemanager.avs.fluent.models; import com.azure.core.annotation.Fluent; import com.azure.core.util.logging.ClientLogger; import com.azure.resourcemanager.avs.models.DatastoreProvisioningState; import com.azure.resourcemanager.avs.models.DatastoreStatus; import com.azure.resourcemanager.avs.models.DiskPoolVolume; import com.azure.resourcemanager.avs.models.NetAppVolume; import com.fasterxml.jackson.annotation.JsonIgnore; import com.fasterxml.jackson.annotation.JsonProperty; /** The properties of a datastore. */ @Fluent public final class DatastoreProperties { @JsonIgnore private final ClientLogger logger = new ClientLogger(DatastoreProperties.class); /* * The state of the datastore provisioning */ @JsonProperty(value = "provisioningState", access = JsonProperty.Access.WRITE_ONLY) private DatastoreProvisioningState provisioningState; /* * An Azure NetApp Files volume */ @JsonProperty(value = "netAppVolume") private NetAppVolume netAppVolume; /* * An iSCSI volume */ @JsonProperty(value = "diskPoolVolume") private DiskPoolVolume diskPoolVolume; /* * The operational status of the datastore */ @JsonProperty(value = "status", access = JsonProperty.Access.WRITE_ONLY) private DatastoreStatus status; /** * Get the provisioningState property: The state of the datastore provisioning. * * @return the provisioningState value. */ public DatastoreProvisioningState provisioningState() { return this.provisioningState; } /** * Get the netAppVolume property: An Azure NetApp Files volume. * * @return the netAppVolume value. */ public NetAppVolume netAppVolume() { return this.netAppVolume; } /** * Set the netAppVolume property: An Azure NetApp Files volume. * * @param netAppVolume the netAppVolume value to set. * @return the DatastoreProperties object itself. 
*/ public DatastoreProperties withNetAppVolume(NetAppVolume netAppVolume) { this.netAppVolume = netAppVolume; return this; } /** * Get the diskPoolVolume property: An iSCSI volume. * * @return the diskPoolVolume value. */ public DiskPoolVolume diskPoolVolume() { return this.diskPoolVolume; } /** * Set the diskPoolVolume property: An iSCSI volume. * * @param diskPoolVolume the diskPoolVolume value to set. * @return the DatastoreProperties object itself. */ public DatastoreProperties withDiskPoolVolume(DiskPoolVolume diskPoolVolume) { this.diskPoolVolume = diskPoolVolume; return this; } /** * Get the status property: The operational status of the datastore. * * @return the status value. */ public DatastoreStatus status() { return this.status; } /** * Validates the instance. * * @throws IllegalArgumentException thrown if the instance is not valid. */ public void validate() { if (netAppVolume() != null) { netAppVolume().validate(); } if (diskPoolVolume() != null) { diskPoolVolume().validate(); } } }
1,253
903
import pyotp
from django.core.management.base import BaseCommand


class Command(BaseCommand):
    """Management command that prints a freshly generated TOTP base32 secret."""

    help = "Generates TOTP Random Base 32"

    def handle(self, *args, **kwargs):
        """Generate a random base32 secret and write it to the command's stdout."""
        secret = pyotp.random_base32()
        self.stdout.write(secret)
98
2,406
// Copyright (C) 2018-2021 Intel Corporation
// SPDX-License-Identifier: Apache-2.0
//

#include "legacy/ngraph_ops/tile_ie.hpp"

#include <algorithm>
#include <memory>

#include "ngraph/util.hpp"
#include "ngraph/validation_util.hpp"

using namespace std;
using namespace ngraph;

BWDCMP_RTTI_DEFINITION(op::TileIE);

// Legacy IE Tile operation: repeats the input tensor `tiles` times along the
// single dimension `axis`.  Unlike ngraph's standard Tile (which takes a full
// repeats vector), this op stores one axis and one repeat count as attributes.
op::TileIE::TileIE(const Output<ngraph::Node>& data1, const int64_t axis, const int64_t tiles)
    : Op({data1}), axis(axis), tiles(tiles) {
    constructor_validate_and_infer_types();
}

// Clone for graph transformations: same axis/tiles attributes, new input.
std::shared_ptr<Node> op::TileIE::clone_with_new_inputs(const OutputVector& new_args) const {
    check_new_args_count(this, new_args);
    return make_shared<TileIE>(new_args.at(0), axis, tiles);
}

void op::TileIE::validate_and_infer_types() {
    const auto & input_pshape = get_input_partial_shape(0);
    // Default to a fully dynamic output; refined below only when the input
    // rank is known.
    auto output_pshape = PartialShape::dynamic();
    if (input_pshape.rank().is_static()) {
        const auto & rank = input_pshape.rank().get_length();
        // `axis` must index an existing dimension of the input.
        NODE_VALIDATION_CHECK(this,
                              axis >= 0 && axis < rank,
                              "Axis: ", axis, " must be >= 0 and less than ", rank, "(input rank)");
        // Output shape equals the input shape except that the tiled dimension
        // is multiplied by the repeat count (only if that dim is static).
        output_pshape = input_pshape;
        if (output_pshape[axis].is_static()) {
            output_pshape[axis] *= tiles;
        }
    }

    // Element type is passed through unchanged.
    set_output_type(0, get_input_element_type(0), output_pshape);
}

// Serialize/deserialize the op's attributes for IR (de)serialization and
// comparison passes.
bool op::TileIE::visit_attributes(AttributeVisitor& visitor) {
    visitor.on_attribute("axis", axis);
    visitor.on_attribute("tiles", tiles);
    return true;
}
693
17,104
//
//  CodedOutputData.h
//  PBCoder
//
//  Created by <NAME> on 4/17/14.
//  Copyright (c) 2014 Tencent. All rights reserved.
//

#pragma once
#ifdef __OBJC__
#import <Foundation/Foundation.h>
#import <UIKit/UIKit.h>
#endif
#ifdef __cplusplus
#import <MacTypes.h>
#endif

#ifdef __cplusplus

@class WXPBGeneratedMessage;

// Sequential writer that serializes protobuf wire-format data into a
// caller-supplied buffer.  Maintains a write cursor (`position`) into a
// fixed-size buffer; checkNoSpaceLeft() guards against overruns.
// NOTE(review): exact overflow behavior (throw vs. assert) is defined in the
// implementation file, not visible here.
class CodedOutputData {
    uint8_t* const bufferPointer;  // start of the output buffer (not owned)
    size_t bufferLength;           // total capacity in bytes
    int32_t position;              // next write offset from bufferPointer

    void checkNoSpaceLeft();

public:
    // Wrap a raw buffer of `len` bytes.
    CodedOutputData(void* ptr, size_t len);
    // Wrap an NSMutableData's backing store.
    CodedOutputData(NSMutableData* odata);

    int32_t spaceLeft();
    // Advance the write cursor by `addedSize` bytes without writing.
    void seek(size_t addedSize);
    uint8_t* curWritePointer();

    // --- Low-level primitives (no field tag) ---
    void writeRawByte(uint8_t value);
    void writeTag(int32_t fieldNumber, int32_t format);
    void writeRawLittleEndian32(int32_t value);
    void writeRawLittleEndian64(int64_t value);

    /**
     * Encode and write a varint.  value is treated as
     * unsigned, so it won't be sign-extended if negative.
     */
    void writeRawVarint32(int32_t value);
    void writeRawVarint64(int64_t value);

    void writeRawData(NSData* data);
    void writeRawData(NSData* data, int32_t offset, int32_t length);

    // --- Tagged field writers: emit the field tag, then the value ---
    void writeData(int32_t fieldNumber, NSData* value);
    void writeDouble(int32_t fieldNumber, Float64 value);
    void writeFloat(int32_t fieldNumber, Float32 value);
    void writeUInt64(int32_t fieldNumber, int64_t value);
    void writeInt64(int32_t fieldNumber, int64_t value);
    void writeInt32(int32_t fieldNumber, int32_t value);
    void writeFixed64(int32_t fieldNumber, int64_t value);
    void writeFixed32(int32_t fieldNumber, int32_t value);
    void writeBool(int32_t fieldNumber, BOOL value);
    void writeString(int32_t fieldNumber, NSString* value);
    void writeUInt32(int32_t fieldNumber, int32_t value);
    void writeSFixed32(int32_t fieldNumber, int32_t value);
    void writeSFixed64(int32_t fieldNumber, int64_t value);
    void writeSInt32(int32_t fieldNumber, int32_t value);
    void writeSInt64(int32_t fieldNumber, int64_t value);

    // --- Untagged value writers (value only, caller has written the tag) ---
    void writeDoubleNoTag(Float64 value);
    void writeFloatNoTag(Float32 value);
    void writeUInt64NoTag(int64_t value);
    void writeInt64NoTag(int64_t value);
    void writeInt32NoTag(int32_t value);
    void writeFixed64NoTag(int64_t value);
    void writeFixed32NoTag(int32_t value);
    void writeBoolNoTag(BOOL value);
    void writeStringNoTag(NSString* value);
    // Variant for when the UTF-8 byte count is already known to the caller.
    void writeStringNoTag(NSString* value, NSUInteger numberOfBytes);
    void writeDataNoTag(NSData* value);
    void writeUInt32NoTag(int32_t value);
    void writeEnumNoTag(int32_t value);
    void writeSFixed32NoTag(int32_t value);
    void writeSFixed64NoTag(int64_t value);
    void writeSInt32NoTag(int32_t value);
    void writeSInt64NoTag(int64_t value);

    /**
     * Write an enum field, including tag, to the stream.  Caller is responsible
     * for converting the enum value to its numeric value.
     */
    void writeEnum(int32_t fieldNumber, int32_t value);

    void writeMessageNoTag(WXPBGeneratedMessage* value);
    void writeMessage(int32_t fieldNumber, WXPBGeneratedMessage* value);
};

#endif
1,091
540
// ***************************************************************************** // Copyright (c) 2020, Intel Corporation All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // // Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, // THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR // PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR // CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, // EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, // PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; // OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, // WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR // OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, // EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
// *****************************************************************************

#include "utils.hpp"
#include "tbb/parallel_invoke.h"
#include <iostream>
#include <array>
#include <vector>

using namespace utils;

namespace
{

// Non-owning FIFO view over a contiguous array slice.
// pop() advances 'head'; push() appends at 'tail'. No allocation/ownership.
template<class T>
struct buffer_queue
{
    using v_type = T;

    v_type* head;  // next unread element
    v_type* tail;  // one past the last element

    buffer_queue(v_type* _head, int size)
    {
        head = _head;
        tail = head + size;
    }

    inline v_type* pop() { return head++; }
    inline bool not_empty() const { return head < tail; }
    inline void push(v_type* val) { *(tail++) = *val; }
    inline uint64_t size() const { return tail - head; }
    // Remaining element count as int, for std::copy_n below.
    inline int copy_size() const { return size(); }
};

// Core two-way merge loop: move the smaller front element into 'out' until
// one input is exhausted. Ties take from 'left' (compare(right, left) is
// false), which is what keeps the merge stable.
template<class T, class Compare = utils::less<T>>
inline void merge_sorted_main_loop(buffer_queue<T>& left, buffer_queue<T>& right,
                                   buffer_queue<T>& out,
                                   const Compare& compare = Compare())
{
    while (left.not_empty() && right.not_empty())
    {
        if (compare(*right.head, *left.head))
            out.push(right.pop());
        else
            out.push(left.pop());
    }
}

// Sequential merge of two sorted runs [left, left+left_size) and
// [right, right+right_size) into 'out'.
template<class T, class Compare = utils::less<T>>
void merge_sorted(T* left, int left_size, T* right, int right_size, T* out,
                  const Compare& compare = Compare())
{
    auto left_buffer = buffer_queue<T>(left, left_size);
    auto right_buffer = buffer_queue<T>(right, right_size);
    auto out_buffer = buffer_queue<T>(out, 0);

    merge_sorted_main_loop(left_buffer, right_buffer, out_buffer, compare);

    // Only one buffer still has items; no need to shift out_buffer.tail.
    // The second copy is skipped when the right-hand remainder already sits
    // exactly where it belongs (out_buffer.tail == right_buffer.head),
    // i.e. a partially in-place merge.
    std::copy_n(left_buffer.head, left_buffer.copy_size(), out_buffer.tail);
    if (out_buffer.tail != right_buffer.head)
        std::copy_n(right_buffer.head, right_buffer.copy_size(), out_buffer.tail);
}

// Divide-and-conquer parallel merge: split the larger run at its midpoint,
// binary-search the matching split point in the smaller run, then merge the
// two halves concurrently via tbb::parallel_invoke. Falls back to the
// sequential merge for small inputs or when the output overlaps an input.
template<class T, class Compare = utils::less<T>>
void merge_sorted_parallel(T* left, int left_size, T* right, int right_size,
                           T* out, const Compare& compare = Compare())
{
    auto split = [](T* first, int f_size, T* second, int s_size, T* out,
                    const Compare& compare = Compare(), bool swap = false)
    {
        auto f_middle_pos = f_size/2;
        auto first_middle = std::next(first, f_middle_pos);
        auto first_end = std::next(first, f_size);
        auto second_end = std::next(second, s_size);
        const auto& first_middle_value = *first_middle;

        auto equal = [](const T& left, const T& right, const Compare& compare)
        {
            return !compare(left, right) && !compare(right, left);
        };

        // If the element after the midpoint equals the midpoint value, move
        // the split past the whole run of equal elements (upper_bound) so
        // equal keys never straddle the two sub-merges — required for
        // stability.
        if (std::next(first_middle) != first_end &&
            equal(*std::next(first_middle), *first_middle, compare))
        {
            first_middle = std::upper_bound(first, first_end, first_middle_value, compare);
        }
        else
        {
            first_middle = std::next(first_middle);
        }
        f_middle_pos = std::distance(first, first_middle);

        // Matching split point in the other run.
        auto second_middle = std::upper_bound(second, second_end, first_middle_value, compare);
        decltype(f_middle_pos) s_middle_pos = std::distance(second, second_middle);
        auto out_middle = std::next(out, f_middle_pos + s_middle_pos);
        auto out_end = std::next(out, f_size + s_size);

        // In order to keep ordering stable it is important to pass the 'left'
        // buffer as the first parameter to merge_sorted_parallel.
        // So, if 'first' is actually the 'right' buffer, we must swap back.
        if (swap)
        {
            std::swap(first, second);
            std::swap(f_middle_pos, s_middle_pos);
            std::swap(f_size, s_size);
            std::swap(first_middle, second_middle);
            std::swap(first_end, second_end);
        }

        // Degenerate split, or 'second' aliases the output range: recursing
        // would be unsafe/pointless, so merge sequentially.
        if (((first_middle == first_end) && (second_middle == second_end)) ||
            (second >= out && second <= out_end))
        {
            merge_sorted(first, f_size, second, s_size, out, compare);
        }
        else
        {
            tbb::parallel_invoke(
                [&] () { merge_sorted_parallel(first, f_middle_pos,
                                               second, s_middle_pos,
                                               out, compare); },
                [&] () { merge_sorted_parallel(first_middle, f_size - f_middle_pos,
                                               second_middle, s_size - s_middle_pos,
                                               out_middle, compare); }
            );
        }
    };

    // Below this size the sequential merge wins; above it, split the larger
    // of the two runs.
    auto constexpr limit = 512;
    if (left_size == 0)
    {
        // parallel_copy is a helper from utils.hpp (definition not visible
        // here) — presumably a chunked parallel memcpy; confirm in utils.hpp.
        parallel_copy(right, out, right_size);
    }
    else if (right_size == 0)
    {
        parallel_copy(left, out, left_size);
    }
    else if (left_size >= right_size && left_size > limit)
    {
        split(left, left_size, right, right_size, out, compare);
    }
    else if (left_size < right_size && right_size > limit)
    {
        // swap=true: 'right' is passed as the split target, lambda swaps back.
        split(right, right_size, left, left_size, out, compare, true);
    }
    else
    {
        merge_sorted(left, left_size, right, right_size, out, compare);
    }
}

// Leaf case of the parallel sort: plain std::stable_sort on a small range.
template<class T, class Compare = utils::less<T>>
void stable_sort_inner_sort(T* data, int begin, int end,
                            const Compare& compare = Compare())
{
    std::stable_sort(data + begin, data + end, compare);
}

// Recursive parallel stable sort over data[begin, end), using 'temp' as a
// ping-pong scratch buffer. Returns the buffer ('data' or 'temp') that holds
// the sorted result for this range — callers must check which one.
template<class T, class Compare = utils::less<T>>
T* stable_sort_impl(T* data, T* temp, int begin, int end,
                    const Compare& compare = Compare())
{
    auto constexpr limit = 512;
    if (end - begin <= limit)
    {
        stable_sort_inner_sort(data, begin, end, compare);
        return data;
    }

    auto middle = begin + (end - begin) / 2;
    T* left = nullptr;
    T* right = nullptr;
    tbb::parallel_invoke(
        [&] () { left = stable_sort_impl(data, temp, begin, middle, compare); },
        [&] () { right = stable_sort_impl(data, temp, middle, end, compare); }
    );

    // Merge into whichever buffer the left half did NOT end up in, so the
    // merge never writes over its own unread input.
    auto out = data;
    if (left == data)
        out = temp;

    merge_sorted_parallel<T>(std::next(left, begin), middle - begin,
                             std::next(right, middle), end - middle,
                             std::next(out, begin), compare);
    return out;
}

// Entry point: stable-sorts data[0, len). Allocates an O(len) scratch buffer
// and copies the result back if the final merge landed in scratch. Falls back
// to std::stable_sort when TBB is unavailable.
template<class T, class Compare = utils::less<T>>
void parallel_stable_sort_(T* data, uint64_t len,
                           const Compare& compare = Compare())
{
#if SUPPORTED_TBB_VERSION
    std::unique_ptr<T[]> temp(new T[len]);
    T* result = nullptr;
    tbb_control::get_arena().execute([&]() {
        result = stable_sort_impl(data, temp.get(), 0, static_cast<int>(len), compare);
    });
    if (result == temp.get())
    {
        parallel_copy(result, data, len);
    }
#else
    std::stable_sort(data, data + len, compare);
#endif
}

// Argsort core: fill 'index' with 0..len-1, then stable-sort the indices with
// the supplied comparator (which is expected to compare the indexed data).
template<class I, class Compare>
void parallel_stable_argsort__(I* index, uint64_t len, const Compare& compare)
{
    // fill_index_parallel / IndexCompare come from utils.hpp (not visible
    // here) — presumably iota-fill and an index-indirection comparator.
    fill_index_parallel(index, static_cast<I>(len));
    parallel_stable_sort_(index, len, compare);
}

// Typed argsort: indices sorted by the values in 'data'.
template<class I, class T, class Compare = utils::less<T>>
void parallel_stable_argsort_(I* index, T* data, uint64_t len,
                              const Compare& compare = Compare())
{
    parallel_stable_argsort__(index, len, IndexCompare<T, Compare>(data, compare));
}

// Type-erased argsort: 'data' is a byte buffer of 'len' items of 'size' bytes
// each, ordered by the external C comparison callback.
template<class I>
void parallel_stable_argsort_(I* index, void* data, uint64_t len,
                              uint64_t size, compare_func compare)
{
    using comparator_t = IndexCompare<void, compare_func>;
    auto comparator = comparator_t(data, size, compare);
    parallel_stable_argsort__(index, len, comparator);
}

// Sorts a buffer of fixed-size opaque items (ItemSize bytes each) with an
// external comparator; instantiated for item sizes 1..MaxFixSize below.
template<int ItemSize>
struct parallel_sort_fixed_size
{
    static void call(void* begin, uint64_t len, compare_func cmp)
    {
        using comparator_t = ExternalCompare<compare_func, ItemSize>;
        auto range = byte_range<ItemSize>(begin, len);
        auto comparator = comparator_t(cmp);
        parallel_stable_sort_(range.begin(), len, comparator);
    }
};

} // namespace

// --- C ABI surface -----------------------------------------------------------
// The macros below stamp out one extern "C" wrapper per (index type, element
// type) combination, e.g. parallel_stable_argsort_u64i32.

// Argsort wrapper with a runtime ascending/descending flag.
#define declare_single_argsort(index_prefix, type_prefix, ity, ty) \
void parallel_stable_argsort_##index_prefix##type_prefix(ity* index, void* begin, uint64_t len, uint8_t ascending) \
{ \
    if (ascending) { \
        auto cmp = utils::less<ty>(); \
        parallel_stable_argsort_(reinterpret_cast<ity*>(index), reinterpret_cast<ty*>(begin), len, cmp); \
    } else { \
        auto cmp = utils::greater<ty>(); \
        parallel_stable_argsort_(reinterpret_cast<ity*>(index), reinterpret_cast<ty*>(begin), len, cmp); \
    } \
}

// One argsort wrapper per supported index width.
#define declare_argsort(prefix, ty) \
declare_single_argsort(u8, prefix, uint8_t, ty) \
declare_single_argsort(u16, prefix, uint16_t, ty) \
declare_single_argsort(u32, prefix, uint32_t, ty) \
declare_single_argsort(u64, prefix, uint64_t, ty)

// Generic (type-erased) argsort wrapper taking an external comparator.
#define declare_generic_argsort(prefix, ity) \
void parallel_stable_argsort_##prefix##v(void* index, void* begin, uint64_t len, uint64_t size, void* compare) \
{ \
    auto cmp = reinterpret_cast<compare_func>(compare); \
    parallel_stable_argsort_(reinterpret_cast<ity*>(index), begin, len, size, cmp); \
}

// Value sort wrapper plus its argsort family.
#define declare_sort(prefix, ty) \
void parallel_stable_sort_##prefix(void* begin, uint64_t len) \
{ parallel_stable_sort_(reinterpret_cast<ty*>(begin), len); } \
declare_argsort(prefix, ty)

// Signed and unsigned variants for a given bit width.
#define declare_int_sort(bits) \
declare_sort(i##bits, int##bits##_t) \
declare_sort(u##bits, uint##bits##_t)

extern "C"
{
    declare_int_sort(8)
    declare_int_sort(16)
    declare_int_sort(32)
    declare_int_sort(64)

    declare_sort(f32, float)
    declare_sort(f64, double)

    declare_generic_argsort(u8, uint8_t)
    declare_generic_argsort(u16, uint16_t)
    declare_generic_argsort(u32, uint32_t)
    declare_generic_argsort(u64, uint64_t)

    // Generic entry point for opaque fixed-size items: small items dispatch
    // to a size-specialized direct sort; larger items are sorted indirectly
    // via a u64 argsort followed by a permutation (sort_by_argsort).
    void parallel_stable_sort(void* begin, uint64_t len, uint64_t size, void* compare)
    {
        static const constexpr auto MaxFixSize = 32;
        // fill_parallel_sort_array presumably builds a dispatch table of
        // parallel_sort_fixed_size<1..MaxFixSize>::call — confirm in utils.hpp.
        static const std::array<parallel_sort_call, MaxFixSize> fixed_size_sort =
            fill_parallel_sort_array<MaxFixSize, parallel_sort_fixed_size>();
        auto cmp = reinterpret_cast<compare_func>(compare);

        if (size <= MaxFixSize)
        {
            // NOTE(review): size == 0 would index fixed_size_sort[-1];
            // callers apparently never pass 0 — confirm.
            fixed_size_sort[size - 1](begin, len, cmp);
        }
        else
        {
            sort_by_argsort<uint64_t>(begin, len, size, cmp, parallel_stable_argsort_<uint64_t>);
        }
    }
}

#undef declare_int_sort
#undef declare_sort
#undef declare_argsort
#undef declare_single_argsort
4,718
3,710
<reponame>wofogen/tahoma2d
#pragma once

#ifndef TIIO_MESH_H
#define TIIO_MESH_H

#include "tlevel_io.h"

//*****************************************************************************************
//    TLevelWriterMesh  declaration
//*****************************************************************************************

// Level writer for mesh image files. Instances are produced through the
// static create() factory, which is the hook the level-IO registry uses.
class TLevelWriterMesh final : public TLevelWriter {
public:
  TLevelWriterMesh(const TFilePath &path, TPropertyGroup *winfo);
  ~TLevelWriterMesh();

  // Returns a writer for the single frame identified by fid.
  TImageWriterP getFrameWriter(TFrameId fid) override;

public:
  // Factory function matching the TLevelWriter creation signature.
  static TLevelWriter *create(const TFilePath &f, TPropertyGroup *winfo) {
    return new TLevelWriterMesh(f, winfo);
  }

private:
  // not implemented: class is non-copyable
  TLevelWriterMesh(const TLevelWriterMesh &);
  TLevelWriterMesh &operator=(const TLevelWriterMesh &);
};

//*****************************************************************************************
//    TLevelReaderMesh  declaration
//*****************************************************************************************

// Level reader for mesh image files; mirror of TLevelWriterMesh.
class TLevelReaderMesh final : public TLevelReader {
public:
  TLevelReaderMesh(const TFilePath &path);
  ~TLevelReaderMesh();

  // Returns a reader for the single frame identified by fid.
  TImageReaderP getFrameReader(TFrameId fid) override;

public:
  // Factory function matching the TLevelReader creation signature.
  static TLevelReader *create(const TFilePath &f) {
    return new TLevelReaderMesh(f);
  }

private:
  // not implemented: class is non-copyable
  TLevelReaderMesh(const TLevelReaderMesh &);
  TLevelReaderMesh &operator=(const TLevelReaderMesh &);
};

#endif  /* TIIO_MESH_H */
409
517
<reponame>msercheli/openexr
//
// SPDX-License-Identifier: BSD-3-Clause
// Copyright (c) Contributors to the OpenEXR Project.
//

// Test requires assert(); force-enable it even in release builds.
#ifdef NDEBUG
#    undef NDEBUG
#endif

#include <ImfRgbaFile.h>
#include <ImfArray.h>
#include <ImfThreading.h>
#include "IlmThread.h"
#include "ImathMath.h"
#include <stdio.h>
#include <assert.h>
#include <algorithm>

using namespace OPENEXR_IMF_NAMESPACE;
using namespace std;
using namespace IMATH_NAMESPACE;

namespace {

// Fills the buffer with a smooth sinusoidal color pattern (distinct R/G/B,
// alpha modulates the color channels).
void
fillPixelsColor (Array2D <Rgba> &pixels, int w, int h)
{
    for (int y = 0; y < h; ++y)
    {
        for (int x = 0; x < w; ++x)
        {
            Rgba &p = pixels[y][x];

            p.r = 0.8 + 0.5 * sin (x * 0.05);
            p.g = 0.8 + 0.5 * sin (x * 0.02 + y * 0.02);
            p.b = 0.8 + 0.5 * sin (y * 0.03);

            float t = 0.8 + 0.5 * sin (x * 0.05 - y * 0.05);

            p.r *= t;
            p.g *= t;
            p.b *= t;
            p.a = t;
        }
    }
}

// Fills the buffer with a gray (r == g == b) pattern plus varying alpha,
// used for the luminance-only (no chroma) test cases.
void
fillPixelsGray (Array2D <Rgba> &pixels, int w, int h)
{
    for (int y = 0; y < h; ++y)
    {
        for (int x = 0; x < w; ++x)
        {
            Rgba &p = pixels[y][x];

            p.r = 0.8 + 0.5 * sin (x * 0.05 - y * 0.05);
            p.g = p.r;
            p.b = p.r;
            p.a = 0.5 + 0.5 * cos (x * 0.05 - y * 0.05);
        }
    }
}

// Round-trips one luminance/chroma image: generate pixels, write with
// 'writeOrder', read back with 'readOrder', then compare against the
// original (tolerances differ per channel set, see below) and delete the
// file.
void
writeReadYca (const char fileName[],
              const Box2i &dw,
              RgbaChannels channels,
              LineOrder writeOrder,
              LineOrder readOrder,
              void (* fillPixels) (Array2D <Rgba> &pixels, int w, int h))
{
    int w = dw.max.x - dw.min.x + 1;
    int h = dw.max.y - dw.min.y + 1;
    Array2D <Rgba> pixels1 (h, w);
    Array2D <Rgba> pixels2 (h, w);

    cout << w << " by " << h << " pixels, "
            "channels " << channels << ", "
            "write order " << writeOrder << ", "
            "read order " << readOrder <<
            endl;

    fillPixels (pixels1, w, h);

    cout << "writing " << flush;

    {
        RgbaOutputFile out (fileName,
                            dw, dw,             // display window, data window
                            channels,
                            1,                  // pixelAspectRatio
                            V2f (0, 0),         // screenWindowCenter
                            1,                  // screenWindowWidth
                            writeOrder);
        out.setYCRounding (9, 9);
        // Frame buffer base is offset so that pixel (dw.min.x, dw.min.y)
        // maps to pixels1[0][0].
        out.setFrameBuffer (&pixels1[-dw.min.y][-dw.min.x], 1, w);
        out.writePixels (h);
    }

    cout << "reading " << flush;

    {
        RgbaInputFile in (fileName);
        in.setFrameBuffer (&pixels2[-dw.min.y][-dw.min.x], 1, w);

        switch (readOrder)
        {
          case INCREASING_Y:

            for (int y = dw.min.y; y <= dw.max.y; ++y)
                in.readPixels (y);

            break;

          case DECREASING_Y:

            for (int y = dw.max.y; y >= dw.min.y; --y)
                in.readPixels (y);

            break;

          case RANDOM_Y:

            // The stride-5 scan below visits every scan line exactly once
            // only when gcd(5, h) == 1, hence this precondition.
            assert (h % 5 != 0);

            for (int i = 0; i < h; ++i)
            {
                int y = dw.min.y + (i * 5) % h;
                in.readPixels (y);
            }

            break;

          case NUM_LINEORDERS:
          default:
              cerr << "invalid line order " << int(readOrder) << std::endl;
              break;
        }
    }

    cout << "comparing" << endl;

    for (int y = 0; y < h; ++y)
    {
        for (int x = 0; x < w; ++x)
        {
            const Rgba &p1 = pixels1[y][x];
            const Rgba &p2 = pixels2[y][x];

            if (channels & WRITE_C)
            {
                // Chroma subsampling is lossy: only require the brightest
                // channel to match within a small absolute error.
                float p1Max = max (p1.r, max (p1.g, p1.b));
                float p2Max = max (p2.r, max (p2.g, p2.b));
                assert (equalWithAbsError (p1Max, p2Max, 0.03f));
            }
            else
            {
                // Luminance-only files must round-trip g and b exactly.
                assert (p1.g == p2.g);
                assert (p1.b == p2.b);
            }

            if (channels & WRITE_A)
            {
                assert (p1.a == p2.a);
            }
        }
    }

    remove (fileName);
}

} // namespace

// Entry point: exercises luminance/chroma I/O across several data windows,
// channel sets (YCA/YC/YA/Y), write orders, read orders and thread counts.
void
testYca (const std::string &tempDir)
{
    try
    {
        cout << "Testing luminance/chroma input and output" << endl;

        std::string fileName = tempDir + "imf_test_yca.exr";

        // Assorted data windows: tall, wide, 1x1-ish, and one with a
        // negative origin.
        Box2i dataWindow[6];
        dataWindow[0] = Box2i (V2i (0, 0), V2i (1, 17));
        dataWindow[1] = Box2i (V2i (0, 0), V2i (5, 17));
        dataWindow[2] = Box2i (V2i (0, 0), V2i (17, 1));
        dataWindow[3] = Box2i (V2i (0, 0), V2i (17, 5));
        dataWindow[4] = Box2i (V2i (0, 0), V2i (1, 1));
        dataWindow[5] = Box2i (V2i (-18, -28), V2i (247, 255));

        int maxThreads = ILMTHREAD_NAMESPACE::supportsThreads()? 3: 0;

        for (int n = 0; n <= maxThreads; ++n)
        {
            if (ILMTHREAD_NAMESPACE::supportsThreads())
            {
                setGlobalThreadCount (n);
                cout << "\nnumber of threads: " << globalThreadCount() << endl;
            }

            for (int i = 0; i < 6; ++i)
            {
                for (int writeOrder = INCREASING_Y;
                     writeOrder <= DECREASING_Y;
                     ++writeOrder)
                {
                    for (int readOrder = INCREASING_Y;
                         readOrder <= RANDOM_Y;
                         ++readOrder)
                    {
                        writeReadYca (fileName.c_str(), dataWindow[i],
                                      WRITE_YCA,
                                      LineOrder (writeOrder),
                                      LineOrder (readOrder),
                                      fillPixelsColor);

                        writeReadYca (fileName.c_str(), dataWindow[i],
                                      WRITE_YC,
                                      LineOrder (writeOrder),
                                      LineOrder (readOrder),
                                      fillPixelsColor);

                        writeReadYca (fileName.c_str(), dataWindow[i],
                                      WRITE_YA,
                                      LineOrder (writeOrder),
                                      LineOrder (readOrder),
                                      fillPixelsGray);

                        writeReadYca (fileName.c_str(), dataWindow[i],
                                      WRITE_Y,
                                      LineOrder (writeOrder),
                                      LineOrder (readOrder),
                                      fillPixelsGray);
                    }
                }
            }
        }

        cout << "ok\n" << endl;
    }
    catch (const std::exception &e)
    {
        cerr << "ERROR -- caught exception: " << e.what() << endl;
        assert (false);
    }
}
2,621
60,067
#pragma once #include <ATen/core/Macros.h> #include <ATen/core/function_schema.h> #include <c10/util/either.h> #include <string> namespace torch { namespace jit { TORCH_API c10::either<c10::OperatorName, c10::FunctionSchema> parseSchemaOrName( const std::string& schemaOrName); TORCH_API c10::FunctionSchema parseSchema(const std::string& schema); TORCH_API c10::OperatorName parseName(const std::string& name); } // namespace jit } // namespace torch
167
399
#pragma once #include "Common.hpp" #include "graphics/GPUObjects.hpp" #include <deque> struct GPUContext; /** \brief Manage descriptor set allocations by creating and reusing internal descriptor pools. \details By default each pool will contain up to DEFAULT_SET_COUNT of each kind defined in createPool. \ingroup Graphics */ class DescriptorAllocator { public: /** Setup the allocator. \param context the GPU context \param poolCount the maximum number of pools to allocate */ void init(GPUContext* context, uint poolCount); /** Allocate a descriptor set from an available pool, using the specified layout. \param setLayout the layout to use \return the allocated descriptor set info */ DescriptorSet allocateSet(VkDescriptorSetLayout& setLayout); /** Mark an allocated descriptor set as unused \param set the set to free */ void freeSet(const DescriptorSet& set); /** Reset all descriptor pools */ void clean(); /** \return the ImGui dedicated descriptor pool */ VkDescriptorPool getImGuiPool(){ return _imguiPool.handle; } private: /** \brief Descriptor pool management info. */ struct DescriptorPool { VkDescriptorPool handle = VK_NULL_HANDLE; ///< Native handle. uint64_t lastFrame = 0; ///< Last frame used. uint allocated = 0; ///< Number of currently used descriptors. uint id = 0; ///< Pool id. }; /** Create a descriptor pool containing count descriptors. \param count the maximmum number of descriptors of each type to store in the poll \param combined should images and samplers be represented by combined descriptors or separate sampled image/sampler descriptors. \return descriptor pool info */ DescriptorPool createPool(uint count, bool combined); GPUContext* _context = nullptr; ///< The GPU context. std::deque<DescriptorPool> _pools; ///< Available pools. DescriptorPool _imguiPool; ///< ImGui dedicated pool. uint _maxPoolCount = 2; ///< Maximum number of pools to create. uint _currentPoolCount = 0; ///< Current number of created pools. };
583
3,138
<gh_stars>1000+
# Lint as: python3
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the shape utilities (tff.utils.get_shape and friends)."""

from absl.testing import parameterized
import tensorflow.compat.v2 as tf

import tf_quant_finance as tff

from tensorflow.python.framework import test_util  # pylint: disable=g-direct-tensorflow-import


@test_util.run_all_in_graph_and_eager_modes
class ShapeUtilsTest(parameterized.TestCase, tf.test.TestCase):
  """Covers get_shape, broadcast_tensors and common_shape."""

  @parameterized.named_parameters(
      {
          'testcase_name': 'BoolTensor',
          'x': [[True], [False]],
          'expected_shape': [2, 1]
      },
      {
          'testcase_name': 'RealTensor',
          'x': [[1], [2]],
          'expected_shape': [2, 1]
      },
  )
  def test_prefer_static_shape(self, x, expected_shape):
    # With fully-known inputs, get_shape should return the static shape.
    shape = tff.utils.get_shape(x)
    self.assertAllEqual(shape.as_list(), expected_shape)

  @parameterized.named_parameters(
      {
          'testcase_name': 'PartiallyKnown',
          'shape': [1, None],
      },
      {
          'testcase_name': 'Unkown',
          'shape': None,
      },
  )
  def test_prefer_static_shape_dynamic(self, shape):
    # Wrap in a tf.function with a partially/fully unknown signature to force
    # the dynamic-shape code path; the evaluated result must still match the
    # concrete shape of x.
    x = tf.ones([1, 2], dtype=tf.float64)
    @tf.function(input_signature=[tf.TensorSpec(shape, dtype=x.dtype)])
    def fn(x):
      return tff.utils.get_shape(x)
    shape = self.evaluate(fn(x))
    self.assertAllEqual(shape, x.shape.as_list())

  def test_broadcast_tensors_shapes(self):
    # Mixed dtypes and partially-unknown shapes: each output keeps its own
    # dtype but all are broadcast to the common [2, 2] shape.
    args = [tf.ones([1, 2], dtype=tf.float64),
            tf.constant([[True], [False]]),
            tf.zeros([1], dtype=tf.float32)]
    @tf.function(input_signature=[
        tf.TensorSpec([1, None], dtype=tf.float64),
        tf.TensorSpec([2, 1], dtype=tf.bool),
        tf.TensorSpec(None, dtype=tf.float32)])
    def fn(x, y, z):
      return tff.utils.broadcast_tensors(x, y, z)
    x, y, z = self.evaluate(fn(*args))
    with self.subTest('Arg1Dtype'):
      self.assertAllEqual(x.dtype, tf.float64)
    with self.subTest('Arg1Value'):
      self.assertAllEqual(x, [[1, 1], [1, 1]])
    with self.subTest('Arg2Dtype'):
      self.assertAllEqual(y.dtype, tf.bool)
    with self.subTest('Arg2Value'):
      self.assertAllEqual(y, [[True, True], [False, False]])
    with self.subTest('Arg3Dtype'):
      self.assertAllEqual(z.dtype, tf.float32)
    with self.subTest('Arg3Value'):
      self.assertAllEqual(z, [[0, 0], [0, 0]])

  def test_broadcast_tensors_shapes_incompatible(self):
    # [1, 2] and [3, 3] cannot be broadcast together.
    args = [tf.ones([1, 2], dtype=tf.float64),
            tf.zeros([3, 3], dtype=tf.float32)]
    with self.assertRaises(ValueError):
      tff.utils.broadcast_tensors(*args)

  @parameterized.named_parameters(
      ('DynamicShapedInputs', True),
      ('StaticShapedInputs', False)
  )
  def test_common_shape(self, dynamic):
    # common_shape should return the broadcast shape without materializing
    # broadcast tensors, in both static and dynamic-shape modes.
    args = [tf.ones([1, 2], dtype=tf.float64),
            tf.constant([[True], [False]]),
            tf.zeros([1], dtype=tf.float32)]
    def fn(x, y, z):
      return tff.utils.common_shape(x, y, z)
    if dynamic:
      fn = tf.function(fn, input_signature=[
          tf.TensorSpec([1, None], dtype=tf.float64),
          tf.TensorSpec([2, 1], dtype=tf.bool),
          tf.TensorSpec(None, dtype=tf.float32)])
    shape = fn(*args)
    self.assertAllEqual(shape, [2, 2])

  def test_common_shape_incompatible(self):
    args = [tf.ones([1, 2], dtype=tf.float64),
            tf.zeros([3, 3], dtype=tf.float32)]
    with self.assertRaises(ValueError):
      tff.utils.common_shape(*args)


if __name__ == '__main__':
  tf.test.main()
1,751
324
/*
 * Licensed to the Apache Software Foundation (ASF) under one or more
 * contributor license agreements.  See the NOTICE file distributed with
 * this work for additional information regarding copyright ownership.
 * The ASF licenses this file to You under the Apache License, Version 2.0
 * (the "License"); you may not use this file except in compliance with
 * the License.  You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.jclouds.azurecompute.arm.compute.config;

import javax.inject.Singleton;

import org.jclouds.azurecompute.arm.compute.AzureComputeService;
import org.jclouds.azurecompute.arm.compute.AzureComputeServiceAdapter;
import org.jclouds.azurecompute.arm.compute.domain.ResourceGroupAndNameAndIngressRules;
import org.jclouds.azurecompute.arm.compute.extensions.AzureComputeImageExtension;
import org.jclouds.azurecompute.arm.compute.extensions.AzureComputeSecurityGroupExtension;
import org.jclouds.azurecompute.arm.compute.functions.LocationToLocation;
import org.jclouds.azurecompute.arm.compute.functions.NetworkSecurityGroupToSecurityGroup;
import org.jclouds.azurecompute.arm.compute.functions.NetworkSecurityRuleToIpPermission;
import org.jclouds.azurecompute.arm.compute.functions.VMHardwareToHardware;
import org.jclouds.azurecompute.arm.compute.functions.VMImageToImage;
import org.jclouds.azurecompute.arm.compute.functions.VirtualMachineToNodeMetadata;
import org.jclouds.azurecompute.arm.compute.loaders.CreateSecurityGroupIfNeeded;
import org.jclouds.azurecompute.arm.compute.loaders.DefaultResourceGroup;
import org.jclouds.azurecompute.arm.compute.options.AzureTemplateOptions;
import org.jclouds.azurecompute.arm.compute.strategy.CreateResourcesThenCreateNodes;
import org.jclouds.azurecompute.arm.domain.Location;
import org.jclouds.azurecompute.arm.domain.NetworkSecurityGroup;
import org.jclouds.azurecompute.arm.domain.NetworkSecurityRule;
import org.jclouds.azurecompute.arm.domain.ResourceGroup;
import org.jclouds.azurecompute.arm.domain.VMHardware;
import org.jclouds.azurecompute.arm.domain.VMImage;
import org.jclouds.azurecompute.arm.domain.VirtualMachine;
import org.jclouds.compute.ComputeService;
import org.jclouds.compute.ComputeServiceAdapter;
import org.jclouds.compute.config.ComputeServiceAdapterContextModule;
import org.jclouds.compute.domain.Hardware;
import org.jclouds.compute.domain.NodeMetadata;
import org.jclouds.compute.domain.SecurityGroup;
import org.jclouds.compute.extensions.ImageExtension;
import org.jclouds.compute.extensions.SecurityGroupExtension;
import org.jclouds.compute.functions.NodeAndTemplateOptionsToStatement;
import org.jclouds.compute.functions.NodeAndTemplateOptionsToStatementWithoutPublicKey;
import org.jclouds.compute.options.TemplateOptions;
import org.jclouds.compute.strategy.CreateNodesInGroupThenAddToSet;
import org.jclouds.net.domain.IpPermission;
import org.jclouds.util.PasswordGenerator;

import com.google.common.base.Function;
import com.google.common.cache.CacheBuilder;
import com.google.common.cache.CacheLoader;
import com.google.common.cache.LoadingCache;
import com.google.inject.Provides;
import com.google.inject.TypeLiteral;

/**
 * Guice module wiring the Azure ARM provider into the jclouds compute
 * abstraction: adapter, domain-to-compute conversion functions, node creation
 * strategy, extensions, and a couple of singleton caches.
 */
public class AzureComputeServiceContextModule extends
      ComputeServiceAdapterContextModule<VirtualMachine, VMHardware, VMImage, Location> {

   @Override
   protected void configure() {
      super.configure();

      // Adapter between Azure ARM domain objects and the generic compute model.
      bind(new TypeLiteral<ComputeServiceAdapter<VirtualMachine, VMHardware, VMImage, Location>>() {
      }).to(AzureComputeServiceAdapter.class);

      // Conversion functions from Azure domain types to jclouds compute types.
      bind(new TypeLiteral<Function<VMImage, org.jclouds.compute.domain.Image>>() {
      }).to(VMImageToImage.class);
      bind(new TypeLiteral<Function<VMHardware, Hardware>>() {
      }).to(VMHardwareToHardware.class);
      bind(new TypeLiteral<Function<VirtualMachine, NodeMetadata>>() {
      }).to(VirtualMachineToNodeMetadata.class);
      bind(new TypeLiteral<Function<Location, org.jclouds.domain.Location>>() {
      }).to(LocationToLocation.class);
      bind(new TypeLiteral<Function<NetworkSecurityGroup, SecurityGroup>>() {
      }).to(NetworkSecurityGroupToSecurityGroup.class);
      bind(new TypeLiteral<Function<NetworkSecurityRule, IpPermission>>() {
      }).to(NetworkSecurityRuleToIpPermission.class);
      bind(ComputeService.class).to(AzureComputeService.class);
      install(new LocationsFromComputeServiceAdapterModule<VirtualMachine, VMHardware, VMImage, Location>() {
      });

      bind(TemplateOptions.class).to(AzureTemplateOptions.class);
      // Azure VMs are created with a password, not an injected public key.
      bind(NodeAndTemplateOptionsToStatement.class).to(NodeAndTemplateOptionsToStatementWithoutPublicKey.class);
      bind(CreateNodesInGroupThenAddToSet.class).to(CreateResourcesThenCreateNodes.class);

      // Loaders backing the singleton caches provided below.
      bind(new TypeLiteral<CacheLoader<ResourceGroupAndNameAndIngressRules, String>>() {
      }).to(CreateSecurityGroupIfNeeded.class);
      bind(new TypeLiteral<CacheLoader<String, ResourceGroup>>() {
      }).to(DefaultResourceGroup.class);

      bind(new TypeLiteral<ImageExtension>() {
      }).to(AzureComputeImageExtension.class);
      bind(new TypeLiteral<SecurityGroupExtension>() {
      }).to(AzureComputeSecurityGroupExtension.class);
   }

   /**
    * Password generator satisfying Azure guest OS password policy.
    */
   @Provides
   @Singleton
   protected PasswordGenerator.Config providePasswordGenerator() {
      // Guest passwords must be between 6-72 characters long.
      // Must contain an upper case character.
      // Must contain a lower case character.
      // Must contain a numeric digit.
      // Must contain a special character. Control characters are not allowed.
      return new PasswordGenerator()
            .lower().min(2).max(10)
            .upper().min(2).max(10)
            .numbers().min(2).max(10)
            .symbols().min(2).max(10);
   }

   /** Cache of security groups, keyed by resource group + name + rules; created on first lookup. */
   @Provides
   @Singleton
   protected final LoadingCache<ResourceGroupAndNameAndIngressRules, String> securityGroupMap(
         CacheLoader<ResourceGroupAndNameAndIngressRules, String> in) {
      return CacheBuilder.newBuilder().build(in);
   }

   /** Cache of default resource groups, keyed by location; created on first lookup. */
   @Provides
   @Singleton
   protected final LoadingCache<String, ResourceGroup> defaultResourceGroup(CacheLoader<String, ResourceGroup> in) {
      return CacheBuilder.newBuilder().build(in);
   }
}
2,172
777
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. #include "ui/views/bubble/bubble_window_targeter.h" #include "ui/aura/window.h" #include "ui/gfx/path.h" #include "ui/gfx/skia_util.h" #include "ui/views/bubble/bubble_dialog_delegate.h" #include "ui/views/bubble/bubble_frame_view.h" namespace views { BubbleWindowTargeter::BubbleWindowTargeter(BubbleDialogDelegateView* bubble) : wm::MaskedWindowTargeter(bubble->GetWidget()->GetNativeView()), bubble_(bubble) {} BubbleWindowTargeter::~BubbleWindowTargeter() { } bool BubbleWindowTargeter::GetHitTestMask(aura::Window* window, gfx::Path* mask) const { mask->addRect( gfx::RectToSkRect(bubble_->GetBubbleFrameView()->GetContentsBounds())); return true; } } // namespace views
367
945
<reponame>arobert01/ITK<gh_stars>100-1000
/*=========================================================================

  Program: GDCM (Grassroots DICOM). A DICOM library

  Copyright (c) 2006-2011 <NAME>
  All rights reserved.
  See Copyright.txt or http://gdcm.sourceforge.net/Copyright.html for details.

     This software is distributed WITHOUT ANY WARRANTY; without even
     the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR
     PURPOSE.  See the above copyright notice for more information.

=========================================================================*/
#ifndef GDCMJPEGLSCODEC_H
#define GDCMJPEGLSCODEC_H

#include "gdcmImageCodec.h"

namespace gdcm
{

class JPEGLSInternals;

/**
 * \brief JPEG-LS
 * \note codec that implement the JPEG-LS compression
 * this is an implementation of ImageCodec for JPEG-LS
 *
 * It uses the CharLS JPEG-LS implementation https://github.com/team-charls/charls
 */
class GDCM_EXPORT JPEGLSCodec : public ImageCodec
{
friend class ImageRegionReader;
public:
  JPEGLSCodec();
  ~JPEGLSCodec() override;

  // Transfer-syntax capability queries for this codec.
  bool CanDecode(TransferSyntax const &ts) const override;
  bool CanCode(TransferSyntax const &ts) const override;

  // Expected size (in bytes) of the decoded buffer.
  unsigned long GetBufferLength() const { return BufferLength; }
  void SetBufferLength(unsigned long l) { BufferLength = l; }

  bool Decode(DataElement const &is, DataElement &os) override;
  // Region decode: fills outBuffer with the sub-volume bounded by the
  // inclusive [min, max] extents on each axis.
  bool Decode(DataElement const &in, char* outBuffer, size_t inBufferLength,
              uint32_t inXMin, uint32_t inXMax, uint32_t inYMin, uint32_t inYMax,
              uint32_t inZMin, uint32_t inZMax);
  bool Code(DataElement const &in, DataElement &out) override;

  bool GetHeaderInfo(std::istream &is, TransferSyntax &ts) override;
  ImageCodec * Clone() const override;

  // Toggle between lossless and near-lossless JPEG-LS encoding.
  void SetLossless(bool l);
  bool GetLossless() const;

  /*
   * test.acr can look pretty bad, even with a lossy error of 2. Explanation follows:
   * I agree that the test image looks ugly. In this particular case I can
   * explain though.
   *
   * The image is 8 bit, but it does not use the full 8 bit dynamic range. The
   * black pixels have value 234 and the white 255. If you set allowed lossy
   * error to 2, you allow an error of about 10% of the actual dynamic range.
   * That is of course very visible.
   */
  /// [0-3] generally
  void SetLossyError(int error);

protected:
  bool DecodeExtent(
    char *buffer,
    unsigned int xmin, unsigned int xmax,
    unsigned int ymin, unsigned int ymax,
    unsigned int zmin, unsigned int zmax,
    std::istream & is
  );

  // Streaming-encoder protocol (row- or frame-at-a-time); this codec
  // reports which mode it supports via IsRowEncoder/IsFrameEncoder.
  bool StartEncode( std::ostream & ) override;
  bool IsRowEncoder() override;
  bool IsFrameEncoder() override;
  bool AppendRowEncode( std::ostream & out, const char * data, size_t datalen ) override;
  bool AppendFrameEncode( std::ostream & out, const char * data, size_t datalen ) override;
  bool StopEncode( std::ostream & ) override;

private:
  bool DecodeByStreamsCommon(const char *buffer, size_t totalLen, std::vector<unsigned char> &rgbyteOut);
  bool CodeFrameIntoBuffer(char * outdata, size_t outlen, size_t & complen, const char * indata, size_t inlen );

  unsigned long BufferLength;  // decoded buffer size in bytes
  int LossyError;              // allowed near-lossless error (0 = lossless)
};

} // end namespace gdcm

#endif //GDCMJPEGLSCODEC_H
1,026
511
#!/usr/bin/python
import os
import sys
import platform
import subprocess
import multiprocessing

# VERBOSE is read by call_scons() at call time; may be overridden in the
# user's shell.
VERBOSE = os.getenv("VERBOSE", "1")


def helpmsg(script):
    """Print usage information for this build driver and exit."""
    helpstr = '''
Usage:
    build:
        python %s <targetbuild>
        Allowed values for <target_build>: all, linux_unsecured, linux_secured,
        linux_unsecured_with_ra, linux_secured_with_ra, linux_unsecured_with_rd,
        linux_secured_with_rd, android, arduino, tizen, simulator, darwin,
        windows, msys
        Note: "linux" will build "linux_unsecured", "linux_secured",
        "linux_unsecured_with_ra", "linux_secured_with_ra",
        "linux_secured_with_rd", "linux_unsecured_with_mq",
        "linux_secured_with_tcp" & "linux_unsecured_with_tcp" &
        "linux_unsecured_with_rd".
        Any selection will build both debug and release versions of all
        available targets in the scope you've selected. To choose any specific
        command, please use the SCons commandline directly. Please refer to
        [IOTIVITY_REPO]/Readme.scons.txt.
    clean:
        python %s -c
'''
    print(helpstr % (script, script))
    sys.exit()


def call_scons(build_options, extra_option_str):
    """
    Format and run a single scons command; exit the script on failure.

    Arguments:
    build_options    -- {Dictionary} build flags (keys) associated with values;
    extra_option_str -- {String} extra options to append to scons command
    """
    cmd_line = "scons VERBOSE=" + VERBOSE
    for key in build_options:
        cmd_line += " " + key + "=" + str(build_options[key])
    cmd_line += " " + str(extra_option_str)
    print("Running : " + cmd_line)
    sys.stdout.flush()
    exit_code = subprocess.Popen([cmd_line], shell=True).wait()
    if exit_code != 0:
        # was bare exit(); sys.exit() is the supported API outside the REPL
        sys.exit(exit_code)


def build_all(flag, extra_option_str):
    """Build every target available on the current host platform."""
    if platform.system() == "Linux":
        build_linux_unsecured(flag, extra_option_str)
        build_linux_secured(flag, extra_option_str)
        build_linux_unsecured_with_ra(flag, extra_option_str)
        build_linux_secured_with_ra(flag, extra_option_str)
        build_linux_unsecured_with_rm(flag, extra_option_str)
        build_linux_unsecured_with_rd(flag, extra_option_str)
        build_linux_secured_with_rd(flag, extra_option_str)
        build_linux_unsecured_with_mq(flag, extra_option_str)
        build_linux_unsecured_with_tcp(flag, extra_option_str)
        build_linux_secured_with_tcp(flag, extra_option_str)
        build_simulator(flag, extra_option_str)
        build_android(flag, extra_option_str)
        build_arduino(flag, extra_option_str)
        build_tizen(flag, extra_option_str)

    if platform.system() == "Windows":
        build_windows(flag, extra_option_str)

    if platform.system() == "Darwin":
        build_darwin(flag, extra_option_str)


def build_linux(flag, extra_option_str):
    """Build the basic linux targets (unsecured and secured)."""
    build_linux_unsecured(flag, extra_option_str)
    build_linux_secured(flag, extra_option_str)


def build_linux_unsecured(flag, extra_option_str):
    print("*********** Build for linux ************")
    build_options = {
        'RELEASE': flag,
    }
    call_scons(build_options, extra_option_str)


def build_linux_secured_with_tcp(flag, extra_option_str):
    print("*********** Build for linux with Secured TCP ************")
    build_options = {
        'RELEASE': flag,
        'WITH_TCP': 1,
        'WITH_CLOUD': 1,
        'SECURED': 1,
    }
    call_scons(build_options, extra_option_str)


def build_linux_unsecured_with_tcp(flag, extra_option_str):
    # BUGFIX: this function used to be defined twice; the earlier definition
    # (which also set TARGET_TRANSPORT='IP') was dead code silently shadowed
    # by this one.  The effective (second) definition is kept.
    print("*********** Build for linux With tcp ************")
    build_options = {
        'RELEASE': flag,
        'WITH_TCP': '1',
    }
    call_scons(build_options, extra_option_str)


def build_linux_unsecured_with_rm(flag, extra_option_str):
    print("*********** Build for linux with RoutingManager************")
    build_options = {
        'ROUTING': 'GW',
        'RELEASE': flag,
    }
    call_scons(build_options, extra_option_str)


def build_linux_secured(flag, extra_option_str):
    print("*********** Build for linux with Security *************")
    build_options = {
        'RELEASE': flag,
        'SECURED': 1,
    }
    call_scons(build_options, extra_option_str)


def build_linux_unsecured_with_ra(flag, extra_option_str):
    print("*********** Build for linux With Remote Access *************")
    build_options = {
        'RELEASE': flag,
        'WITH_RA': 1,
        'WITH_RA_IBB': 1,
    }
    call_scons(build_options, extra_option_str)


def build_linux_secured_with_ra(flag, extra_option_str):
    print("*********** Build for linux With Remote Access & Security ************")
    build_options = {
        'RELEASE': flag,
        'WITH_RA': 1,
        'WITH_RA_IBB': 1,
        'SECURED': 1,
    }
    call_scons(build_options, extra_option_str)


def build_linux_unsecured_with_rd(flag, extra_option_str):
    print("*********** Build for linux With Resource Directory *************")
    build_options = {
        'RELEASE': flag,
        'RD_MODE': 'all',
    }
    call_scons(build_options, extra_option_str)


def build_linux_secured_with_rd(flag, extra_option_str):
    print("*********** Build for linux With Resource Directory & Security ************")
    build_options = {
        'RELEASE': flag,
        'RD_MODE': 'all',
        'SECURED': 1,
    }
    call_scons(build_options, extra_option_str)


def build_linux_unsecured_with_mq(flag, extra_option_str):
    print("*********** Build for linux With Message Queue ************")
    build_options = {
        'RELEASE': flag,
        'WITH_MQ': 'PUB,SUB,BROKER',
    }
    call_scons(build_options, extra_option_str)


def build_android(flag, extra_option_str):
    # Note: for android, as oic-resource uses C++11 feature stoi and to_string,
    # it requires gcc-4.9, currently only android-ndk-r10(for linux)
    # and windows android-ndk-r10(64bit target version) support these features.
    print("*********** Build for android armeabi *************")
    build_options = {
        'TARGET_OS': 'android',
        'TARGET_ARCH': 'armeabi',
        'RELEASE': flag,
    }
    call_scons(build_options, extra_option_str)


def build_android_x86(flag, extra_option_str):
    """Build android/x86 once per supported transport (IP, BT, BLE)."""
    print("*********** Build for android x86 *************")
    build_options = {
        'TARGET_OS': 'android',
        'TARGET_ARCH': 'x86',
        'RELEASE': flag,
        'TARGET_TRANSPORT': 'IP',
    }
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BT'
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BLE'
    call_scons(build_options, extra_option_str)


def build_android_x86_with_rm(flag, extra_option_str):
    print("*********** Build for android x86 with Routing Manager *************")
    build_options = {
        'TARGET_OS': 'android',
        'TARGET_ARCH': 'x86',
        'ROUTING': 'GW',
        'RELEASE': flag,
        'TARGET_TRANSPORT': 'IP',
    }
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BT'
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BLE'
    call_scons(build_options, extra_option_str)


def build_android_armeabi(flag, extra_option_str):
    print("*********** Build for android armeabi *************")
    build_options = {
        'TARGET_OS': 'android',
        'TARGET_ARCH': 'armeabi',
        'RELEASE': flag,
        'TARGET_TRANSPORT': 'IP',
    }
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BT'
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BLE'
    call_scons(build_options, extra_option_str)


def build_android_armeabi_with_rm(flag, extra_option_str):
    print("*********** Build for android armeabi with Routing Manager*************")
    build_options = {
        'TARGET_OS': 'android',
        'TARGET_ARCH': 'armeabi',
        'ROUTING': 'GW',
        'RELEASE': flag,
        'TARGET_TRANSPORT': 'IP',
    }
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BT'
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BLE'
    call_scons(build_options, extra_option_str)


def build_arduino(flag, extra_option_str):
    """Build arduino avr (mega) and arm (due) variants for each shield."""
    print("*********** Build for arduino avr *************")
    extra_option_str = "resource " + extra_option_str
    build_options = {
        'TARGET_OS': 'arduino',
        'UPLOAD': 'false',
        'BOARD': 'mega',
        'TARGET_ARCH': 'avr',
        'TARGET_TRANSPORT': 'IP',
        'SHIELD': 'ETH',
        'RELEASE': flag,
    }
    call_scons(build_options, extra_option_str)

    build_options['SHIELD'] = 'WIFI'
    call_scons(build_options, extra_option_str)

    build_options['TARGET_TRANSPORT'] = 'BLE'
    build_options['SHIELD'] = 'RBL_NRF8001'
    call_scons(build_options, extra_option_str)

    print("*********** Build for arduino arm *************")
    build_options['BOARD'] = 'arduino_due_x'
    build_options['TARGET_ARCH'] = 'arm'
    build_options['TARGET_TRANSPORT'] = 'IP'
    build_options['SHIELD'] = 'ETH'
    call_scons(build_options, extra_option_str)

    build_options['SHIELD'] = 'WIFI'
    call_scons(build_options, extra_option_str)

    # BLE support for the Arduino Due is currently unavailable.


def build_tizen(flag, extra_option_str):
    """Run the Tizen gbs build, then the octbstack lib/sample variants."""
    print("*********** Build for Tizen *************")
    cmd_line = "/bin/sh " + os.getcwd() + "/gbsbuild.sh"
    print("Running : " + cmd_line)
    # NOTE(review): the return code of gbsbuild.sh is ignored here (unlike
    # call_scons, which exits on failure) -- confirm whether that is intended.
    subprocess.Popen([cmd_line], shell=True).wait()

    print("*********** Build for Tizen octbstack lib and sample *************")
    extra_option_str = "-f resource/csdk/stack/samples/tizen/build/SConscript " + extra_option_str
    build_options = {
        'TARGET_OS': 'tizen',
        'TARGET_TRANSPORT': 'IP',
        'LOGGING': 'true',
        'RELEASE': flag,
    }
    call_scons(build_options, extra_option_str)

    print("*********** Build for Tizen octbstack lib and sample with Security*************")
    build_options['SECURED'] = 1
    call_scons(build_options, extra_option_str)

    print("*********** Build for Tizen octbstack lib and sample with Routing Manager*************")
    del build_options['SECURED']
    build_options['ROUTING'] = 'GW'
    call_scons(build_options, extra_option_str)


# Mac OS and iOS
def build_darwin(flag, extra_option_str):
    print("*********** Build for OSX *************")
    build_options = {
        'TARGET_OS': 'darwin',
        'SYS_VERSION': '10.9',
        'RELEASE': flag,
    }
    call_scons(build_options, extra_option_str)

    print("*********** Build for IOS i386 *************")
    build_options = {
        'TARGET_OS': 'ios',
        'TARGET_ARCH': 'i386',
        'SYS_VERSION': '7.0',
        'RELEASE': flag,
    }
    call_scons(build_options, extra_option_str)

    print("*********** Build for IOS x86_64 *************")
    build_options['TARGET_ARCH'] = 'x86_64'
    call_scons(build_options, extra_option_str)

    print("*********** Build for IOS armv7 *************")
    build_options['TARGET_ARCH'] = 'armv7'
    call_scons(build_options, extra_option_str)

    print("*********** Build for IOS armv7s *************")
    build_options['TARGET_ARCH'] = 'armv7s'
    call_scons(build_options, extra_option_str)

    print("*********** Build for IOS arm64 *************")
    build_options['TARGET_ARCH'] = 'arm64'
    call_scons(build_options, extra_option_str)


# Windows
def build_windows(flag, extra_option_str):
    print("*********** Build for Windows *************")
    os.environ["SCONSFLAGS"] = ""
    build_options = {
        'TARGET_OS': 'windows',
        'TARGET_ARCH': 'amd64',
        'RELEASE': flag,
        'WITH_RA': 0,
        'TARGET_TRANSPORT': 'IP',
        'SECURED': 1,
        'WITH_TCP': 0,
        'BUILD_SAMPLE': 'ON',
        'LOGGING': 'off',
        'TEST': 1,
        'RD_MODE': 'all',
    }
    call_scons(build_options, extra_option_str)


# Windows msys
def build_msys(flag, extra_option_str):
    print("*********** Build for msys_nt *************")
    os.environ["SCONSFLAGS"] = ""
    build_options = {
        'TARGET_OS': 'msys_nt',
        'TARGET_ARCH': 'x86_64',
        'RELEASE': flag,
        'WITH_RA': 0,
        'TARGET_TRANSPORT': 'IP',
        'SECURED': 1,
        'WITH_TCP': 0,
        'BUILD_SAMPLE': 'ON',
        'LOGGING': 'off',
        'TEST': 1,
        'RD_MODE': 'all',
    }
    call_scons(build_options, extra_option_str)


def build_simulator(flag, extra_option_str):
    print("*********** Build for simulator plugin *************")
    build_options = {
        'SIMULATOR': 1,
        'RELEASE': flag,
    }
    call_scons(build_options, extra_option_str)


def unit_tests():
    """Clean, rebuild and run the resource unit tests (debug only)."""
    print("*********** Unit test Start *************")
    build_options = {
        'RELEASE': 'false',
    }
    extra_option_str = "resource -c"
    call_scons(build_options, extra_option_str)

    build_options = {
        'LOGGING': 'false',
        'RELEASE': 'false',
    }
    extra_option_str = "resource"
    call_scons(build_options, extra_option_str)

    build_options = {
        'TEST': 1,
        'RELEASE': 'false',
    }
    call_scons(build_options, extra_option_str)

    build_options = {
        'TEST': 1,
        'SECURED': 1,
        'RELEASE': 'false',
    }
    call_scons(build_options, extra_option_str)

    print("*********** Unit test Stop *************")


# Simple command-line targets that just build debug + release of one or two
# builders; the special cases (all, -c, unit_tests) are handled in main().
TARGET_BUILDERS = {
    "linux": (build_linux,),
    "linux_unsecured": (build_linux_unsecured, build_linux_unsecured_with_rm),
    "linux_secured": (build_linux_secured,),
    "linux_unsecured_with_ra": (build_linux_unsecured_with_ra,),
    "linux_secured_with_ra": (build_linux_secured_with_ra,),
    "linux_unsecured_with_rd": (build_linux_unsecured_with_rd,),
    "linux_secured_with_rd": (build_linux_secured_with_rd,),
    "linux_unsecured_with_mq": (build_linux_unsecured_with_mq,),
    "linux_unsecured_with_tcp": (build_linux_unsecured_with_tcp,),
    "linux_secured_with_tcp": (build_linux_secured_with_tcp,),
    "android": (build_android,),
    "android_x86": (build_android_x86, build_android_x86_with_rm),
    "android_armeabi": (build_android_armeabi, build_android_armeabi_with_rm),
    "arduino": (build_arduino,),
    "windows": (build_windows,),
    "msys": (build_msys,),
    "tizen": (build_tizen,),
    "simulator": (build_simulator,),
    "darwin": (build_darwin,),
}


def main():
    """Parse the single command-line target and dispatch the builds."""
    if os.getenv("SCONSFLAGS", "") == "":
        os.environ["SCONSFLAGS"] = "-Q -j " + str(multiprocessing.cpu_count())

    arg_num = len(sys.argv)
    script_name = sys.argv[0]

    if arg_num == 1:
        build_all("true", "")
        build_all("false", "")
        unit_tests()
    elif arg_num == 2:
        target = sys.argv[1]
        if target == '-c':
            build_all("true", "-c")
            build_all("false", "-c")
        elif target == "all":
            build_all("true", "")
            build_all("false", "")
            unit_tests()
        elif target == "unit_tests":
            unit_tests()
        elif target in TARGET_BUILDERS:
            for builder in TARGET_BUILDERS[target]:
                builder("true", "")
                builder("false", "")
        else:
            helpmsg(script_name)
    else:
        helpmsg(script_name)

    print("===================== done =====================")


# BUGFIX: the driver logic used to run at module import time; the
# __main__ guard makes the script importable without side effects.
if __name__ == "__main__":
    main()
9,491
2,040
<filename>tests/profiles/cyclic/test_spec.json [ { "id": "default_output", "method": "cmp_with_file_contents", "command": "{{pipdeptree}}", "expected_output_file": "default.out", "expected_err_file": "default.err", "expected_returncode": 0 }, { "id": "warning_silenced", "method": "cmp_with_file_contents", "command": "{{pipdeptree}} -w silence", "expected_output_file": "default.out", "expected_err_file": null, "expected_returncode": 0 }, { "id": "fail_if_cyclic", "method": "cmp_with_file_contents", "command": "{{pipdeptree}} -w fail", "expected_output_file": "default.out", "expected_err_file": "default.err", "expected_returncode": 1 } ]
310
2,568
{ "StatusCake": { "domain": "statuscake.com", "tfa": [ "sms", "totp" ], "documentation": "https://www.statuscake.com/kb/knowledge-base/how-to-change-the-statuscake-password-for-login/", "keywords": [ "developer" ] } }
128
320
#include "File.hh" #include "Filename.hh" #include "LocalFile.hh" #include "GZFileAdapter.hh" #include "ZipFileAdapter.hh" #include "checked_cast.hh" #include <cstring> #include <memory> namespace openmsx { File::File() = default; [[nodiscard]] static std::unique_ptr<FileBase> init(std::string filename, File::OpenMode mode) { static constexpr uint8_t GZ_HEADER[3] = { 0x1F, 0x8B, 0x08 }; static constexpr uint8_t ZIP_HEADER[4] = { 0x50, 0x4B, 0x03, 0x04 }; std::unique_ptr<FileBase> file = std::make_unique<LocalFile>(std::move(filename), mode); if (file->getSize() >= 4) { uint8_t buf[4]; file->read(buf, 4); file->seek(0); if (memcmp(buf, GZ_HEADER, 3) == 0) { file = std::make_unique<GZFileAdapter>(std::move(file)); } else if (memcmp(buf, ZIP_HEADER, 4) == 0) { file = std::make_unique<ZipFileAdapter>(std::move(file)); } else { // only pre-cache non-compressed files if (mode == File::PRE_CACHE) { checked_cast<LocalFile*>(file.get())->preCacheFile(); } } } return file; } File::File(std::string filename, OpenMode mode) : file(init(std::move(filename), mode)) { } File::File(const Filename& filename, OpenMode mode) : File(filename.getResolved(), mode) { } File::File(Filename&& filename, OpenMode mode) : File(std::move(filename).getResolved(), mode) { } File::File(std::string filename, const char* mode) : file(std::make_unique<LocalFile>(std::move(filename), mode)) { } File::File(const Filename& filename, const char* mode) : File(filename.getResolved(), mode) { } File::File(Filename&& filename, const char* mode) : File(std::move(filename).getResolved(), mode) { } File::File(File&& other) noexcept : file(std::move(other.file)) { } File::File(std::unique_ptr<FileBase> file_) : file(std::move(file_)) { } File::~File() = default; File& File::operator=(File&& other) noexcept { file = std::move(other.file); return *this; } void File::close() { file.reset(); } void File::read(void* buffer, size_t num) { file->read(buffer, num); } void File::write(const void* buffer, size_t num) { 
file->write(buffer, num); } span<const uint8_t> File::mmap() { return file->mmap(); } void File::munmap() { file->munmap(); } size_t File::getSize() { return file->getSize(); } void File::seek(size_t pos) { file->seek(pos); } size_t File::getPos() { return file->getPos(); } void File::truncate(size_t size) { return file->truncate(size); } void File::flush() { file->flush(); } const std::string& File::getURL() const { return file->getURL(); } std::string File::getLocalReference() const { return file->getLocalReference(); } std::string_view File::getOriginalName() { std::string_view orig = file->getOriginalName(); return !orig.empty() ? orig : getURL(); } bool File::isReadOnly() const { return file->isReadOnly(); } time_t File::getModificationDate() { return file->getModificationDate(); } } // namespace openmsx
1,144
5,813
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. */ package org.apache.druid.indexing.overlord.supervisor.autoscaler; public class LagStats { private final long maxLag; private final long totalLag; private final long avgLag; public LagStats(long maxLag, long totalLag, long avgLag) { this.maxLag = maxLag; this.totalLag = totalLag; this.avgLag = avgLag; } public long getMaxLag() { return maxLag; } public long getTotalLag() { return totalLag; } public long getAvgLag() { return avgLag; } }
400
1,799
package io.cucumber.core.options;

import io.cucumber.core.options.CucumberProperties.CucumberPropertiesMap;
import org.junit.jupiter.api.Test;

import java.util.Collections;
import java.util.Map;

import static org.hamcrest.MatcherAssert.assertThat;
import static org.hamcrest.core.Is.is;
import static org.hamcrest.core.IsEqual.equalTo;
import static org.hamcrest.core.IsNull.notNullValue;
import static org.hamcrest.core.IsNull.nullValue;

/**
 * Unit tests for {@link CucumberProperties}: environment-backed lookup and
 * {@link CucumberPropertiesMap} key handling, including the fallback from a
 * dotted key (e.g. {@code b.c}) to an upper-cased underscored key
 * (e.g. {@code B_C}).
 */
class CucumberPropertiesTest {

    @Test
    void looks_up_value_from_environment() {
        // PATH exists in practically every environment, so this exercises the
        // environment lookup without pinning a specific value.
        assertThat(CucumberProperties.fromEnvironment().get("PATH"), is(notNullValue()));
    }

    @Test
    void returns_null_for_absent_key() {
        CucumberPropertiesMap properties = new CucumberPropertiesMap(Collections.emptyMap());
        assertThat(properties.get("pxfj54#"), is(nullValue()));
    }

    @Test
    void returns_default_for_absent_key() {
        CucumberPropertiesMap properties = new CucumberPropertiesMap(Collections.emptyMap());
        assertThat(properties.getOrDefault("pxfj54#", "default"), is("default"));
    }

    @Test
    void looks_up_dotted_value_from_resource_bundle_with_dots() {
        // Exact dotted key present in the delegate: returned as-is.
        Map<String, String> delegate = Collections.singletonMap("a.b", "a.b");
        CucumberPropertiesMap properties = new CucumberPropertiesMap(delegate);
        assertThat(properties.get("a.b"), is(equalTo("a.b")));
    }

    @Test
    void looks_up_underscored_value_from_resource_bundle_with_dots() {
        // Dotted key "b.c" falls back to the underscored entry "B_C".
        Map<String, String> delegate = Collections.singletonMap("B_C", "B_C");
        CucumberPropertiesMap properties = new CucumberPropertiesMap(delegate);
        assertThat(properties.get("b.c"), is(equalTo("B_C")));
    }

    @Test
    void looks_up_underscored_value_from_resource_bundle_with_underscores() {
        CucumberPropertiesMap properties;
        Map<String, String> delegate = Collections.singletonMap("B_C", "B_C");
        properties = new CucumberPropertiesMap(delegate);
        assertThat(properties.get("B_C"), is(equalTo("B_C")));
    }

    @Test
    void looks_up_value_by_exact_case_key() {
        // Mixed-case dotted key is matched exactly, without case folding.
        Map<String, String> delegate = Collections.singletonMap("c.D", "C_D");
        CucumberPropertiesMap properties = new CucumberPropertiesMap(delegate);
        assertThat(properties.get("c.D"), is(equalTo("C_D")));
    }

}
898
4,269
package com.netflix.niws.loadbalancer;

import com.netflix.appinfo.DataCenterInfo;
import com.netflix.appinfo.InstanceInfo;
import com.netflix.appinfo.MyDataCenterInfo;
import com.netflix.discovery.DefaultEurekaClientConfig;
import com.netflix.discovery.DiscoveryClient;
import com.netflix.discovery.DiscoveryManager;

import java.util.ArrayList;
import java.util.List;

import static org.easymock.EasyMock.expect;
import static org.powermock.api.easymock.PowerMock.createMock;

/**
 * Shared helpers for the Eureka-backed load balancer tests.
 */
public class LoadBalancerTestUtils
{
    /**
     * Build a single-element list holding one {@link InstanceInfo} with the
     * given application name, host, IP and port, registered against a
     * {@code MyOwn} data center.
     */
    static List<InstanceInfo> getDummyInstanceInfo(String appName, String host, String ipAddr, int port){
        List<InstanceInfo> list = new ArrayList<InstanceInfo>();

        InstanceInfo info = InstanceInfo.Builder.newBuilder().setAppName(appName)
                .setHostName(host)
                .setIPAddr(ipAddr)
                .setPort(port)
                .setDataCenterInfo(new MyDataCenterInfo(DataCenterInfo.Name.MyOwn))
                .build();

        list.add(info);

        return list;
    }

    /**
     * Create an EasyMock {@link DiscoveryClient} whose
     * {@code getEurekaClientConfig()} always returns a fresh
     * {@link DefaultEurekaClientConfig}.
     * NOTE(review): the mock is created but not replayed here; callers are
     * presumably expected to replay it -- confirm against the call sites.
     */
    static DiscoveryClient mockDiscoveryClient() {
        DiscoveryClient mockedDiscoveryClient = createMock(DiscoveryClient.class);
        expect(mockedDiscoveryClient.getEurekaClientConfig()).andReturn(new DefaultEurekaClientConfig()).anyTimes();
        return mockedDiscoveryClient;
    }
}
556
14,668
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "ui/ozone/platform/x11/x11_canvas_surface.h"

#include "base/bind.h"
#include "third_party/skia/include/core/SkCanvas.h"
#include "third_party/skia/include/core/SkImageInfo.h"
#include "ui/base/x/x11_display_util.h"
#include "ui/base/x/x11_xrandr_interval_only_vsync_provider.h"
#include "ui/gfx/vsync_provider.h"
#include "ui/gfx/x/connection.h"

namespace ui {

// Software-rendered canvas surface for X11; all painting/presentation work is
// delegated to the owned X11SoftwareBitmapPresenter.
// NOTE(review): the bare `true` constructor argument would read better as a
// named /*param=*/ comment -- confirm its meaning against
// X11SoftwareBitmapPresenter's constructor.
X11CanvasSurface::X11CanvasSurface(gfx::AcceleratedWidget widget)
    : x11_software_bitmap_presenter_(x11::Connection::Get(), widget, true) {}

X11CanvasSurface::~X11CanvasSurface() = default;

SkCanvas* X11CanvasSurface::GetCanvas() {
  return x11_software_bitmap_presenter_.GetSkCanvas();
}

// Only the pixel size is forwarded; the 'scale' argument is ignored here.
void X11CanvasSurface::ResizeCanvas(const gfx::Size& viewport_size,
                                    float scale) {
  x11_software_bitmap_presenter_.Resize(viewport_size);
}

void X11CanvasSurface::PresentCanvas(const gfx::Rect& damage) {
  x11_software_bitmap_presenter_.EndPaint(damage);
}

// VSync intervals come from XRandR; this provider reports interval only.
std::unique_ptr<gfx::VSyncProvider> X11CanvasSurface::CreateVSyncProvider() {
  return std::make_unique<XrandrIntervalOnlyVSyncProvider>();
}

bool X11CanvasSurface::SupportsAsyncBufferSwap() const {
  return true;
}

void X11CanvasSurface::OnSwapBuffers(SwapBuffersCallback swap_ack_callback) {
  x11_software_bitmap_presenter_.OnSwapBuffers(std::move(swap_ack_callback));
}

int X11CanvasSurface::MaxFramesPending() const {
  return x11_software_bitmap_presenter_.MaxFramesPending();
}

}  // namespace ui
669
1,056
/*
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 */

package org.netbeans.modules.web.jsf.navigation.graph.actions;

import java.awt.event.ActionEvent;
import java.io.IOException;
import javax.swing.AbstractAction;
import javax.swing.Action;
import javax.swing.JOptionPane;
import org.netbeans.modules.web.jsf.navigation.PageFlowController;
import org.netbeans.modules.web.jsf.navigation.graph.PageFlowScene;
import org.openide.filesystems.FileObject;
import org.openide.filesystems.FileUtil;
import org.openide.loaders.DataFolder;
import org.openide.loaders.DataObject;
import org.openide.util.ContextAwareAction;
import org.openide.util.Exceptions;
import org.openide.util.Lookup;
import org.openide.util.NbBundle;

/**
 * Context-aware "Add Page" action for the JSF page-flow graph: when the
 * current lookup contains a {@link PageFlowScene}, a scene-bound
 * {@link AddPageAction} is produced that creates a new JSP page in the
 * project's web folder.
 *
 * @author joelle
 */
public class AddPageActionProvider extends AbstractAction implements ContextAwareAction {

    public AddPageActionProvider() {
        super();
    }

    /**
     * Create the scene-bound action for the given context.
     * Enables this provider and returns an {@link AddPageAction} when the
     * lookup contains a {@link PageFlowScene}; otherwise disables the
     * provider and returns {@code null}.
     * NOTE(review): returning {@code null} from
     * {@code createContextAwareInstance} may surprise callers that expect a
     * (disabled) Action instance -- confirm against the menu/toolbar callers.
     */
    public Action createContextAwareInstance(Lookup lookup) {
        Action addPageAction;
        final PageFlowScene scene = lookup.lookup(PageFlowScene.class);
        if( scene != null) {
            setEnabled(true);
            addPageAction = new AddPageAction(scene);
        } else {
            setEnabled(false);
            addPageAction = null;
        }
        return addPageAction;
    }

    //    @Override
    //    public boolean isEnabled() {
    //        return super.isEnabled();
    //    }

    // The provider itself is never meant to be invoked directly; only the
    // context-aware instances created above perform real work.
    public void actionPerformed(ActionEvent arg0) {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    private static final String LBL_AddPage = NbBundle.getMessage(AddPageActionProvider.class, "LBL_AddPage");

    /**
     * Scene-bound action that prompts for a page name and creates the JSP
     * file in the web folder of the scene's page-flow controller.
     */
    private class AddPageAction extends AbstractAction {

        private final PageFlowScene scene;

        /** Creates a new instance of OpenPageAction
         * @param scene
         */
        public AddPageAction(PageFlowScene scene) {
            super();
            putValue(NAME, LBL_AddPage);
            this.scene = scene;
        }

        /**
         *
         * @return The Display Name of this option.
         */
        protected String getDisplayName() {
            return LBL_AddPage;
        }

        public void actionPerformed(ActionEvent e) {
            try {
                final PageFlowController pfc = scene.getPageFlowView().getPageFlowController();
                final FileObject webFileObject = pfc.getWebFolder();

                // Propose a free "pageN.jsp" name, then let the user edit it.
                // NOTE(review): a null return from showInputDialog (user
                // cancelled) is passed straight to createIndexJSP -- confirm
                // whether cancellation should abort instead.
                String name = FileUtil.findFreeFileName(webFileObject, "page", "jsp");
                name = JOptionPane.showInputDialog("Select Page Name", name);
                createIndexJSP(webFileObject, name);

            } catch (IOException ex) {
                Exceptions.printStackTrace(ex);
            }
            //            }
        }

        // Instantiate the standard JSP template as <name>.jsp in targetFolder;
        // silently does nothing when the template is not registered.
        private void createIndexJSP(FileObject targetFolder, String name ) throws IOException {
            final FileObject jspTemplate = FileUtil.getConfigFile( "Templates/JSP_Servlet/JSP.jsp" ); // NOI18N

            if (jspTemplate == null) {
                return; // Don't know the template
            }

            final DataObject mt = DataObject.find(jspTemplate);
            final DataFolder webDf = DataFolder.findFolder(targetFolder);
            mt.createFromTemplate(webDf, name); // NOI18N
        }
    }
}
1,726
14,668
// Copyright 2019 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/web_package/web_bundle_parser_factory.h"

#include "base/callback_helpers.h"
#include "components/web_package/web_bundle_parser.h"
#include "mojo/public/cpp/bindings/remote.h"
#include "net/http/http_util.h"

namespace web_package {

namespace {

// Mojo BundleDataSource backed by a base::File. Instances are self-owned:
// the disconnect handler deletes |this| when the pipe goes away.
class FileDataSource final : public mojom::BundleDataSource {
 public:
  FileDataSource(mojo::PendingReceiver<mojom::BundleDataSource> receiver,
                 base::File file)
      : receiver_(this, std::move(receiver)), file_(std::move(file)) {
    receiver_.set_disconnect_handler(base::BindOnce(
        &base::DeletePointer<FileDataSource>, base::Unretained(this)));
  }

  FileDataSource(const FileDataSource&) = delete;
  FileDataSource& operator=(const FileDataSource&) = delete;

 private:
  // Implements mojom::BundleDataSource.
  // Replies with the bytes actually read (possibly fewer than |length|), or
  // with nullopt when the read fails or yields no bytes.
  void Read(uint64_t offset, uint64_t length, ReadCallback callback) override {
    std::vector<uint8_t> buf(length);
    int bytes = file_.Read(offset, reinterpret_cast<char*>(buf.data()), length);
    if (bytes > 0) {
      buf.resize(bytes);
      std::move(callback).Run(std::move(buf));
    } else {
      std::move(callback).Run(absl::nullopt);
    }
  }

  mojo::Receiver<mojom::BundleDataSource> receiver_;
  base::File file_;
};

}  // namespace

WebBundleParserFactory::WebBundleParserFactory() = default;

WebBundleParserFactory::~WebBundleParserFactory() = default;

std::unique_ptr<mojom::BundleDataSource>
WebBundleParserFactory::CreateFileDataSourceForTesting(
    mojo::PendingReceiver<mojom::BundleDataSource> receiver,
    base::File file) {
  return std::make_unique<FileDataSource>(std::move(receiver),
                                          std::move(file));
}

void WebBundleParserFactory::GetParserForFile(
    mojo::PendingReceiver<mojom::WebBundleParser> receiver,
    base::File file) {
  mojo::PendingRemote<mojom::BundleDataSource> remote_data_source;
  auto data_source = std::make_unique<FileDataSource>(
      remote_data_source.InitWithNewPipeAndPassReceiver(), std::move(file));
  GetParserForDataSource(std::move(receiver), std::move(remote_data_source));

  // |data_source| will be destructed on |remote_data_source| destruction.
  // (Ownership is intentionally leaked here; the disconnect handler above
  // performs the delete.)
  data_source.release();
}

void WebBundleParserFactory::GetParserForDataSource(
    mojo::PendingReceiver<mojom::WebBundleParser> receiver,
    mojo::PendingRemote<mojom::BundleDataSource> data_source) {
  // TODO(crbug.com/1247939): WebBundleParserFactory doesn't support |base_url|.
  // For features::kWebBundlesFromNetwork should support |base_url|.
  auto parser = std::make_unique<WebBundleParser>(std::move(receiver),
                                                  std::move(data_source),
                                                  /*base_url=*/GURL());
  // |parser| will be destructed on remote mojo ends' disconnection.
  parser.release();
}

}  // namespace web_package
1,132
2,228
/*
 * Copyright 2011-2021 the original author or authors.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      https://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package org.springframework.data.jpa.repository.support;

import static org.assertj.core.api.Assertions.*;
import static org.mockito.Mockito.*;

import javax.persistence.metamodel.EntityType;
import javax.persistence.metamodel.Metamodel;
import javax.persistence.metamodel.Type;

import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import org.junit.jupiter.api.extension.ExtendWith;
import org.mockito.Mock;
import org.mockito.junit.jupiter.MockitoExtension;
import org.mockito.junit.jupiter.MockitoSettings;
import org.mockito.quality.Strictness;
import org.springframework.data.domain.Persistable;
import org.springframework.data.repository.core.EntityInformation;

/**
 * Unit tests for {@link JpaPersistableEntityInformation}.
 *
 * @author <NAME>
 * @author <NAME>
 */
@ExtendWith(MockitoExtension.class)
@MockitoSettings(strictness = Strictness.LENIENT)
class JpaPersistableEntityInformationUnitTests {

	@Mock Metamodel metamodel;

	@Mock EntityType<Foo> type;

	@Mock @SuppressWarnings("rawtypes") Type idType;

	// Stub just enough of the JPA metamodel for Foo to be resolvable as a
	// managed type with an id type.
	@BeforeEach
	@SuppressWarnings("unchecked")
	void setUp() {

		when(metamodel.managedType(Object.class)).thenThrow(IllegalArgumentException.class);
		when(metamodel.managedType(Foo.class)).thenReturn(type);
		when(type.getIdType()).thenReturn(idType);
	}

	// Foo's isNew() deliberately returns (id != null) -- the opposite of the
	// usual convention -- so these assertions prove that the entity
	// information delegates to the Persistable implementation instead of
	// deriving newness from the id itself.
	@Test
	void usesPersistableMethodsForIsNewAndGetId() {

		EntityInformation<Foo, Long> entityInformation = new JpaPersistableEntityInformation<Foo, Long>(Foo.class,
				metamodel);

		Foo foo = new Foo();
		assertThat(entityInformation.isNew(foo)).isFalse();
		assertThat(entityInformation.getId(foo)).isNull();

		foo.id = 1L;
		assertThat(entityInformation.isNew(foo)).isTrue();
		assertThat(entityInformation.getId(foo)).isEqualTo(1L);
	}

	// Persistable test fixture with intentionally inverted isNew() semantics
	// (see the comment on the test above).
	@SuppressWarnings("serial")
	class Foo implements Persistable<Long> {

		Long id;

		@Override
		public Long getId() {
			return id;
		}

		@Override
		public boolean isNew() {
			return id != null;
		}
	}
}
1,736
<gh_stars>1000+ /* Copyright (c) 2005-2021 Intel Corporation Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ // Example program that computes number of prime numbers up to n, // where n is a command line argument. The algorithm here is a // fairly efficient version of the sieve of Eratosthenes. // The parallel version demonstrates how to use parallel_reduce, // and in particular how to exploit lazy splitting. #include <cassert> #include <cstdio> #include <cstring> #include <cmath> #include <cstdlib> #include <cctype> #include <algorithm> #include "oneapi/tbb/parallel_reduce.h" #include "oneapi/tbb/global_control.h" #include "primes.hpp" //! If true, then print primes on stdout. static bool printPrimes = false; class Multiples { inline NumberType strike(NumberType start, NumberType limit, NumberType stride) { // Hoist "my_is_composite" into register for sake of speed. bool* is_composite = my_is_composite; assert(stride >= 2); for (; start < limit; start += stride) is_composite[start] = true; return start; } //! Window into conceptual sieve bool* my_is_composite; //! Indexes into window /** my_striker[k] is an index into my_composite corresponding to an odd multiple multiple of my_factor[k]. */ NumberType* my_striker; //! Prime numbers less than m. NumberType* my_factor; public: //! NumberType of factors in my_factor. 
NumberType n_factor; NumberType m; Multiples(NumberType n) { m = NumberType(sqrt(double(n))); // Round up to even m += m & 1; my_is_composite = new bool[m / 2]; my_striker = new NumberType[m / 2]; my_factor = new NumberType[m / 2]; n_factor = 0; memset(my_is_composite, 0, m / 2); for (NumberType i = 3; i < m; i += 2) { if (!my_is_composite[i / 2]) { if (printPrimes) printf("%d\n", (int)i); my_striker[n_factor] = strike(i / 2, m / 2, i); my_factor[n_factor++] = i; } } } //! Find primes in range [start,window_size), advancing my_striker as we go. /** Returns number of primes found. */ NumberType find_primes_in_window(NumberType start, NumberType window_size) { bool* is_composite = my_is_composite; memset(is_composite, 0, window_size / 2); for (std::size_t k = 0; k < n_factor; ++k) my_striker[k] = strike(my_striker[k] - m / 2, window_size / 2, my_factor[k]); NumberType count = 0; for (NumberType k = 0; k < window_size / 2; ++k) { if (!is_composite[k]) { if (printPrimes) printf("%ld\n", long(start + 2 * k + 1)); ++count; } } return count; } ~Multiples() { delete[] my_factor; delete[] my_striker; delete[] my_is_composite; } //------------------------------------------------------------------------ // Begin extra members required by parallel version //------------------------------------------------------------------------ // Splitting constructor Multiples(const Multiples& f, oneapi::tbb::split) : n_factor(f.n_factor), m(f.m), my_is_composite(nullptr), my_striker(nullptr), my_factor(f.my_factor) {} bool is_initialized() const { return my_is_composite != nullptr; } void initialize(NumberType start) { assert(start >= 1); my_is_composite = new bool[m / 2]; my_striker = new NumberType[m / 2]; for (std::size_t k = 0; k < n_factor; ++k) { NumberType f = my_factor[k]; NumberType p = (start - 1) / f * f % m; my_striker[k] = (p & 1 ? p + 2 * f : p + f) / 2; assert(m / 2 <= my_striker[k]); } } // Move other to *this. 
void move(Multiples& other) { // The swap moves the contents of other to *this and causes the old contents // of *this to be deleted later when other is destroyed. std::swap(my_striker, other.my_striker); std::swap(my_is_composite, other.my_is_composite); // other.my_factor is a shared pointer that was copied by the splitting constructor. // Set it to nullptr to prevent premature deletion by the destructor of ~other. assert(my_factor == other.my_factor); other.my_factor = nullptr; } //------------------------------------------------------------------------ // End extra methods required by parallel version //------------------------------------------------------------------------ }; //! Count number of primes between 0 and n /** This is the serial version. */ NumberType SerialCountPrimes(NumberType n) { // Two is special case NumberType count = n >= 2; if (n >= 3) { Multiples multiples(n); count += multiples.n_factor; if (printPrimes) printf("---\n"); NumberType window_size = multiples.m; for (NumberType j = multiples.m; j <= n; j += window_size) { if (j + window_size > n + 1) window_size = n + 1 - j; count += multiples.find_primes_in_window(j, window_size); } } return count; } //! Range of a sieve window. class SieveRange { //! Width of full-size window into sieve. const NumberType my_stride; //! Always multiple of my_stride NumberType my_begin; //! One past last number in window. NumberType my_end; //! Width above which it is worth forking. 
const NumberType my_grainsize; bool assert_okay() const { assert(my_begin % my_stride == 0); assert(my_begin <= my_end); assert(my_stride <= my_grainsize); return true; } public: //------------------------------------------------------------------------ // Begin signatures required by parallel_reduce //------------------------------------------------------------------------ bool is_divisible() const { return my_end - my_begin > my_grainsize; } bool empty() const { return my_end <= my_begin; } SieveRange(SieveRange& r, oneapi::tbb::split) : my_stride(r.my_stride), my_grainsize(r.my_grainsize), my_end(r.my_end) { assert(r.is_divisible()); assert(r.assert_okay()); NumberType middle = r.my_begin + (r.my_end - r.my_begin + r.my_stride - 1) / 2; middle = middle / my_stride * my_stride; my_begin = middle; r.my_end = middle; assert(assert_okay()); assert(r.assert_okay()); } //------------------------------------------------------------------------ // End of signatures required by parallel_reduce //------------------------------------------------------------------------ NumberType begin() const { return my_begin; } NumberType end() const { return my_end; } SieveRange(NumberType begin, NumberType end, NumberType stride, NumberType grainsize) : my_begin(begin), my_end(end), my_stride(stride), my_grainsize(grainsize < stride ? stride : grainsize) { assert(assert_okay()); } }; //! Loop body for parallel_reduce. /** parallel_reduce splits the sieve into subsieves. Each subsieve handles a subrange of [0..n]. */ class Sieve { public: //! Prime Multiples to consider, and working storage for this subsieve. ::Multiples multiples; //! NumberType of primes found so far by this subsieve. NumberType count; //! Construct Sieve for counting primes in [0..n]. 
Sieve(NumberType n) : multiples(n), count(0) {} //------------------------------------------------------------------------ // Begin signatures required by parallel_reduce //------------------------------------------------------------------------ void operator()(const SieveRange& r) { NumberType m = multiples.m; if (multiples.is_initialized()) { // Simply reuse "Multiples" structure from previous window // This works because parallel_reduce always applies // *this from left to right. } else { // Need to initialize "Multiples" because *this is a forked copy // that needs to be set up to start at r.begin(). multiples.initialize(r.begin()); } NumberType window_size = m; for (NumberType j = r.begin(); j < r.end(); j += window_size) { assert(j % multiples.m == 0); if (j + window_size > r.end()) window_size = r.end() - j; count += multiples.find_primes_in_window(j, window_size); } } void join(Sieve& other) { count += other.count; // Final value of multiples needs to final value of other multiples, // so that *this can correctly process next window to right. multiples.move(other.multiples); } Sieve(Sieve& other, oneapi::tbb::split) : multiples(other.multiples, oneapi::tbb::split()), count(0) {} //------------------------------------------------------------------------ // End of signatures required by parallel_reduce //------------------------------------------------------------------------ }; //! Count number of primes between 0 and n /** This is the parallel version. 
*/ NumberType ParallelCountPrimes(NumberType n, int number_of_threads, NumberType grain_size) { oneapi::tbb::global_control c(oneapi::tbb::global_control::max_allowed_parallelism, number_of_threads); // Two is special case NumberType count = n >= 2; if (n >= 3) { Sieve s(n); count += s.multiples.n_factor; if (printPrimes) printf("---\n"); // Explicit grain size and simple_partitioner() used here instead of automatic grainsize // determination because we want SieveRange to be decomposed down to grainSize or smaller. // Doing so improves odds that the working set fits in cache when evaluating Sieve::operator(). oneapi::tbb::parallel_reduce(SieveRange(s.multiples.m, n, s.multiples.m, grain_size), s, oneapi::tbb::simple_partitioner()); count += s.count; } return count; }
4,452
2,757
/* $NetBSD: float.h,v 1.6 2005/12/11 12:16:47 christos Exp $ */ #ifndef _ARM_FLOAT_H_ #define _ARM_FLOAT_H_ #ifndef __VFP_FP__ #define LDBL_MANT_DIG 64 #define LDBL_EPSILON 1.0842021724855044340E-19L #define LDBL_DIG 18 #define LDBL_MIN_EXP (-16381) #define LDBL_MIN 1.6810515715560467531E-4932L #define LDBL_MIN_10_EXP (-4931) #define LDBL_MAX_EXP 16384 #define LDBL_MAX 1.1897314953572317650E+4932L #define LDBL_MAX_10_EXP 4932 #endif #include <sys/float_ieee754.h> #ifndef __VFP_FP__ #if !defined(_ANSI_SOURCE) && !defined(_POSIX_C_SOURCE) && \ !defined(_XOPEN_SOURCE) || \ ((__STDC_VERSION__ - 0) >= 199901L) || \ ((_POSIX_C_SOURCE - 0) >= 200112L) || \ ((_XOPEN_SOURCE - 0) >= 600) || \ defined(_ISOC99_SOURCE) || defined(_NETBSD_SOURCE) #define DECIMAL_DIG 21 #endif /* !defined(_ANSI_SOURCE) && ... */ #endif /* !__VFP_FP__ */ #endif /* !_ARM_FLOAT_H_ */
459
478
<reponame>Illbatting/AdaptiveCards // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. #pragma once #include "AdaptiveInputLabelConfig.g.h" namespace winrt::AdaptiveCards::Rendering::Uwp::implementation { struct AdaptiveInputLabelConfig : AdaptiveInputLabelConfigT<AdaptiveInputLabelConfig> { AdaptiveInputLabelConfig(::AdaptiveCards::InputLabelConfig const& inputLabelConfig = {}); property<winrt::ForegroundColor> Color; property<bool> IsSubtle; property<winrt::TextSize> Size; property<hstring> Suffix; property<winrt::TextWeight> Weight; }; } namespace winrt::AdaptiveCards::Rendering::Uwp::factory_implementation { struct AdaptiveInputLabelConfig : AdaptiveInputLabelConfigT<AdaptiveInputLabelConfig, implementation::AdaptiveInputLabelConfig> { }; }
303
319
<reponame>Celebrate-future/openimaj<filename>core/core/src/main/java/org/openimaj/util/queue/InvertedPriorityQueue.java<gh_stars>100-1000 /** * Copyright (c) 2011, The University of Southampton and the individual contributors. * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * * Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * * Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * * Neither the name of the University of Southampton nor the names of its * contributors may be used to endorse or promote products derived from this * software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ package org.openimaj.util.queue; import java.util.Comparator; import java.util.PriorityQueue; /** * This class provides an inverted {@link PriorityQueue} implementation, where * objects that are higher (according to the provided {@link Comparator} or the * natural order) come first. * <p> * The Iterator provided in method {@link #iterator()} is <em>not</em> * guaranteed to traverse the elements of the priority queue in any particular * order. * * @author <NAME> (<EMAIL>) * * @param <T> * Type of objects stored in the queue */ public class InvertedPriorityQueue<T> extends PriorityQueue<T> { private static final long serialVersionUID = 1L; private static final int DEFAULT_INITIAL_CAPACITY = 11; /** * Creates a {@code InvertedPriorityQueue} with the default initial capacity * (11) that orders its elements according to their inverted * {@linkplain Comparable natural ordering}. */ public InvertedPriorityQueue() { super(DEFAULT_INITIAL_CAPACITY, InvertedComparableComparator.INSTANCE); } /** * Creates a {@code InvertedPriorityQueue} with the specified initial * capacity that orders its elements according to the inverse of the * specified comparator. * * @param initialCapacity * the initial capacity for this priority queue * @param comparator * the comparator that will be used to order this priority queue. * If {@code null}, the {@linkplain Comparable natural ordering} * of the elements will be used. Internally, the comparator is * inverted to reverse its meaning. * @throws IllegalArgumentException * if {@code initialCapacity} is less than 1 */ @SuppressWarnings("unchecked") public InvertedPriorityQueue(int initialCapacity, Comparator<? super T> comparator) { super(initialCapacity, comparator == null ? 
(Comparator<T>) InvertedComparableComparator.INSTANCE : new InvertedComparator<T>(comparator)); } /** * Creates a {@code InvertedPriorityQueue} with the specified initial * capacity that orders its elements according to their inverse * {@linkplain Comparable natural ordering}. * * @param initialCapacity * the initial capacity for this priority queue * @throws IllegalArgumentException * if {@code initialCapacity} is less than 1 */ public InvertedPriorityQueue(int initialCapacity) { super(initialCapacity, InvertedComparableComparator.INSTANCE); } protected Comparator<? super T> originalComparator() { if (comparator() instanceof InvertedComparator) return ((InvertedComparator<? super T>) comparator()).innerComparator; else return ComparableComparator.INSTANCE; } /** * Inverted natural order comparator. * * @author <NAME> (j<EMAIL>) */ private static class InvertedComparableComparator implements Comparator<Object> { public static final InvertedComparableComparator INSTANCE = new InvertedComparableComparator(); @Override @SuppressWarnings("unchecked") public int compare(Object o1, Object o2) { return ((Comparable<Object>) o2).compareTo(o1); } } /** * Natural order comparator. * * @author <NAME> (<EMAIL>) */ private static class ComparableComparator implements Comparator<Object> { public static final ComparableComparator INSTANCE = new ComparableComparator(); @Override @SuppressWarnings("unchecked") public int compare(Object o1, Object o2) { return ((Comparable<Object>) o1).compareTo(o2); } } /** * Inverted natural order comparator. * * @author <NAME> (<EMAIL>) */ private static class InvertedComparator<T> implements Comparator<T> { protected Comparator<? super T> innerComparator; public InvertedComparator(Comparator<? super T> innerComparator) { this.innerComparator = innerComparator; } @Override public int compare(T o1, T o2) { return innerComparator.compare(o2, o1); } } }
1,767
341
package auth.service; import auth.dto.AuthDto; import auth.entity.User; import edu.fudan.common.util.Response; import org.springframework.http.HttpHeaders; import java.util.List; import java.util.UUID; /** * @author fdse */ public interface UserService { /** * save user * * @param user user * @return user */ User saveUser(User user); /** * get all users * * @param headers headers * @return List<User> */ List<User> getAllUser(HttpHeaders headers); /** * create default auth user * * @param dto dto * @return user */ User createDefaultAuthUser(AuthDto dto); /** * delete by user id * * @param userId user id * @param headers headers * @return Response */ Response deleteByUserId(UUID userId, HttpHeaders headers); }
356
509
/* * Copyright (C)2016 - SMBJ Contributors * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.hierynomus.mssmb2; import com.hierynomus.protocol.commons.EnumWithValue; /** * [MS-SMB2].pdf 2.2.14 SMB2 CREATE Response- CreateAction * <p> * The action taken in establishing the open. This field MUST contain one of * the following values. */ public enum SMB2CreateAction implements EnumWithValue<SMB2CreateAction> { /** * An existing file was deleted and a new file was created in its place. */ FILE_SUPERSEDED(0x00000000L), /** * An existing file was opened. */ FILE_OPENED(0x00000001L), /** * A new file was created. */ FILE_CREATED(0x00000002L), /** * An existing file was overwritten */ FILE_OVERWRITTEN(0x00000003L); private long value; SMB2CreateAction(long value) { this.value = value; } public long getValue() { return value; } }
509
374
<reponame>mattdsteele/pibooth # -*- coding: utf-8 -*- from pibooth.utils import LOGGER from pibooth.camera.rpi import RpiCamera, get_rpi_camera_proxy from pibooth.camera.gphoto import GpCamera, get_gp_camera_proxy from pibooth.camera.opencv import CvCamera, get_cv_camera_proxy from pibooth.camera.hybrid import HybridRpiCamera, HybridCvCamera def close_proxy(rpi_cam_proxy, gp_cam_proxy, cv_cam_proxy): """Close proxy drivers. """ if rpi_cam_proxy: RpiCamera(rpi_cam_proxy).quit() if gp_cam_proxy: GpCamera(gp_cam_proxy).quit() if cv_cam_proxy: CvCamera(cv_cam_proxy).quit() def find_camera(): """Initialize the camera depending of the connected one. The priority order is chosen in order to have best rendering during preview and to take captures. The gPhoto2 camera is first (drivers most restrictive) to avoid connection concurence in case of DSLR compatible with OpenCV. """ rpi_cam_proxy = get_rpi_camera_proxy() gp_cam_proxy = get_gp_camera_proxy() cv_cam_proxy = get_cv_camera_proxy() if rpi_cam_proxy and gp_cam_proxy: LOGGER.info("Configuring hybrid camera (Picamera + gPhoto2) ...") close_proxy(None, None, cv_cam_proxy) return HybridRpiCamera(rpi_cam_proxy, gp_cam_proxy) elif cv_cam_proxy and gp_cam_proxy: LOGGER.info("Configuring hybrid camera (OpenCV + gPhoto2) ...") close_proxy(rpi_cam_proxy, None, None) return HybridCvCamera(cv_cam_proxy, gp_cam_proxy) elif gp_cam_proxy: LOGGER.info("Configuring gPhoto2 camera ...") close_proxy(rpi_cam_proxy, None, cv_cam_proxy) return GpCamera(gp_cam_proxy) elif rpi_cam_proxy: LOGGER.info("Configuring Picamera camera ...") close_proxy(None, gp_cam_proxy, cv_cam_proxy) return RpiCamera(rpi_cam_proxy) elif cv_cam_proxy: LOGGER.info("Configuring OpenCV camera ...") close_proxy(rpi_cam_proxy, gp_cam_proxy, None) return CvCamera(cv_cam_proxy) raise EnvironmentError("Neither Raspberry Pi nor GPhoto2 nor OpenCV camera detected")
856
350
<reponame>Eyalcohenx/tonic<filename>tonic/torch/models/encoders.py import torch class ObservationEncoder(torch.nn.Module): def initialize( self, observation_space, action_space=None, observation_normalizer=None, ): self.observation_normalizer = observation_normalizer observation_size = observation_space.shape[0] return observation_size def forward(self, observations): if self.observation_normalizer: observations = self.observation_normalizer(observations) return observations class ObservationActionEncoder(torch.nn.Module): def initialize( self, observation_space, action_space, observation_normalizer=None ): self.observation_normalizer = observation_normalizer observation_size = observation_space.shape[0] action_size = action_space.shape[0] return observation_size + action_size def forward(self, observations, actions): if self.observation_normalizer: observations = self.observation_normalizer(observations) return torch.cat([observations, actions], dim=-1)
424
412
public class Test { private static Test static_test = new Test(); private Test test = null; public Test() { } public int foo(int x) { if (x > 10) { return x; } else { return x * 10; } } public void obsolete() { } }
108
2,151
<filename>cmds/appwidget/src/com/android/commands/appwidget/AppWidget.java /* ** Copyright 2014, The Android Open Source Project ** ** Licensed under the Apache License, Version 2.0 (the "License"); ** you may not use this file except in compliance with the License. ** You may obtain a copy of the License at ** ** http://www.apache.org/licenses/LICENSE-2.0 ** ** Unless required by applicable law or agreed to in writing, software ** distributed under the License is distributed on an "AS IS" BASIS, ** WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. ** See the License for the specific language governing permissions and ** limitations under the License. */ package com.android.commands.appwidget; import android.content.Context; import android.os.IBinder; import android.os.RemoteException; import android.os.ServiceManager; import android.os.UserHandle; import android.text.TextUtils; import com.android.internal.appwidget.IAppWidgetService; /** * This class is a command line utility for manipulating app widgets. A client * can grant or revoke the permission for a given package to bind app widgets. 
*/ public class AppWidget { private static final String USAGE = "usage: adb shell appwidget [subcommand] [options]\n" + "\n" + "usage: adb shell appwidget grantbind --package <PACKAGE> " + " [--user <USER_ID> | current]\n" + " <PACKAGE> an Android package name.\n" + " <USER_ID> The user id under which the package is installed.\n" + " Example:\n" + " # Grant the \"foo.bar.baz\" package to bind app widgets for the current user.\n" + " adb shell grantbind --package foo.bar.baz --user current\n" + "\n" + "usage: adb shell appwidget revokebind --package <PACKAGE> " + "[--user <USER_ID> | current]\n" + " <PACKAGE> an Android package name.\n" + " <USER_ID> The user id under which the package is installed.\n" + " Example:\n" + " # Revoke the permisison to bind app widgets from the \"foo.bar.baz\" package.\n" + " adb shell revokebind --package foo.bar.baz --user current\n" + "\n"; private static class Parser { private static final String ARGUMENT_GRANT_BIND = "grantbind"; private static final String ARGUMENT_REVOKE_BIND = "revokebind"; private static final String ARGUMENT_PACKAGE = "--package"; private static final String ARGUMENT_USER = "--user"; private static final String ARGUMENT_PREFIX = "--"; private static final String VALUE_USER_CURRENT = "current"; private final Tokenizer mTokenizer; public Parser(String[] args) { mTokenizer = new Tokenizer(args); } public Runnable parseCommand() { try { String operation = mTokenizer.nextArg(); if (ARGUMENT_GRANT_BIND.equals(operation)) { return parseSetGrantBindAppWidgetPermissionCommand(true); } else if (ARGUMENT_REVOKE_BIND.equals(operation)) { return parseSetGrantBindAppWidgetPermissionCommand(false); } else { throw new IllegalArgumentException("Unsupported operation: " + operation); } } catch (IllegalArgumentException iae) { System.out.println(USAGE); System.out.println("[ERROR] " + iae.getMessage()); return null; } } private SetBindAppWidgetPermissionCommand parseSetGrantBindAppWidgetPermissionCommand( boolean granted) { 
String packageName = null; int userId = UserHandle.USER_SYSTEM; for (String argument; (argument = mTokenizer.nextArg()) != null;) { if (ARGUMENT_PACKAGE.equals(argument)) { packageName = argumentValueRequired(argument); } else if (ARGUMENT_USER.equals(argument)) { String user = argumentValueRequired(argument); if (VALUE_USER_CURRENT.equals(user)) { userId = UserHandle.USER_CURRENT; } else { userId = Integer.parseInt(user); } } else { throw new IllegalArgumentException("Unsupported argument: " + argument); } } if (packageName == null) { throw new IllegalArgumentException("Package name not specified." + " Did you specify --package argument?"); } return new SetBindAppWidgetPermissionCommand(packageName, granted, userId); } private String argumentValueRequired(String argument) { String value = mTokenizer.nextArg(); if (TextUtils.isEmpty(value) || value.startsWith(ARGUMENT_PREFIX)) { throw new IllegalArgumentException("No value for argument: " + argument); } return value; } } private static class Tokenizer { private final String[] mArgs; private int mNextArg; public Tokenizer(String[] args) { mArgs = args; } private String nextArg() { if (mNextArg < mArgs.length) { return mArgs[mNextArg++]; } else { return null; } } } private static class SetBindAppWidgetPermissionCommand implements Runnable { final String mPackageName; final boolean mGranted; final int mUserId; public SetBindAppWidgetPermissionCommand(String packageName, boolean granted, int userId) { mPackageName = packageName; mGranted = granted; mUserId = userId; } @Override public void run() { IBinder binder = ServiceManager.getService(Context.APPWIDGET_SERVICE); IAppWidgetService appWidgetService = IAppWidgetService.Stub.asInterface(binder); try { appWidgetService.setBindAppWidgetPermission(mPackageName, mUserId, mGranted); } catch (RemoteException re) { re.printStackTrace(); } } } public static void main(String[] args) { Parser parser = new Parser(args); Runnable command = parser.parseCommand(); if (command != 
null) { command.run(); } } }
2,835
3,045
/*- * Copyright (c) 2020 Varnish Software AS * All rights reserved. * * Author: <NAME> <<EMAIL>> * * SPDX-License-Identifier: BSD-2-Clause * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. 
* */ #include "config.h" #include <sys/types.h> #include <stdlib.h> #include <string.h> #include "vtc.h" #include "vtc_http.h" struct thread_arg { unsigned magic; #define THREAD_ARG_MAGIC 0xd5dc5f1c void *priv; sess_conn_f *conn_f; sess_disc_f *disc_f; const char *listen_addr; struct vtc_sess *vsp; int *asocket; const char *spec; }; struct vtc_sess * Sess_New(struct vtclog *vl, const char *name) { struct vtc_sess *vsp; ALLOC_OBJ(vsp, VTC_SESS_MAGIC); AN(vsp); vsp->vl = vl; REPLACE(vsp->name, name); vsp->repeat = 1; return (vsp); } void Sess_Destroy(struct vtc_sess **vspp) { struct vtc_sess *vsp; TAKE_OBJ_NOTNULL(vsp, vspp, VTC_SESS_MAGIC); REPLACE(vsp->name, NULL); FREE_OBJ(vsp); } int Sess_GetOpt(struct vtc_sess *vsp, char * const **avp) { char * const *av; int rv = 0; CHECK_OBJ_NOTNULL(vsp, VTC_SESS_MAGIC); AN(avp); av = *avp; AN(*av); if (!strcmp(*av, "-rcvbuf")) { AN(av[1]); vsp->rcvbuf = atoi(av[1]); av += 1; rv = 1; } else if (!strcmp(*av, "-repeat")) { AN(av[1]); vsp->repeat = atoi(av[1]); av += 1; rv = 1; } else if (!strcmp(*av, "-keepalive")) { vsp->keepalive = 1; rv = 1; } *avp = av; return (rv); } int sess_process(struct vtclog *vl, struct vtc_sess *vsp, const char *spec, int sock, int *sfd, const char *addr) { int rv; CHECK_OBJ_NOTNULL(vsp, VTC_SESS_MAGIC); rv = http_process(vl, vsp, spec, sock, sfd, addr, vsp->rcvbuf); return (rv); } static void * sess_thread(void *priv) { struct vtclog *vl; struct vtc_sess *vsp; struct thread_arg ta, *tap; int i, fd = -1; CAST_OBJ_NOTNULL(tap, priv, THREAD_ARG_MAGIC); ta = *tap; FREE_OBJ(tap); vsp = ta.vsp; CHECK_OBJ_NOTNULL(vsp, VTC_SESS_MAGIC); vl = vtc_logopen("%s", vsp->name); pthread_cleanup_push(vtc_logclose, vl); assert(vsp->repeat > 0); vtc_log(vl, 2, "Started on %s (%u iterations%s)", ta.listen_addr, vsp->repeat, vsp->keepalive ? " using keepalive" : ""); for (i = 0; i < vsp->repeat; i++) { if (fd < 0) fd = ta.conn_f(ta.priv, vl); fd = sess_process(vl, ta.vsp, ta.spec, fd, ta.asocket, ta.listen_addr); if (! 
vsp->keepalive) ta.disc_f(ta.priv, vl, &fd); } if (vsp->keepalive) ta.disc_f(ta.priv, vl, &fd); vtc_log(vl, 2, "Ending"); pthread_cleanup_pop(0); vtc_logclose(vl); return (NULL); } pthread_t Sess_Start_Thread( void *priv, struct vtc_sess *vsp, sess_conn_f *conn, sess_disc_f *disc, const char *listen_addr, int *asocket, const char *spec ) { struct thread_arg *ta; pthread_t pt; AN(priv); CHECK_OBJ_NOTNULL(vsp, VTC_SESS_MAGIC); AN(conn); AN(disc); AN(listen_addr); ALLOC_OBJ(ta, THREAD_ARG_MAGIC); AN(ta); ta->priv = priv; ta->vsp = vsp; ta->conn_f = conn; ta->disc_f = disc; ta->listen_addr = listen_addr; ta->asocket = asocket; ta->spec = spec; AZ(pthread_create(&pt, NULL, sess_thread, ta)); return (pt); }
1,877
427
<gh_stars>100-1000 package modern.challenge; import java.util.concurrent.ExecutorService; import java.util.concurrent.Executors; import java.util.concurrent.TimeUnit; public class Main { public static void main(String[] args) throws InterruptedException { Incrementator nonAtomicInc = new Incrementator(); AtomicIncrementator atomicInc = new AtomicIncrementator(); ExecutorService executor = Executors.newFixedThreadPool(5); System.out.println("Counting via non atomic incrementator ..."); for (int i = 0; i < 1_000_000; i++) { executor.execute(nonAtomicInc); } System.out.println("Counting via atomic incrementator ..."); for (int i = 0; i < 1_000_000; i++) { executor.execute(atomicInc); } executor.shutdown(); executor.awaitTermination(Integer.MAX_VALUE, TimeUnit.MILLISECONDS); System.out.println("Non atomic counting result (expected 1_000_000): " + nonAtomicInc.getCount()); System.out.println("Atomic counting result (expected 1_000_000): " + atomicInc.getCount()); } }
427
1,140
# -*- coding: utf-8 -*- """ flaskbb.cli ~~~~~~~~~~~ FlaskBB's Command Line Interface. To make it work, you have to install FlaskBB via ``pip install -e .``. Plugin and Theme templates are generated via cookiecutter. In order to generate those project templates you have to cookiecutter first:: pip install cookiecutter :copyright: (c) 2016 by the FlaskBB Team. :license: BSD, see LICENSE for more details. """ from flaskbb.cli.main import flaskbb # noqa from flaskbb.cli.plugins import plugins # noqa from flaskbb.cli.themes import themes # noqa from flaskbb.cli.translations import translations # noqa from flaskbb.cli.users import users # noqa
229
2,486
<reponame>priya1puresoftware/python-slack-sdk import json import unittest from slack_sdk.scim.v1.internal_utils import _to_snake_cased class TEstInternals(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def test_snake_cased(self): response_body = """{"totalResults":441,"itemsPerPage":1,"startIndex":1,"schemas":["urn:scim:schemas:core:1.0"],"Resources":[{"schemas":["urn:scim:schemas:core:1.0"],"id":"W111","externalId":"","meta":{"created":"2020-08-13T04:15:35-07:00","location":"https://api.slack.com/scim/v1/Users/W111"},"userName":"test-app","nickName":"test-app","name":{"givenName":"","familyName":""},"displayName":"","profileUrl":"https://test-test-test.enterprise.slack.com/team/test-app","title":"","timezone":"America/Los_Angeles","active":true,"emails":[{"value":"<EMAIL>","primary":true}],"photos":[{"value":"https://secure.gravatar.com/avatar/xxx.jpg","type":"photo"}],"groups":[]}]}""" result = _to_snake_cased(json.loads(response_body)) self.assertEqual(result["start_index"], 1) self.assertIsNotNone(result["resources"][0]["id"])
430
1,260
#pragma once // Use this for packaging up a float into a WPARAM in order to losslessly // pass it in a windows message. union PackagedFloatVersion { unsigned u; float f; }; class CVersionChecker { public: CVersionChecker() noexcept; ~CVersionChecker(); void StartVersionCheckerThread(CWnd* pWindow) noexcept; private: static DWORD __stdcall StaticVersionCheckerThread(LPVOID); void VersionCheckerThread(); HANDLE hThread_ = nullptr; CWnd* pWindow_ = nullptr; CVersionChecker(const CVersionChecker&) = delete; CVersionChecker(const CVersionChecker&&) = delete; CVersionChecker& operator=(const CVersionChecker&) = delete; CVersionChecker& operator=(const CVersionChecker&&) = delete; };
265
11,868
<filename>samples/server/petstore/java-vertx-web/src/main/java/org/openapitools/vertxweb/server/api/PetApiImpl.java
package org.openapitools.vertxweb.server.api;

import io.vertx.ext.web.FileUpload;
import org.openapitools.vertxweb.server.model.ModelApiResponse;
import org.openapitools.vertxweb.server.model.Pet;

import org.openapitools.vertxweb.server.ApiResponse;
import io.vertx.core.Future;
import io.vertx.core.json.JsonObject;
import io.vertx.ext.web.handler.impl.HttpStatusException;

import java.util.List;
import java.util.Map;

// Implement this class

/**
 * Generated skeleton implementation of {@link PetApi}.
 *
 * Every operation currently fails with HTTP 501 (Not Implemented); replace
 * each body with real logic. Returning a failed Future is the vert.x-web
 * convention for signalling an error status to the router.
 */
public class PetApiImpl implements PetApi {
    /** Add a new pet to the store. Not implemented: always fails with 501. */
    public Future<ApiResponse<Pet>> addPet(Pet pet) {
        return Future.failedFuture(new HttpStatusException(501));
    }

    /** Delete a pet by id. Not implemented: always fails with 501. */
    public Future<ApiResponse<Void>> deletePet(Long petId, String apiKey) {
        return Future.failedFuture(new HttpStatusException(501));
    }

    /** Find pets by status. Not implemented: always fails with 501. */
    public Future<ApiResponse<List<Pet>>> findPetsByStatus(List<String> status) {
        return Future.failedFuture(new HttpStatusException(501));
    }

    /** Find pets by tags. Not implemented: always fails with 501. */
    public Future<ApiResponse<List<Pet>>> findPetsByTags(List<String> tags) {
        return Future.failedFuture(new HttpStatusException(501));
    }

    /** Fetch a single pet by id. Not implemented: always fails with 501. */
    public Future<ApiResponse<Pet>> getPetById(Long petId) {
        return Future.failedFuture(new HttpStatusException(501));
    }

    /** Update an existing pet. Not implemented: always fails with 501. */
    public Future<ApiResponse<Pet>> updatePet(Pet pet) {
        return Future.failedFuture(new HttpStatusException(501));
    }

    /** Update a pet from form data. Not implemented: always fails with 501. */
    public Future<ApiResponse<Void>> updatePetWithForm(Long petId, JsonObject formBody) {
        return Future.failedFuture(new HttpStatusException(501));
    }

    /** Upload an image for a pet. Not implemented: always fails with 501. */
    public Future<ApiResponse<ModelApiResponse>> uploadFile(Long petId, FileUpload file) {
        return Future.failedFuture(new HttpStatusException(501));
    }

}
642
8,092
<filename>tests/core/test_providers_manager.py
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements.  See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership.  The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License.  You may obtain a copy of the License at
#
#   http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied.  See the License for the
# specific language governing permissions and limitations
# under the License.
"""Tests for :class:`airflow.providers_manager.ProvidersManager` discovery and registration."""
import logging
import re
from typing import Dict
from unittest.mock import patch

import pytest
from flask_appbuilder.fieldwidgets import BS3TextFieldWidget
from flask_babel import lazy_gettext
from wtforms import BooleanField, Field, StringField

from airflow.exceptions import AirflowOptionalProviderFeatureException
from airflow.providers_manager import HookClassProvider, ProviderInfo, ProvidersManager


class TestProviderManager:
    """Exercises provider/hook/widget discovery, including deprecation and warning paths.

    The ``inject_fixtures`` autouse fixture stores pytest's ``caplog`` on the
    instance so each test can assert on captured log records.
    """

    @pytest.fixture(autouse=True)
    def inject_fixtures(self, caplog):
        # Expose caplog to every test method via self._caplog.
        self._caplog = caplog

    def test_providers_are_loaded(self):
        """All installed providers load cleanly and are keyed by their package name."""
        with self._caplog.at_level(logging.WARNING):
            provider_manager = ProvidersManager()
            provider_list = list(provider_manager.providers.keys())
            # No need to sort the list - it should be sorted alphabetically !
            for provider in provider_list:
                package_name = provider_manager.providers[provider].data['package-name']
                version = provider_manager.providers[provider].version
                # Version strings follow a loose semver pattern (e.g. "1.2.3rc1").
                assert re.search(r'[0-9]*\.[0-9]*\.[0-9]*.*', version)
                assert package_name == provider
            # just a coherence check - no exact number as otherwise we would have to update
            # several tests if we add new connections/provider which is not ideal
            assert len(provider_list) > 65
            assert [] == self._caplog.records

    def test_hooks_deprecation_warnings_generated(self):
        """Providers that still use the legacy 'hook-class-names' key trigger a DeprecationWarning."""
        with pytest.warns(expected_warning=DeprecationWarning, match='hook-class-names') as warning_records:
            providers_manager = ProvidersManager()
            providers_manager._provider_dict['test-package'] = ProviderInfo(
                version='0.0.1',
                data={'hook-class-names': ['airflow.providers.sftp.hooks.sftp.SFTPHook']},
                package_or_source='package',
            )
            providers_manager._discover_hooks()
        assert warning_records

    def test_hooks_deprecation_warnings_not_generated(self):
        """No deprecation warning when 'connection-types' accompanies 'hook-class-names'."""
        with pytest.warns(expected_warning=None) as warning_records:
            providers_manager = ProvidersManager()
            providers_manager._provider_dict['apache-airflow-providers-sftp'] = ProviderInfo(
                version='0.0.1',
                data={
                    'hook-class-names': ['airflow.providers.sftp.hooks.sftp.SFTPHook'],
                    'connection-types': [
                        {
                            'hook-class-name': 'airflow.providers.sftp.hooks.sftp.SFTPHook',
                            'connection-type': 'sftp',
                        }
                    ],
                },
                package_or_source='package',
            )
            providers_manager._discover_hooks()
        assert [] == [w.message for w in warning_records.list if "hook-class-names" in str(w.message)]

    def test_warning_logs_generated(self):
        """A mismatch between declared and actual connection type logs an inconsistency warning."""
        with self._caplog.at_level(logging.WARNING):
            providers_manager = ProvidersManager()
            providers_manager._provider_dict['apache-airflow-providers-sftp'] = ProviderInfo(
                version='0.0.1',
                data={
                    'hook-class-names': ['airflow.providers.sftp.hooks.sftp.SFTPHook'],
                    'connection-types': [
                        {
                            'hook-class-name': 'airflow.providers.sftp.hooks.sftp.SFTPHook',
                            'connection-type': 'wrong-connection-type',
                        }
                    ],
                },
                package_or_source='package',
            )
            providers_manager._discover_hooks()
            # Force lazy resolution of the hook so the check actually runs.
            _ = providers_manager._hooks_lazy_dict['wrong-connection-type']
        assert len(self._caplog.records) == 1
        assert "Inconsistency!" in self._caplog.records[0].message
        assert "sftp" not in providers_manager.hooks

    def test_warning_logs_not_generated(self):
        """A consistent connection-type declaration resolves silently."""
        with self._caplog.at_level(logging.WARNING):
            providers_manager = ProvidersManager()
            providers_manager._provider_dict['apache-airflow-providers-sftp'] = ProviderInfo(
                version='0.0.1',
                data={
                    'hook-class-names': ['airflow.providers.sftp.hooks.sftp.SFTPHook'],
                    'connection-types': [
                        {
                            'hook-class-name': 'airflow.providers.sftp.hooks.sftp.SFTPHook',
                            'connection-type': 'sftp',
                        }
                    ],
                },
                package_or_source='package',
            )
            providers_manager._discover_hooks()
            _ = providers_manager._hooks_lazy_dict['sftp']
        assert not self._caplog.records
        assert "sftp" in providers_manager.hooks

    def test_hooks(self):
        """Hook discovery yields a sane number of connection types without warnings."""
        with pytest.warns(expected_warning=None) as warning_records:
            with self._caplog.at_level(logging.WARNING):
                provider_manager = ProvidersManager()
                connections_list = list(provider_manager.hooks.keys())
                assert len(connections_list) > 60
        assert [] == [w.message for w in warning_records.list if "hook-class-names" in str(w.message)]
        assert len(self._caplog.records) == 0

    def test_hook_values(self):
        """Iterating hook *values* (not just keys) also resolves cleanly."""
        with pytest.warns(expected_warning=None) as warning_records:
            with self._caplog.at_level(logging.WARNING):
                provider_manager = ProvidersManager()
                connections_list = list(provider_manager.hooks.values())
                assert len(connections_list) > 60
        assert [] == [w.message for w in warning_records.list if "hook-class-names" in str(w.message)]
        assert len(self._caplog.records) == 0

    def test_connection_form_widgets(self):
        """Connection-form widgets are discovered from providers."""
        provider_manager = ProvidersManager()
        connections_form_widgets = list(provider_manager.connection_form_widgets.keys())
        assert len(connections_form_widgets) > 29

    @pytest.mark.parametrize(
        'scenario',
        [
            'prefix',
            'no_prefix',
            'both_1',
            'both_2',
        ],
    )
    def test_connection_form__add_widgets_prefix_backcompat(self, scenario):
        """
        When the field name is prefixed, it should be used as is.
        When not prefixed, we should add the prefix
        When there's a collision, the one that appears first in the list will be used.
        """

        class MyHook:
            conn_type = 'test'

        provider_manager = ProvidersManager()
        widget_field = StringField(lazy_gettext('My Param'), widget=BS3TextFieldWidget())
        dummy_field = BooleanField(label=lazy_gettext('Dummy param'), description="dummy")
        widgets: Dict[str, Field] = {}
        if scenario == 'prefix':
            widgets['extra__test__my_param'] = widget_field
        elif scenario == 'no_prefix':
            widgets['my_param'] = widget_field
        elif scenario == 'both_1':
            widgets['my_param'] = widget_field
            widgets['extra__test__my_param'] = dummy_field
        elif scenario == 'both_2':
            widgets['extra__test__my_param'] = widget_field
            widgets['my_param'] = dummy_field
        else:
            raise Exception('unexpected')

        provider_manager._add_widgets(
            package_name='abc',
            hook_class=MyHook,
            widgets=widgets,
        )

        # In every scenario the winning widget ends up under the prefixed key.
        assert provider_manager.connection_form_widgets['extra__test__my_param'].field == widget_field

    def test_field_behaviours(self):
        """Custom field behaviours are discovered from providers."""
        provider_manager = ProvidersManager()
        connections_with_field_behaviours = list(provider_manager.field_behaviours.keys())
        assert len(connections_with_field_behaviours) > 16

    def test_extra_links(self):
        """Extra operator links are discovered from providers."""
        provider_manager = ProvidersManager()
        extra_link_class_names = list(provider_manager.extra_links_class_names)
        assert len(extra_link_class_names) > 6

    def test_logging(self):
        """Logging handler classes are discovered from providers."""
        provider_manager = ProvidersManager()
        logging_class_names = list(provider_manager.logging_class_names)
        assert len(logging_class_names) > 5

    def test_secrets_backends(self):
        """Secrets backend classes are discovered from providers."""
        provider_manager = ProvidersManager()
        secrets_backends_class_names = list(provider_manager.secrets_backend_class_names)
        assert len(secrets_backends_class_names) > 4

    def test_auth_backends(self):
        """API auth backend modules are discovered from providers."""
        provider_manager = ProvidersManager()
        auth_backend_module_names = list(provider_manager.auth_backend_module_names)
        assert len(auth_backend_module_names) > 0

    @patch("airflow.providers_manager.import_string")
    def test_optional_feature_no_warning(self, mock_importlib_import_string):
        """An optional-feature import failure is silent at WARNING level."""
        with self._caplog.at_level(logging.WARNING):
            mock_importlib_import_string.side_effect = AirflowOptionalProviderFeatureException()
            providers_manager = ProvidersManager()
            providers_manager._hook_provider_dict["test_connection"] = HookClassProvider(
                package_name="test_package", hook_class_name="HookClass"
            )
            providers_manager._import_hook(
                hook_class_name=None, provider_info=None, package_name=None, connection_type="test_connection"
            )
            assert [] == self._caplog.messages

    @patch("airflow.providers_manager.import_string")
    def test_optional_feature_debug(self, mock_importlib_import_string):
        """An optional-feature import failure is reported once at INFO level."""
        with self._caplog.at_level(logging.INFO):
            mock_importlib_import_string.side_effect = AirflowOptionalProviderFeatureException()
            providers_manager = ProvidersManager()
            providers_manager._hook_provider_dict["test_connection"] = HookClassProvider(
                package_name="test_package", hook_class_name="HookClass"
            )
            providers_manager._import_hook(
                hook_class_name=None, provider_info=None, package_name=None, connection_type="test_connection"
            )
            assert [
                "Optional provider feature disabled when importing 'HookClass' from "
                "'test_package' package"
            ] == self._caplog.messages
4,872
471
import os

from couchdbkit import Database
from django.apps import apps

from corehq.preindex import PreindexPlugin
from corehq.util.couchdb_management import CouchConfig
from dimagi.utils.couch.sync_docs import DesignInfo


class DefaultPreindexPlugin(PreindexPlugin):
    """Preindex plugin that serves the ``_design`` documents bundled with a Django app."""

    def __init__(self, app_label):
        # Resolve the app config once and derive the on-disk _design directory.
        self.app_label = app_label
        self.app_config = apps.get_app_config(self.app_label)
        self.dir = os.path.join(self.app_config.path, '_design')

    def _get_designs(self):
        """Return the DesignInfo entries for this app's couch database."""
        # CouchConfig is created lazily here (not at import time) so that it
        # picks up any db settings patched in by the test harness.
        config = CouchConfig()
        db_uri = config.get_db_uri_for_app_label(self.app_config.label)
        database = Database(db_uri, create=True)
        design = DesignInfo(
            app_label=self.app_config.label,
            db=database,
            design_path=self.dir,
        )
        return [design]
386
3,012
/** @file
  GUID for Shell Map for Get/Set via runtime services.

  Copyright (c) 2009 - 2018, Intel Corporation. All rights reserved.<BR>
  SPDX-License-Identifier: BSD-2-Clause-Patent
**/

#ifndef _SHELL_MAP_GUID_H_
#define _SHELL_MAP_GUID_H_

/* EFI_GUID initializer {51271e13-7de3-43af-8bc2-71ad3b824325} identifying
   the shell map variable namespace. */
#define SHELL_MAP_GUID \
{ \
  0x51271e13, 0x7de3, 0x43af, { 0x8b, 0xc2, 0x71, 0xad, 0x3b, 0x82, 0x43, 0x25 } \
}

/* Global instance of the GUID; defined in the package's .dec/.c glue. */
extern EFI_GUID gShellMapGuid;

#endif
206
854
__________________________________________________________________________________________________
sample 56 ms submission
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class CBTInserter:
    """Complete-binary-tree inserter: keeps a level-order list of nodes so the
    parent of the next insertion slot is found by index arithmetic."""

    def __init__(self, root: TreeNode):
        self.root = root
        temp = []
        stack = [root]
        # BFS collecting non-None nodes in level order.
        # NOTE(review): list.pop(0) is O(n) per call; collections.deque would
        # make this O(1) — confirm before changing a submitted solution.
        while stack:
            a = stack.pop(0)
            if a:
                temp.append(a)
                stack.append(a.left)
                stack.append(a.right)
        self.temp = temp

    def insert(self, v: int) -> int:
        # In a level-order array, node i's parent sits at (i-1)//2; the new
        # node lands at index n, so its parent is at (n-1)//2.
        n = len(self.temp)
        p = self.temp[(n-1)//2]
        if n % 2 == 0:
            # Even size: parent already has a left child -> fill the right slot.
            p.right = TreeNode(v)
            self.temp.append(p.right)
        else:
            p.left = TreeNode(v)
            self.temp.append(p.left)
        return p.val

    def get_root(self) -> TreeNode:
        return self.root

# Your CBTInserter object will be instantiated and called as such:
# obj = CBTInserter(root)
# param_1 = obj.insert(v)
# param_2 = obj.get_root()
__________________________________________________________________________________________________
sample 13676 kb submission
# Definition for a binary tree node.
# class TreeNode:
#     def __init__(self, x):
#         self.val = x
#         self.left = None
#         self.right = None

class CBTInserter:
    """Alternate submission: 1-indexed level-order list (index 0 padded with
    None) so parent of node i is simply i // 2."""

    def __init__(self, root: TreeNode):
        # Appending children while iterating self.tree is a deliberate
        # grow-as-you-go BFS over the existing complete tree.
        self.tree = [root]
        for n in self.tree:
            if n.left:
                self.tree.append(n.left)
            if n.right:
                self.tree.append(n.right)
        # Pad index 0 so the array becomes 1-indexed.
        self.tree.insert(0, None)

    def insert(self, v: int) -> int:
        newNode = TreeNode(v)
        self.tree.append(newNode)
        # With 1-indexing, the new node's parent is at (len-1) // 2.
        parent = self.tree[(len(self.tree) - 1) // 2]
        if parent.left == None:
            parent.left = newNode
        else:
            parent.right = newNode
        return parent.val

    def get_root(self) -> TreeNode:
        # Root lives at index 1 because of the None padding.
        return self.tree[1]

# Your CBTInserter object will be instantiated and called as such:
# obj = CBTInserter(root)
# param_1 = obj.insert(v)
# param_2 = obj.get_root()
__________________________________________________________________________________________________
1,130
494
/**
 * Copyright 2013 Netflix, Inc.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package com.netflix.astyanax.cql.test;

import junit.framework.Assert;

import org.junit.AfterClass;
import org.junit.BeforeClass;
import org.junit.Test;

import com.netflix.astyanax.MutationBatch;
import com.netflix.astyanax.model.Column;
import com.netflix.astyanax.model.ColumnFamily;
import com.netflix.astyanax.model.ColumnList;
import com.netflix.astyanax.serializers.IntegerSerializer;
import com.netflix.astyanax.serializers.StringSerializer;

/**
 * Integration test for row copying: writes a row to one column family,
 * copies it to a second column family with {@code copyTo}, and verifies the
 * copied columns match the originals.
 */
public class RowCopierTests extends KeyspaceTests {

    /** Source column family (Integer row keys, String column names). */
    private static final ColumnFamily<Integer, String> CF_ROW_COPY = new ColumnFamily<Integer, String>("testrowcopy",
            IntegerSerializer.get(), StringSerializer.get(), IntegerSerializer.get());

    /** Destination column family with an identical schema. */
    private static final ColumnFamily<Integer, String> CF_ROW_COPY2 = new ColumnFamily<Integer, String>("testrowcopy2",
            IntegerSerializer.get(), StringSerializer.get(), IntegerSerializer.get());

    /** Creates both column families before any test runs. */
    @BeforeClass
    public static void init() throws Exception {
        initContext();
        keyspace.createColumnFamily(CF_ROW_COPY, null);
        keyspace.createColumnFamily(CF_ROW_COPY2, null);
        CF_ROW_COPY.describe(keyspace);
        CF_ROW_COPY2.describe(keyspace);
    }

    /** Drops both column families after the test class finishes. */
    @AfterClass
    public static void tearDown() throws Exception {
        keyspace.dropColumnFamily(CF_ROW_COPY);
        keyspace.dropColumnFamily(CF_ROW_COPY2);
    }

    @Test
    public void runRowCopyTest() throws Exception {
        // Seed row 10 with columns c1=1 and c2=2.
        MutationBatch m = keyspace.prepareMutationBatch();
        m.withRow(CF_ROW_COPY, 10).putColumn("c1", 1).putColumn("c2", 2);
        m.execute();

        // Sanity-check the source row before copying.
        ColumnList<String> result = keyspace.prepareQuery(CF_ROW_COPY).getRow(10).execute().getResult();
        Column<String> column = result.getColumnByIndex(0);
        Assert.assertEquals("c1", column.getName());
        Assert.assertEquals(1, column.getIntegerValue());
        column = result.getColumnByIndex(1);
        Assert.assertEquals("c2", column.getName());
        Assert.assertEquals(2, column.getIntegerValue());

        // Copy row 10 into CF_ROW_COPY2 under row key 11.
        keyspace.prepareQuery(CF_ROW_COPY).getRow(10).copyTo(CF_ROW_COPY2, 11).execute();

        // The copied row must contain identical columns and values.
        ColumnList<String> result2 = keyspace.prepareQuery(CF_ROW_COPY2).getRow(11).execute().getResult();
        column = result2.getColumnByIndex(0);
        Assert.assertEquals("c1", column.getName());
        Assert.assertEquals(1, column.getIntegerValue());
        column = result2.getColumnByIndex(1);
        Assert.assertEquals("c2", column.getName());
        Assert.assertEquals(2, column.getIntegerValue());
    }
}
4,813
# coding: utf-8 from datetime import timedelta as td from unittest import skipIf from unittest.mock import patch, Mock from django.utils.timezone import now from hc.api.models import Channel, Check, Notification from hc.test import BaseTestCase from django.test.utils import override_settings try: import apprise except ImportError: apprise = None @skipIf(apprise is None, "apprise not installed") class NotifyTestCase(BaseTestCase): def _setup_data(self, value, status="down", email_verified=True): self.check = Check(project=self.project) self.check.status = status self.check.last_ping = now() - td(minutes=61) self.check.save() self.channel = Channel(project=self.project) self.channel.kind = "apprise" self.channel.value = value self.channel.email_verified = email_verified self.channel.save() self.channel.checks.add(self.check) @patch("apprise.Apprise") @override_settings(APPRISE_ENABLED=True) def test_apprise_enabled(self, mock_apprise): self._setup_data("123") mock_aobj = Mock() mock_aobj.add.return_value = True mock_aobj.notify.return_value = True mock_apprise.return_value = mock_aobj self.channel.notify(self.check) self.assertEqual(Notification.objects.count(), 1) self.check.status = "up" self.assertEqual(Notification.objects.count(), 1) @patch("apprise.Apprise") @override_settings(APPRISE_ENABLED=False) def test_apprise_disabled(self, mock_apprise): self._setup_data("123") mock_aobj = Mock() mock_aobj.add.return_value = True mock_aobj.notify.return_value = True mock_apprise.return_value = mock_aobj self.channel.notify(self.check) self.assertEqual(Notification.objects.count(), 1)
757
4,879
<gh_stars>1000+ #pragma once #include "shaders/program_params.hpp" #include "drape/vulkan/vulkan_base_context.hpp" #include "drape/vulkan/vulkan_gpu_program.hpp" #include "drape/vulkan/vulkan_object_manager.hpp" #include "drape/vulkan/vulkan_utils.hpp" #include "base/thread_checker.hpp" #include <vulkan_wrapper.h> #include <vulkan/vulkan.h> #include <cstdint> #include <vector> namespace gpu { namespace vulkan { class VulkanProgramParamsSetter : public ProgramParamsSetter { public: struct UniformBuffer { dp::vulkan::VulkanObject m_object; uint8_t * m_pointer = nullptr; uint32_t m_freeOffset = 0; }; explicit VulkanProgramParamsSetter(ref_ptr<dp::vulkan::VulkanBaseContext> context); ~VulkanProgramParamsSetter() override; void Destroy(ref_ptr<dp::vulkan::VulkanBaseContext> context); void Flush(); void Finish(uint32_t inflightFrameIndex); void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, MapProgramParams const & params) override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, RouteProgramParams const & params) override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, TrafficProgramParams const & params) override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, TransitProgramParams const & params) override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, GuiProgramParams const & params) override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, ShapesProgramParams const & params) override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, Arrow3dProgramParams const & params) override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, DebugRectProgramParams const & params) override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, ScreenQuadProgramParams const & params) 
override; void Apply(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, SMAAProgramParams const & params) override; private: template<typename T> void ApplyImpl(ref_ptr<dp::GraphicsContext> context, ref_ptr<dp::GpuProgram> program, T const & params) { ASSERT_EQUAL(T::GetName(), ProgramParams::GetBoundParamsName(program), ("Mismatched program and parameters", program->GetName())); ApplyBytes(context, reinterpret_cast<void const *>(&params), sizeof(params)); } void ApplyBytes(ref_ptr<dp::vulkan::VulkanBaseContext> context, void const * data, uint32_t sizeInBytes); ref_ptr<dp::vulkan::VulkanObjectManager> m_objectManager; std::array<std::vector<UniformBuffer>, dp::vulkan::kMaxInflightFrames> m_uniformBuffers; uint32_t m_offsetAlignment = 0; uint32_t m_sizeAlignment = 0; uint32_t m_flushHandlerId = 0; uint32_t m_finishHandlerId = 0; uint32_t m_updateInflightFrameId = 0; ThreadChecker m_threadChecker; uint32_t m_currentInflightFrameIndex = 0; }; } // namespace vulkan } // namespace gpu
1,231
990
class DemoPlainClass: a: int # <1> b: float = 1.1 # <2> c = 'spam' # <3>
65
408
"""Interactive script: a human drives MountainCar-v0 with the arrow keys to
record expert (state, action) trajectories for imitation learning."""
import gym
import readchar
import numpy as np

# # MACROS
# Action ids for MountainCar-v0's discrete action space.
Push_Left = 0
No_Push = 1
Push_Right = 2

# Key mapping: ANSI escape sequences for the left/down/right arrow keys.
arrow_keys = {
    '\x1b[D': Push_Left,
    '\x1b[B': No_Push,
    '\x1b[C': Push_Right}

env = gym.make('MountainCar-v0')

trajectories = []
episode_step = 0

for episode in range(20): # n_trajectories : 20
    trajectory = []
    step = 0

    env.reset()
    print("episode_step", episode_step)

    while True:
        env.render()
        print("step", step)

        # Blocks until a key is pressed; any non-arrow key aborts the episode.
        key = readchar.readkey()
        if key not in arrow_keys.keys():
            break

        action = arrow_keys[key]
        state, reward, done, _ = env.step(action)

        # Stop once the car reaches the goal, but only after at least 130 steps
        # so every recorded trajectory has a uniform length.
        if state[0] >= env.env.goal_position and step > 129: # trajectory_length : 130
            break

        # Record (position, velocity, action) for this step.
        trajectory.append((state[0], state[1], action))
        step += 1

# NOTE(review): the saving code below is commented out, so trajectories are
# collected but never persisted — presumably disabled on purpose; confirm.
# trajectory_numpy = np.array(trajectory, float)
# print("trajectory_numpy.shape", trajectory_numpy.shape)
# episode_step += 1
# trajectories.append(trajectory)

# np_trajectories = np.array(trajectories, float)
# print("np_trajectories.shape", np_trajectories.shape)

# np.save("expert_trajectories", arr=np_trajectories)
502
4,959
<gh_stars>1000+ from .meta import TouchUpMeta from .service import TouchUp __all__ = ( "TouchUp", "TouchUpMeta", )
49
1,086
<reponame>3bdullahnf/ring /* SDL_mixer: An audio mixer library based on the SDL library Copyright (C) 1997-2016 <NAME> <<EMAIL>> This software is provided 'as-is', without any express or implied warranty. In no event will the authors be held liable for any damages arising from the use of this software. Permission is granted to anyone to use this software for any purpose, including commercial applications, and to alter it and redistribute it freely, subject to the following restrictions: 1. The origin of this software must not be misrepresented; you must not claim that you wrote the original software. If you use this software in a product, an acknowledgment in the product documentation would be appreciated but is not required. 2. Altered source versions must be plainly marked as such, and must not be misrepresented as being the original software. 3. This notice may not be removed or altered from any source distribution. */ /* $Id$ */ /* This file supports streaming WAV files, without volume adjustment */ #include <stdlib.h> #include <string.h> #include "SDL_audio.h" #include "SDL_mutex.h" #include "SDL_rwops.h" #include "SDL_endian.h" #include "SDL_mixer.h" #include "wavestream.h" /* Taken with permission from SDL_wave.h, part of the SDL library, available at: http://www.libsdl.org/ and placed under the same license as this mixer library. 
*/ /* WAVE files are little-endian */ /*******************************************/ /* Define values for Microsoft WAVE format */ /*******************************************/ #define RIFF 0x46464952 /* "RIFF" */ #define WAVE 0x45564157 /* "WAVE" */ #define FMT 0x20746D66 /* "fmt " */ #define DATA 0x61746164 /* "data" */ #define SMPL 0x6c706d73 /* "smpl" */ #define PCM_CODE 1 #define ADPCM_CODE 2 #define WAVE_MONO 1 #define WAVE_STEREO 2 typedef struct { /* Not saved in the chunk we read: Uint32 chunkID; Uint32 chunkLen; */ Uint16 encoding; Uint16 channels; /* 1 = mono, 2 = stereo */ Uint32 frequency; /* One of 11025, 22050, or 44100 Hz */ Uint32 byterate; /* Average bytes per second */ Uint16 blockalign; /* Bytes per sample block */ Uint16 bitspersample; /* One of 8, 12, 16, or 4 for ADPCM */ } WaveFMT; typedef struct { Uint32 identifier; Uint32 type; Uint32 start; Uint32 end; Uint32 fraction; Uint32 play_count; } SampleLoop; typedef struct { /* Not saved in the chunk we read: Uint32 chunkID; Uint32 chunkLen; */ Uint32 manufacturer; Uint32 product; Uint32 sample_period; Uint32 MIDI_unity_note; Uint32 MIDI_pitch_fraction; Uint32 SMTPE_format; Uint32 SMTPE_offset; Uint32 sample_loops; Uint32 sampler_data; SampleLoop loops[]; } SamplerChunk; /*********************************************/ /* Define values for AIFF (IFF audio) format */ /*********************************************/ #define FORM 0x4d524f46 /* "FORM" */ #define AIFF 0x46464941 /* "AIFF" */ #define SSND 0x444e5353 /* "SSND" */ #define COMM 0x4d4d4f43 /* "COMM" */ /* Currently we only support a single stream at a time */ static WAVStream *music = NULL; /* This is the format of the audio mixer data */ static SDL_AudioSpec mixer; static int wavestream_volume = MIX_MAX_VOLUME; /* Function to load the WAV/AIFF stream */ static SDL_bool LoadWAVStream(WAVStream *wave); static SDL_bool LoadAIFFStream(WAVStream *wave); /* Initialize the WAVStream player, with the given mixer settings This function returns 0, or 
-1 if there was an error. */ int WAVStream_Init(SDL_AudioSpec *mixerfmt) { mixer = *mixerfmt; return(0); } void WAVStream_SetVolume(int volume) { wavestream_volume = volume; } /* Load a WAV stream from the given RWops object */ WAVStream *WAVStream_LoadSong_RW(SDL_RWops *src, int freesrc) { WAVStream *wave; SDL_bool loaded = SDL_FALSE; if (!mixer.format) { Mix_SetError("WAV music output not started"); return(NULL); } wave = (WAVStream *)SDL_malloc(sizeof *wave); if (wave) { Uint32 magic; SDL_zerop(wave); wave->src = src; wave->freesrc = freesrc; magic = SDL_ReadLE32(src); if (magic == RIFF || magic == WAVE) { loaded = LoadWAVStream(wave); } else if (magic == FORM) { loaded = LoadAIFFStream(wave); } else { Mix_SetError("Unknown WAVE format"); } if (!loaded) { WAVStream_FreeSong(wave); return(NULL); } SDL_BuildAudioCVT(&wave->cvt, wave->spec.format, wave->spec.channels, wave->spec.freq, mixer.format, mixer.channels, mixer.freq); } else { SDL_OutOfMemory(); return(NULL); } return(wave); } /* Start playback of a given WAV stream */ void WAVStream_Start(WAVStream *wave) { int i; for (i = 0; i < wave->numloops; ++i) { WAVLoopPoint *loop = &wave->loops[i]; loop->active = SDL_TRUE; loop->current_play_count = loop->initial_play_count; } SDL_RWseek(wave->src, wave->start, RW_SEEK_SET); music = wave; } /* Play some of a stream previously started with WAVStream_Start() */ static int PlaySome(Uint8 *stream, int len) { Sint64 pos, stop; WAVLoopPoint *loop; Sint64 loop_start; Sint64 loop_stop; int i; int consumed; pos = SDL_RWtell(music->src); stop = music->stop; loop = NULL; for (i = 0; i < music->numloops; ++i) { loop = &music->loops[i]; if (loop->active) { const int bytes_per_sample = (SDL_AUDIO_BITSIZE(music->spec.format) / 8) * music->spec.channels; loop_start = music->start + loop->start * bytes_per_sample; loop_stop = music->start + (loop->stop + 1) * bytes_per_sample; if (pos >= loop_start && pos < loop_stop) { stop = loop_stop; break; } } loop = NULL; } if 
(music->cvt.needed) { int original_len; original_len = (int)((double)len/music->cvt.len_ratio); if (music->cvt.len != original_len) { int worksize; if (music->cvt.buf != NULL) { SDL_free(music->cvt.buf); } worksize = original_len*music->cvt.len_mult; music->cvt.buf=(Uint8 *)SDL_malloc(worksize); if (music->cvt.buf == NULL) { return 0; } music->cvt.len = original_len; } if ((stop - pos) < original_len) { original_len = (int)(stop - pos); } original_len = SDL_RWread(music->src, music->cvt.buf, 1, original_len); /* At least at the time of writing, SDL_ConvertAudio() does byte-order swapping starting at the end of the buffer. Thus, if we are reading 16-bit samples, we had better make damn sure that we get an even number of bytes, or we'll get garbage. */ if ((music->cvt.src_format & 0x0010) && (original_len & 1)) { original_len--; } music->cvt.len = original_len; SDL_ConvertAudio(&music->cvt); SDL_MixAudio(stream, music->cvt.buf, music->cvt.len_cvt, wavestream_volume); consumed = music->cvt.len_cvt; } else { Uint8 *data; if ((stop - pos) < len) { len = (int)(stop - pos); } data = SDL_stack_alloc(Uint8, len); if (data) { len = SDL_RWread(music->src, data, 1, len); SDL_MixAudio(stream, data, len, wavestream_volume); SDL_stack_free(data); } consumed = len; } if (loop && SDL_RWtell(music->src) >= stop) { if (loop->current_play_count == 1) { loop->active = SDL_FALSE; } else { if (loop->current_play_count > 0) { --loop->current_play_count; } SDL_RWseek(music->src, loop_start, RW_SEEK_SET); } } return consumed; } int WAVStream_PlaySome(Uint8 *stream, int len) { if (!music) return 0; while ((SDL_RWtell(music->src) < music->stop) && (len > 0)) { int consumed = PlaySome(stream, len); if (!consumed) break; stream += consumed; len -= consumed; } return len; } /* Stop playback of a stream previously started with WAVStream_Start() */ void WAVStream_Stop(void) { music = NULL; } /* Close the given WAV stream */ void WAVStream_FreeSong(WAVStream *wave) { if (wave) { /* Clean up 
associated data */ if (wave->loops) { SDL_free(wave->loops); } if (wave->cvt.buf) { SDL_free(wave->cvt.buf); } if (wave->freesrc) { SDL_RWclose(wave->src); } SDL_free(wave); } } /* Return non-zero if a stream is currently playing */ int WAVStream_Active(void) { int active; active = 0; if (music && (SDL_RWtell(music->src) < music->stop)) { active = 1; } return(active); } static SDL_bool ParseFMT(WAVStream *wave, Uint32 chunk_length) { SDL_RWops *src = wave->src; SDL_AudioSpec *spec = &wave->spec; WaveFMT *format; Uint8 *data; SDL_bool loaded = SDL_FALSE; if (chunk_length < sizeof(*format)) { Mix_SetError("Wave format chunk too small"); return SDL_FALSE; } data = (Uint8 *)SDL_malloc(chunk_length); if (!data) { Mix_SetError("Out of memory"); return SDL_FALSE; } if (!SDL_RWread(wave->src, data, chunk_length, 1)) { Mix_SetError("Couldn't read %d bytes from WAV file", chunk_length); return SDL_FALSE; } format = (WaveFMT *)data; /* Decode the audio data format */ switch (SDL_SwapLE16(format->encoding)) { case PCM_CODE: /* We can understand this */ break; default: Mix_SetError("Unknown WAVE data format"); goto done; } spec->freq = SDL_SwapLE32(format->frequency); switch (SDL_SwapLE16(format->bitspersample)) { case 8: spec->format = AUDIO_U8; break; case 16: spec->format = AUDIO_S16; break; default: Mix_SetError("Unknown PCM data format"); goto done; } spec->channels = (Uint8) SDL_SwapLE16(format->channels); spec->samples = 4096; /* Good default buffer size */ loaded = SDL_TRUE; done: SDL_free(data); return loaded; } static SDL_bool ParseDATA(WAVStream *wave, Uint32 chunk_length) { wave->start = SDL_RWtell(wave->src); wave->stop = wave->start + chunk_length; SDL_RWseek(wave->src, chunk_length, RW_SEEK_CUR); return SDL_TRUE; } static SDL_bool AddLoopPoint(WAVStream *wave, Uint32 play_count, Uint32 start, Uint32 stop) { WAVLoopPoint *loop; WAVLoopPoint *loops = SDL_realloc(wave->loops, (wave->numloops + 1)*sizeof(*wave->loops)); if (!loops) { Mix_SetError("Out of memory"); 
return SDL_FALSE; } loop = &loops[ wave->numloops ]; loop->start = start; loop->stop = stop; loop->initial_play_count = play_count; loop->current_play_count = play_count; wave->loops = loops; ++wave->numloops; return SDL_TRUE; } static SDL_bool ParseSMPL(WAVStream *wave, Uint32 chunk_length) { SamplerChunk *chunk; Uint8 *data; int i; SDL_bool loaded = SDL_FALSE; data = (Uint8 *)SDL_malloc(chunk_length); if (!data) { Mix_SetError("Out of memory"); return SDL_FALSE; } if (!SDL_RWread(wave->src, data, chunk_length, 1)) { Mix_SetError("Couldn't read %d bytes from WAV file", chunk_length); return SDL_FALSE; } chunk = (SamplerChunk *)data; for (i = 0; i < SDL_SwapLE32(chunk->sample_loops); ++i) { const Uint32 LOOP_TYPE_FORWARD = 0; Uint32 loop_type = SDL_SwapLE32(chunk->loops[i].type); if (loop_type == LOOP_TYPE_FORWARD) { AddLoopPoint(wave, SDL_SwapLE32(chunk->loops[i].play_count), SDL_SwapLE32(chunk->loops[i].start), SDL_SwapLE32(chunk->loops[i].end)); } } loaded = SDL_TRUE; done: SDL_free(data); return loaded; } static SDL_bool LoadWAVStream(WAVStream *wave) { SDL_RWops *src = wave->src; Uint32 chunk_type; Uint32 chunk_length; SDL_bool found_FMT = SDL_FALSE; SDL_bool found_DATA = SDL_FALSE; /* WAV magic header */ Uint32 wavelen; Uint32 WAVEmagic; /* Check the magic header */ wavelen = SDL_ReadLE32(src); WAVEmagic = SDL_ReadLE32(src); /* Read the chunks */ for (; ;) { chunk_type = SDL_ReadLE32(src); chunk_length = SDL_ReadLE32(src); if (chunk_length == 0) break; switch (chunk_type) { case FMT: found_FMT = SDL_TRUE; if (!ParseFMT(wave, chunk_length)) return SDL_FALSE; break; case DATA: found_DATA = SDL_TRUE; if (!ParseDATA(wave, chunk_length)) return SDL_FALSE; break; case SMPL: if (!ParseSMPL(wave, chunk_length)) return SDL_FALSE; break; default: SDL_RWseek(src, chunk_length, RW_SEEK_CUR); break; } } if (!found_FMT) { Mix_SetError("Bad WAV file (no FMT chunk)"); return SDL_FALSE; } if (!found_DATA) { Mix_SetError("Bad WAV file (no DATA chunk)"); return SDL_FALSE; } 
return SDL_TRUE; } /* I couldn't get SANE_to_double() to work, so I stole this from libsndfile. * I don't pretend to fully understand it. */ static Uint32 SANE_to_Uint32 (Uint8 *sanebuf) { /* Negative number? */ if (sanebuf[0] & 0x80) return 0; /* Less than 1? */ if (sanebuf[0] <= 0x3F) return 1; /* Way too big? */ if (sanebuf[0] > 0x40) return 0x4000000; /* Still too big? */ if (sanebuf[0] == 0x40 && sanebuf[1] > 0x1C) return 800000000; return ((sanebuf[2] << 23) | (sanebuf[3] << 15) | (sanebuf[4] << 7) | (sanebuf[5] >> 1)) >> (29 - sanebuf[1]); } static SDL_bool LoadAIFFStream(WAVStream *wave) { SDL_RWops *src = wave->src; SDL_AudioSpec *spec = &wave->spec; SDL_bool found_SSND = SDL_FALSE; SDL_bool found_COMM = SDL_FALSE; Uint32 chunk_type; Uint32 chunk_length; Sint64 next_chunk; /* AIFF magic header */ Uint32 AIFFmagic; /* SSND chunk */ Uint32 offset; Uint32 blocksize; /* COMM format chunk */ Uint16 channels = 0; Uint32 numsamples = 0; Uint16 samplesize = 0; Uint8 sane_freq[10]; Uint32 frequency = 0; /* Check the magic header */ chunk_length = SDL_ReadBE32(src); AIFFmagic = SDL_ReadLE32(src); if (AIFFmagic != AIFF) { Mix_SetError("Unrecognized file type (not AIFF)"); return SDL_FALSE; } /* From what I understand of the specification, chunks may appear in * any order, and we should just ignore unknown ones. * * TODO: Better sanity-checking. E.g. what happens if the AIFF file * contains compressed sound data? 
*/ do { chunk_type = SDL_ReadLE32(src); chunk_length = SDL_ReadBE32(src); next_chunk = SDL_RWtell(src) + chunk_length; /* Paranoia to avoid infinite loops */ if (chunk_length == 0) break; switch (chunk_type) { case SSND: found_SSND = SDL_TRUE; offset = SDL_ReadBE32(src); blocksize = SDL_ReadBE32(src); wave->start = SDL_RWtell(src) + offset; break; case COMM: found_COMM = SDL_TRUE; /* Read the audio data format chunk */ channels = SDL_ReadBE16(src); numsamples = SDL_ReadBE32(src); samplesize = SDL_ReadBE16(src); SDL_RWread(src, sane_freq, sizeof(sane_freq), 1); frequency = SANE_to_Uint32(sane_freq); break; default: break; } } while ((!found_SSND || !found_COMM) && SDL_RWseek(src, next_chunk, RW_SEEK_SET) != -1); if (!found_SSND) { Mix_SetError("Bad AIFF file (no SSND chunk)"); return SDL_FALSE; } if (!found_COMM) { Mix_SetError("Bad AIFF file (no COMM chunk)"); return SDL_FALSE; } wave->stop = wave->start + channels * numsamples * (samplesize / 8); /* Decode the audio data format */ SDL_memset(spec, 0, (sizeof *spec)); spec->freq = frequency; switch (samplesize) { case 8: spec->format = AUDIO_S8; break; case 16: spec->format = AUDIO_S16MSB; break; default: Mix_SetError("Unknown samplesize in data format"); return SDL_FALSE; } spec->channels = (Uint8) channels; spec->samples = 4096; /* Good default buffer size */ return SDL_TRUE; }
7,976
884
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. See License.txt in the project root for license information. from .utils import AutoNumber class CdmRelationshipDiscoveryStyle(AutoNumber): """Describes the types of relationships you want populated in a Manifest. 'NONE' will not add any relationships to a Folio 'EXCLUSIVE' will only include relationships where the toEntity and fromEntity are entities found in this folio 'ALL' will include all relationships including any relationships where the toEntity or the fromEntity point to entities not found in the folio""" NONE = () EXCLUSIVE = () ALL = ()
176
2,077
/**
 Copyright (c) 2015-present, Facebook, Inc.
 All rights reserved.

 This source code is licensed under the BSD-style license found in the
 LICENSE file in the root directory of this source tree.
 */

#include <pbxbuild/WorkspaceContext.h>
#include <pbxsetting/Environment.h>
#include <libutil/Filesystem.h>
#include <libutil/FSUtil.h>

using pbxbuild::WorkspaceContext;
using pbxbuild::DerivedDataHash;
using libutil::Filesystem;
using libutil::FSUtil;

WorkspaceContext::
WorkspaceContext(
    std::string const &basePath,
    DerivedDataHash const &derivedDataHash,
    xcworkspace::XC::Workspace::shared_ptr const &workspace,
    pbxproj::PBX::Project::shared_ptr const &project,
    std::vector<xcscheme::SchemeGroup::shared_ptr> const &schemeGroups,
    std::unordered_map<std::string, pbxproj::PBX::Project::shared_ptr> const &projects,
    std::unordered_map<pbxproj::XC::BuildConfiguration::shared_ptr, pbxsetting::XC::Config> const &configs) :
    _basePath       (basePath),
    _derivedDataHash(derivedDataHash),
    _workspace      (workspace),
    _project        (project),
    _schemeGroups   (schemeGroups),
    _projects       (projects),
    _configs        (configs)
{
}

WorkspaceContext::
~WorkspaceContext()
{
}

/* Look up a loaded project by path; returns nullptr when not loaded. */
pbxproj::PBX::Project::shared_ptr WorkspaceContext::
project(std::string const &projectPath) const
{
    /* Normalize the path in case it has relative components. */
    std::string resolvedProjectPath = FSUtil::NormalizePath(projectPath);

    auto PI = _projects.find(resolvedProjectPath);
    if (PI != _projects.end()) {
        return PI->second;
    } else {
        return nullptr;
    }
}

/* Find a scheme by name, searching all scheme groups in order. */
xcscheme::XC::Scheme::shared_ptr WorkspaceContext::
scheme(std::string const &name) const
{
    for (xcscheme::SchemeGroup::shared_ptr const &schemeGroup : _schemeGroups) {
        if (xcscheme::XC::Scheme::shared_ptr const &scheme = schemeGroup->scheme(name)) {
            return scheme;
        }
    }

    return nullptr;
}

/* Return every on-disk file this context was built from (workspace data,
   project data, schemes, xcconfig files). */
std::vector<std::string> WorkspaceContext::
loadedFilePaths() const
{
    std::vector<std::string> loadedFilePaths;

    /* Estimate all files; assume two schemes per scheme group. */
    loadedFilePaths.reserve(_projects.size() + _schemeGroups.size() * 2 + 1);

    /*
     * Add workspace data path.
     */
    if (_workspace != nullptr) {
        loadedFilePaths.push_back(_workspace->dataFile());
    }

    /*
     * Add all projects' data paths.
     */
    for (auto const &entry : _projects) {
        loadedFilePaths.push_back(entry.second->dataFile());
    }

    /*
     * Add all schemes' data paths.
     */
    for (xcscheme::SchemeGroup::shared_ptr const &schemeGroup : _schemeGroups) {
        for (xcscheme::XC::Scheme::shared_ptr const &scheme : schemeGroup->schemes()) {
            loadedFilePaths.push_back(scheme->path());
        }
    }

    /*
     * Add all config file paths.
     */
    for (auto const &entry : _configs) {
        loadedFilePaths.push_back(entry.second.path());
    }

    return loadedFilePaths;
}

/* Depth-first walk of a workspace group item, invoking `cb` on each file ref. */
static void
IterateWorkspaceItem(xcworkspace::XC::GroupItem::shared_ptr const &item, std::function<void(xcworkspace::XC::FileRef::shared_ptr const &)> const &cb)
{
    switch (item->type()) {
        case xcworkspace::XC::GroupItem::Type::Group: {
            xcworkspace::XC::Group::shared_ptr group = std::static_pointer_cast<xcworkspace::XC::Group>(item);
            for (xcworkspace::XC::GroupItem::shared_ptr const &child : group->items()) {
                IterateWorkspaceItem(child, cb);
            }
            break;
        }
        case xcworkspace::XC::GroupItem::Type::FileRef: {
            xcworkspace::XC::FileRef::shared_ptr fileRef = std::static_pointer_cast<xcworkspace::XC::FileRef>(item);
            cb(fileRef);
            break;
        }
    }
}

/* Invoke `cb` on every file reference in the workspace. */
static void
IterateWorkspaceFiles(xcworkspace::XC::Workspace::shared_ptr const &workspace, std::function<void(xcworkspace::XC::FileRef::shared_ptr const &)> const &cb)
{
    for (xcworkspace::XC::GroupItem::shared_ptr const &item : workspace->items()) {
        IterateWorkspaceItem(item, cb);
    }
}

static void
LoadWorkspaceProjects(Filesystem const *filesystem, std::vector<pbxproj::PBX::Project::shared_ptr> *projects, xcworkspace::XC::Workspace::shared_ptr const &workspace)
{
    /*
     * Load all the projects in the workspace.
     */
    IterateWorkspaceFiles(workspace, [&](xcworkspace::XC::FileRef::shared_ptr const &ref) {
        std::string path = ref->resolve(workspace);

        pbxproj::PBX::Project::shared_ptr project = pbxproj::PBX::Project::Open(filesystem, path);
        if (project != nullptr) {
            projects->push_back(project);
        }
    });
}

static void
LoadConfigurationFiles(
    Filesystem const *filesystem,
    std::unordered_map<pbxproj::XC::BuildConfiguration::shared_ptr, pbxsetting::XC::Config> *configs,
    pbxsetting::Environment const &environment,
    pbxproj::XC::ConfigurationList::shared_ptr const &configurationList)
{
    if (configurationList == nullptr) {
        return;
    }

    /*
     * Load all configuration files in the list.
     */
    for (pbxproj::XC::BuildConfiguration::shared_ptr const &buildConfiguration : configurationList->buildConfigurations()) {
        if (pbxproj::PBX::FileReference::shared_ptr const &configurationReference = buildConfiguration->baseConfigurationReference()) {
            std::string configurationPath = environment.expand(configurationReference->resolve());

            /* Load the configuration file. */
            if (ext::optional<pbxsetting::XC::Config> configuration = pbxsetting::XC::Config::Load(filesystem, environment, configurationPath)) {
                configs->insert({ buildConfiguration, *configuration });
            }
        }
    }
}

static void
LoadNestedProjects(
    Filesystem const *filesystem,
    std::vector<pbxproj::PBX::Project::shared_ptr> *projects,
    std::unordered_map<pbxproj::XC::BuildConfiguration::shared_ptr, pbxsetting::XC::Config> *configs,
    pbxsetting::Environment const &baseEnvironment,
    std::vector<pbxproj::PBX::Project::shared_ptr> const &rootProjects)
{
    std::vector<pbxproj::PBX::Project::shared_ptr> nestedProjects;

    /*
     * Load all nested projects recursively.
     */
    for (pbxproj::PBX::Project::shared_ptr const &project : rootProjects) {
        /*
         * Determine the settings environment to find the project paths. This may not be complete,
         * but it's unclear exactly what settings are available here. Notably, we don't yet know what
         * the configuration or what target to use, so just the project settings seems reasonable.
         */
        pbxsetting::Environment environment = pbxsetting::Environment(baseEnvironment);
        environment.insertFront(project->settings(), false);

        /*
         * Load project and target configurations.
         */
        LoadConfigurationFiles(filesystem, configs, environment, project->buildConfigurationList());
        for (pbxproj::PBX::Target::shared_ptr const &target : project->targets()) {
            LoadConfigurationFiles(filesystem, configs, environment, target->buildConfigurationList());
        }

        /*
         * Find the nested projects.
         */
        for (pbxproj::PBX::Project::ProjectReference const &projectReference : project->projectReferences()) {
            pbxproj::PBX::FileReference::shared_ptr const &projectFileReference = projectReference.projectReference();
            std::string projectPath = environment.expand(projectFileReference->resolve());

            /*
             * Load the nested project.
             * FIX: this local was previously also named `project`, shadowing
             * the outer loop variable of the same name.
             */
            pbxproj::PBX::Project::shared_ptr nestedProject = pbxproj::PBX::Project::Open(filesystem, projectPath);
            if (nestedProject != nullptr) {
                nestedProjects.push_back(nestedProject);
            }
        }
    }

    /*
     * Append the nested projects. This has to be after the loop as `rootProjects` might alias `projects`.
     */
    projects->insert(projects->end(), nestedProjects.begin(), nestedProjects.end());

    if (!nestedProjects.empty()) {
        /*
         * Load nested projects of the nested projects.
         */
        LoadNestedProjects(filesystem, projects, configs, baseEnvironment, nestedProjects);
    }
}

static void
LoadProjectSchemes(Filesystem const *filesystem, std::string const &userName, std::vector<xcscheme::SchemeGroup::shared_ptr> *schemeGroups, std::vector<pbxproj::PBX::Project::shared_ptr> const &projects)
{
    /*
     * Load the schemes inside the projects.
     */
    for (pbxproj::PBX::Project::shared_ptr const &project : projects) {
        xcscheme::SchemeGroup::shared_ptr projectGroup = xcscheme::SchemeGroup::Open(filesystem, userName, project->basePath(), project->projectFile(), project->name());
        if (projectGroup != nullptr) {
            schemeGroups->push_back(projectGroup);
        }
    }
}

/* Build the path -> project lookup map used by WorkspaceContext::project(). */
static std::unordered_map<std::string, pbxproj::PBX::Project::shared_ptr>
CreateProjectMap(std::vector<pbxproj::PBX::Project::shared_ptr> const &projects)
{
    std::unordered_map<std::string, pbxproj::PBX::Project::shared_ptr> projectsMap;

    for (pbxproj::PBX::Project::shared_ptr const &project : projects) {
        /* Normalize path so it can be found on lookup. */
        std::string normalizedPath = FSUtil::NormalizePath(project->projectFile());
        projectsMap.insert({ normalizedPath, project });
    }

    return projectsMap;
}

WorkspaceContext WorkspaceContext::
Workspace(Filesystem const *filesystem, std::string const &userName, pbxsetting::Environment const &baseEnvironment, xcworkspace::XC::Workspace::shared_ptr const &workspace)
{
    std::vector<pbxproj::PBX::Project::shared_ptr> projects;
    std::vector<xcscheme::SchemeGroup::shared_ptr> schemeGroups;
    std::unordered_map<pbxproj::XC::BuildConfiguration::shared_ptr, pbxsetting::XC::Config> configs;

    /*
     * Add the schemes from the workspace itself.
     */
    xcscheme::SchemeGroup::shared_ptr workspaceGroup = xcscheme::SchemeGroup::Open(filesystem, userName, workspace->basePath(), workspace->projectFile(), workspace->name());
    if (workspaceGroup != nullptr) {
        schemeGroups.push_back(workspaceGroup);
    }

    /*
     * Load projects within the workspace.
     */
    LoadWorkspaceProjects(filesystem, &projects, workspace);

    /*
     * Recursively load nested projects within those projects.
     */
    LoadNestedProjects(filesystem, &projects, &configs, baseEnvironment, projects);

    /*
     * Load schemes for all projects, including nested projects.
     */
    LoadProjectSchemes(filesystem, userName, &schemeGroups, projects);

    /*
     * Determine the DerivedData path for the workspace.
     */
    DerivedDataHash derivedDataHash = DerivedDataHash::Create(workspace->projectFile());

    return WorkspaceContext(workspace->basePath(), derivedDataHash, workspace, nullptr, schemeGroups, CreateProjectMap(projects), configs);
}

WorkspaceContext WorkspaceContext::
Project(Filesystem const *filesystem, std::string const &userName, pbxsetting::Environment const &baseEnvironment, pbxproj::PBX::Project::shared_ptr const &project)
{
    std::vector<pbxproj::PBX::Project::shared_ptr> projects;
    std::vector<xcscheme::SchemeGroup::shared_ptr> schemeGroups;
    std::unordered_map<pbxproj::XC::BuildConfiguration::shared_ptr, pbxsetting::XC::Config> configs;

    /*
     * The root is a project, so it should be in the projects list.
     */
    projects.push_back(project);

    /*
     * Recursively load nested projects within the project.
     */
    LoadNestedProjects(filesystem, &projects, &configs, baseEnvironment, projects);

    /*
     * Load schemes for all projects, including the root and nested projects.
     */
    LoadProjectSchemes(filesystem, userName, &schemeGroups, projects);

    /*
     * Determine the DerivedData path for the root project.
     */
    DerivedDataHash derivedDataHash = DerivedDataHash::Create(project->projectFile());

    return WorkspaceContext(project->basePath(), derivedDataHash, nullptr, project, schemeGroups, CreateProjectMap(projects), configs);
}
4,512
1,668
package org.elixir_lang.parser_definition;

import org.elixir_lang.sdk.elixir.Release;

/**
 * atom is invalid to the right of `.`, so unlike in {@link MatchedDotOperationParsingTestcase}, this tests only when
 * atom is left of `.` and the right operand varies based on the test name.
 */
public class MatchedQualifiedMultipleAliasesParsingTestCase extends ParsingTestCase {
    /*
     * matchedExpression
     */

    // Tests passing `false` skip the quoted-result comparison and only check parsing.

    public void testMatchedCaptureNonNumericOperation() {
        assertParsedAndQuotedCorrectlyInOneThree(false);
    }

    public void testMatchedInMatchOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedWhenNoParenthesesKeywordsOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedWhenOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedTypeOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedPipeOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedMatchOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedOrOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedAndOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedComparisonOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedRelationalOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedArrowOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedInOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedThreeOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedTwoOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedAdditionOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedMultiplicationOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedUnaryNonNumericOperation() {
        assertParsedAndQuotedCorrectlyInOneThree(false);
    }

    public void testMatchedDotCallOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedQualifiedAliasOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedQualifiedCallOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testMatchedAtNonNumericOperation() {
        assertParsedAndQuotedCorrectlyInOneThree(false);
    }

    public void testMatchedUnqualifiedCallOperation() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testVariable() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    /*
     * accessExpression
     */

    public void testAtNumericOperation() {
        assertParsedAndQuotedCorrectlyInOneThree(false);
    }

    public void testCaptureNumericOperation() {
        assertParsedAndQuotedCorrectlyInOneThree(false);
    }

    public void testUnaryNumericOperation() {
        assertParsedAndQuotedCorrectlyInOneThree(false);
    }

    public void testList() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testSigil() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testAtomKeyword() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testAlias() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    /*
     * numeric
     */

    public void testCharToken() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testBinaryWholeNumber() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testHexadecimalWholeNumber() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testOctalWholeNumber() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    // An unknown base is a lexer error, so parsing is only checked around the error element.
    public void testUnknownBaseWholeNumber() {
        assertParsedAndQuotedAroundError();
    }

    public void testDecimalFloat() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testDecimalWholeNumber() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testStringLine() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testStringHeredoc() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testCharListLine() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    public void testCharListHeredoc() {
        assertParsedAndQuotedCorrectlyInOneThree();
    }

    /*
     * Protected Instance Methods
     */

    @Override
    protected String getTestDataPath() {
        return super.getTestDataPath() + "/matched_qualified_multiple_aliases_parsing_test_case";
    }

    /**
     * Asserts parse-and-quote with result checking enabled.
     */
    private void assertParsedAndQuotedCorrectlyInOneThree() {
        assertParsedAndQuotedCorrectlyInOneThree(true);
    }

    /**
     * Multiple aliases (`Foo.{Bar, Baz}`) only parse on Elixir versions that support them;
     * on older SDKs the fixture is expected to parse around an error instead.
     */
    private void assertParsedAndQuotedCorrectlyInOneThree(boolean checkResult) {
        if (elixirSdkRelease().level().supportsMultipleAliases) {
            assertParsedAndQuotedCorrectly(checkResult);
        } else {
            assertParsedAndQuotedAroundError(checkResult);
        }
    }

    /*
     * Private Instance Methods
     */

    /**
     * Parses the ELIXIR_VERSION environment variable into a {@link Release}; fails the test if unparseable.
     */
    private Release elixirSdkRelease() {
        String elixirVersion = elixirVersion();
        Release elixirSdkRelease = Release.fromString((elixirVersion));

        assertNotNull(
                "ELIXIR_VERSION (" + elixirVersion + ") could not be parsed into an ElixirSdkRelease",
                elixirSdkRelease
        );

        return elixirSdkRelease;
    }

    /**
     * Reads the ELIXIR_VERSION environment variable; fails the test if unset.
     */
    private String elixirVersion() {
        String elixirVersion = System.getenv("ELIXIR_VERSION");

        assertNotNull("ELIXIR_VERSION is not set", elixirVersion);

        return elixirVersion;
    }
}
2,352
773
"""Python representations of Cryptol kinds, types, props, and type schemas,
plus converters between them and their JSON wire encoding."""
from __future__ import annotations

from collections import OrderedDict
from abc import ABCMeta, abstractmethod
import base64
from math import ceil
import BitVector #type: ignore
from .bitvector import BV
from .opaque import OpaqueValue

from typing import Any, Dict, Iterable, List, NoReturn, Optional, TypeVar, Union
from typing_extensions import Literal, Protocol

A = TypeVar('A')


class CryptolJSON(Protocol):
    """Structural type for anything that can render itself as Cryptol JSON."""
    def __to_cryptol__(self, ty : CryptolType) -> Any: ...


class CryptolCode(metaclass=ABCMeta):
    """A fragment of Cryptol code; calling it builds an application node."""

    def __call__(self, other : CryptolJSON) -> CryptolCode:
        return CryptolApplication(self, other)

    @abstractmethod
    def __to_cryptol__(self, ty : CryptolType) -> Any: ...


class CryptolLiteral(CryptolCode):
    """A literal Cryptol expression, kept as its source text."""

    def __init__(self, code : str) -> None:
        self._code = code

    def __to_cryptol__(self, ty : CryptolType) -> Any:
        return self._code


class CryptolApplication(CryptolCode):
    """An application of a Cryptol operator to zero or more operands."""

    def __init__(self, rator : CryptolJSON, *rands : CryptolJSON) -> None:
        self._rator = rator
        self._rands = rands

    def __to_cryptol__(self, ty : CryptolType) -> Any:
        return {'expression': 'call',
                'function': to_cryptol(self._rator),
                'arguments': [to_cryptol(arg) for arg in self._rands]}


class CryptolArrowKind:
    """Kind of type constructors: ``domain -> range``."""

    def __init__(self, dom : CryptolKind, ran : CryptolKind):
        self.domain = dom
        self.range = ran

    def __repr__(self) -> str:
        return f"CryptolArrowKind({self.domain!r}, {self.range!r})"


CryptolKind = Union[Literal['Type'], Literal['Num'], Literal['Prop'], CryptolArrowKind]


def to_kind(k : Any) -> CryptolKind:
    """Decode a JSON-encoded Cryptol kind.

    Raises ValueError for unrecognized encodings."""
    if k == "Type": return "Type"
    elif k == "Num": return "Num"
    elif k == "Prop": return "Prop"
    elif k['kind'] == "arrow":
        # FIX: decode the operands recursively; previously the raw JSON
        # dicts were stored, contradicting the declared CryptolKind fields.
        return CryptolArrowKind(to_kind(k['from']), to_kind(k['to']))
    else:
        raise ValueError(f'Not a Cryptol kind: {k!r}')


class CryptolProp:
    """Base class for Cryptol typeclass constraints (propositions)."""
    pass


class UnaryProp(CryptolProp):
    """A proposition about a single type (e.g. ``fin n``)."""

    def __init__(self, subject : CryptolType) -> None:
        self.subject = subject

    def __repr__(self) -> str:
        # Shared by all subclasses: renders e.g. Fin(...), Cmp(...), etc.
        return f"{type(self).__name__}({self.subject!r})"


class Fin(UnaryProp): pass
class Cmp(UnaryProp): pass
class SignedCmp(UnaryProp): pass
class Zero(UnaryProp): pass
class Arith(UnaryProp): pass
class Logic(UnaryProp): pass


def to_cryptol(val : Any, cryptol_type : Optional[CryptolType] = None) -> Any:
    """Convert a Python value to Cryptol JSON, optionally guided by a type."""
    if cryptol_type is not None:
        return cryptol_type.from_python(val)
    else:
        return CryptolType().from_python(val)


def fail_with(exn : Exception) -> NoReturn:
    """Expression-position helper that raises `exn`."""
    raise exn


def is_plausible_json(val : Any) -> bool:
    """Shallow structural check that `val` is JSON-serializable."""
    for ty in [bool, int, float, str]:
        if isinstance(val, ty): return True
    if isinstance(val, dict):
        return all(isinstance(k, str) and is_plausible_json(val[k]) for k in val)
    if isinstance(val, tuple) or isinstance(val, list):
        return all(is_plausible_json(elt) for elt in val)
    return False


class CryptolType:
    """Base class of all Cryptol types; also acts as the untyped converter."""

    def from_python(self, val : Any) -> Any:
        """Convert `val` to Cryptol JSON, honoring __to_cryptol__ if present."""
        if hasattr(val, '__to_cryptol__'):
            code = val.__to_cryptol__(self)
            if is_plausible_json(code):
                return code
            else:
                raise ValueError(f"Improbable JSON from __to_cryptol__: {val!r} gave {code!r}")
        else:
            return self.convert(val)

    def convert(self, val : Any) -> Any:
        """Default conversion of plain Python values to Cryptol JSON."""
        if isinstance(val, bool):
            # NOTE: bool must be tested before int (bool is a subclass of int).
            return val
        elif isinstance(val, tuple) and val == ():
            return {'expression': 'unit'}
        elif isinstance(val, tuple):
            return {'expression': 'tuple',
                    'data': [to_cryptol(x) for x in val]}
        elif isinstance(val, dict):
            return {'expression': 'record',
                    'data': {k : to_cryptol(val[k])
                             if isinstance(k, str)
                             else fail_with (TypeError("Record keys must be strings"))
                             for k in val}}
        elif isinstance(val, int):
            return val
        elif isinstance(val, list):
            return {'expression': 'sequence',
                    'data': [to_cryptol(v) for v in val]}
        elif isinstance(val, bytes) or isinstance(val, bytearray):
            return {'expression': 'bits',
                    'encoding': 'base64',
                    'width': 8 * len(val),
                    'data': base64.b64encode(val).decode('ascii')}
        elif isinstance(val, BitVector.BitVector):
            n = int(val)
            byte_width = ceil(n.bit_length()/8)
            return {'expression': 'bits',
                    'encoding': 'base64',
                    'width': val.length(), # N.B. original length, not padded
                    'data': base64.b64encode(n.to_bytes(byte_width,'big')).decode('ascii')}
        elif isinstance(val, BV):
            return {'expression': 'bits',
                    'encoding': 'hex',
                    'width': val.size(), # N.B. original length, not padded
                    'data': val.hex()[2:]}
        elif isinstance(val, OpaqueValue):
            return {'expression': 'variable',
                    'identifier': val.identifier}
        else:
            raise TypeError("Unsupported value: " + str(val))


class Var(CryptolType):
    """A type variable with a name and kind."""

    def __init__(self, name : str, kind : CryptolKind) -> None:
        self.name = name
        self.kind = kind

    def __repr__(self) -> str:
        return f"Var({self.name!r}, {self.kind!r})"


class Function(CryptolType):
    """A function type ``domain -> range``."""

    def __init__(self, dom : CryptolType, ran : CryptolType) -> None:
        self.domain = dom
        self.range = ran

    def __repr__(self) -> str:
        return f"Function({self.domain!r}, {self.range!r})"


class Bitvector(CryptolType):
    """A bitvector type ``[width]``, with width-aware conversion."""

    def __init__(self, width : CryptolType) -> None:
        self.width = width

    def __repr__(self) -> str:
        return f"Bitvector({self.width!r})"

    def convert(self, val : Any) -> Any:
        # XXX figure out what to do when width is not evenly divisible by 8
        if isinstance(val, int):
            w = eval_numeric(self.width, None)
            if w is not None:
                return self.convert(int.to_bytes(val, int(w / 8), 'big', signed=True))
            else:
                raise ValueError(f"Insufficent type information to serialize int as bitvector")
        elif isinstance(val, bytearray) or isinstance(val, bytes):
            return {'expression': 'bits',
                    'encoding': 'base64',
                    'width': eval_numeric(self.width, 8 * len(val)),
                    'data': base64.b64encode(val).decode('ascii')}
        elif isinstance(val, BitVector.BitVector):
            return CryptolType.convert(self, val)
        elif isinstance(val, BV):
            return CryptolType.convert(self, val)
        else:
            raise ValueError(f"Not supported as bitvector: {val!r}")


def eval_numeric(t : Any, default : A) -> Union[int, A]:
    """Return the literal value of a Num type, or `default` otherwise."""
    if isinstance(t, Num):
        return t.number
    else:
        return default


class Num(CryptolType):
    """A type-level numeric literal."""

    def __init__(self, number : int) -> None:
        self.number = number

    def __repr__(self) -> str:
        return f"Num({self.number!r})"


class Bit(CryptolType):
    def __init__(self) -> None:
        pass

    def __repr__(self) -> str:
        return f"Bit()"


class Sequence(CryptolType):
    """A sequence type ``[length]contents``."""

    def __init__(self, length : CryptolType, contents : CryptolType) -> None:
        self.length = length
        self.contents = contents

    def __repr__(self) -> str:
        return f"Sequence({self.length!r}, {self.contents!r})"


class Inf(CryptolType):
    def __repr__(self) -> str:
        return f"Inf()"


class Integer(CryptolType):
    def __repr__(self) -> str:
        return f"Integer()"


class Rational(CryptolType):
    def __repr__(self) -> str:
        return f"Rational()"


class Z(CryptolType):
    """The type ``Z modulus`` of integers mod `modulus`."""

    def __init__(self, modulus : CryptolType) -> None:
        self.modulus = modulus

    def __repr__(self) -> str:
        return f"Z({self.modulus!r})"


class _InfixTypeOp(CryptolType):
    """Shared implementation for binary infix type-level operators.

    Subclasses set `_op` to the operator symbol. `str` renders the infix
    form and `repr` renders the constructor form, exactly as the previous
    per-class definitions did."""

    _op : str

    def __init__(self, left : CryptolType, right : CryptolType) -> None:
        self.left = left
        self.right = right

    def __str__(self) -> str:
        return f"({self.left} {self._op} {self.right})"

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.left!r}, {self.right!r})"


class Plus(_InfixTypeOp): _op = "+"
class Minus(_InfixTypeOp): _op = "-"
class Times(_InfixTypeOp): _op = "*"
class Div(_InfixTypeOp): _op = "/"
class CeilDiv(_InfixTypeOp): _op = "/^"
class Mod(_InfixTypeOp): _op = "%"
class CeilMod(_InfixTypeOp): _op = "%^"
class Expt(_InfixTypeOp): _op = "^^"


class _PrefixUnaryTypeOp(CryptolType):
    """Shared implementation for unary prefix type-level functions."""

    _fn : str

    def __init__(self, operand : CryptolType) -> None:
        self.operand = operand

    def __str__(self) -> str:
        return f"({self._fn} {self.operand})"

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.operand!r})"


class Log2(_PrefixUnaryTypeOp): _fn = "lg2"
class Width(_PrefixUnaryTypeOp): _fn = "width"


class _PrefixBinaryTypeOp(CryptolType):
    """Shared implementation for binary prefix type-level functions."""

    _fn : str

    def __init__(self, left : CryptolType, right : CryptolType) -> None:
        self.left = left
        self.right = right

    def __str__(self) -> str:
        return f"({self._fn} {self.left} {self.right})"

    def __repr__(self) -> str:
        return f"{type(self).__name__}({self.left!r}, {self.right!r})"


class Max(_PrefixBinaryTypeOp): _fn = "max"
class Min(_PrefixBinaryTypeOp): _fn = "min"


class Tuple(CryptolType):
    types : Iterable[CryptolType]

    def __init__(self, *types : CryptolType) -> None:
        self.types = types

    def __repr__(self) -> str:
        # FIX: use repr of the element types for consistency with every other
        # __repr__ in this module (str and repr differ for e.g. Plus).
        return "Tuple(" + ", ".join(map(repr, self.types)) + ")"


class Record(CryptolType):
    def __init__(self, fields : Dict[str, CryptolType]) -> None:
        self.fields = fields

    def __repr__(self) -> str:
        return f"Record({self.fields!r})"


def to_type(t : Any) -> CryptolType:
    """Decode a JSON-encoded Cryptol type.

    Raises NotImplementedError for unrecognized encodings."""
    if t['type'] == 'variable':
        return Var(t['name'], to_kind(t['kind']))
    elif t['type'] == 'function':
        return Function(to_type(t['domain']), to_type(t['range']))
    elif t['type'] == 'bitvector':
        return Bitvector(to_type(t['width']))
    elif t['type'] == 'number':
        return Num(t['value'])
    elif t['type'] == 'Bit':
        return Bit()
    elif t['type'] == 'sequence':
        return Sequence(to_type(t['length']), to_type(t['contents']))
    elif t['type'] == 'inf':
        return Inf()
    elif t['type'] == '+':
        return Plus(*map(to_type, t['arguments']))
    elif t['type'] == '-':
        return Minus(*map(to_type, t['arguments']))
    elif t['type'] == '*':
        return Times(*map(to_type, t['arguments']))
    elif t['type'] == '/':
        return Div(*map(to_type, t['arguments']))
    elif t['type'] == '/^':
        return CeilDiv(*map(to_type, t['arguments']))
    elif t['type'] == '%':
        return Mod(*map(to_type, t['arguments']))
    elif t['type'] == '%^':
        return CeilMod(*map(to_type, t['arguments']))
    elif t['type'] == '^^':
        return Expt(*map(to_type, t['arguments']))
    elif t['type'] == 'lg2':
        return Log2(*map(to_type, t['arguments']))
    elif t['type'] == 'width':
        return Width(*map(to_type, t['arguments']))
    elif t['type'] == 'max':
        return Max(*map(to_type, t['arguments']))
    elif t['type'] == 'min':
        return Min(*map(to_type, t['arguments']))
    elif t['type'] == 'tuple':
        return Tuple(*map(to_type, t['contents']))
    elif t['type'] == 'record':
        return Record({k : to_type(t['fields'][k]) for k in t['fields']})
    elif t['type'] == 'Integer':
        return Integer()
    elif t['type'] == 'Rational':
        return Rational()
    elif t['type'] == 'Z':
        return Z(to_type(t['modulus']))
    else:
        raise NotImplementedError(f"to_type({t!r})")


class CryptolTypeSchema:
    """A polymorphic type: quantified variables, constraints, and a body."""

    def __init__(self,
                 variables : OrderedDict[str, CryptolKind],
                 propositions : List[Optional[CryptolProp]], # TODO complete me!
                 body : CryptolType) -> None:
        self.variables = variables
        self.propositions = propositions
        self.body = body

    def __repr__(self) -> str:
        return f"CryptolTypeSchema({self.variables!r}, {self.propositions!r}, {self.body!r})"


def to_schema(obj : Any) -> CryptolTypeSchema:
    """Decode a JSON-encoded Cryptol type schema."""
    return CryptolTypeSchema(OrderedDict((v['name'], to_kind(v['kind']))
                                         for v in obj['forall']),
                             [to_prop(p) for p in obj['propositions']],
                             to_type(obj['type']))


def to_prop(obj : Any) -> Optional[CryptolProp]:
    """Decode a JSON-encoded proposition; returns None for unknown props."""
    if obj['prop'] == 'fin':
        return Fin(to_type(obj['subject']))
    elif obj['prop'] == 'Cmp':
        return Cmp(to_type(obj['subject']))
    elif obj['prop'] == 'SignedCmp':
        return SignedCmp(to_type(obj['subject']))
    elif obj['prop'] == 'Zero':
        return Zero(to_type(obj['subject']))
    elif obj['prop'] == 'Arith':
        return Arith(to_type(obj['subject']))
    elif obj['prop'] == 'Logic':
        return Logic(to_type(obj['subject']))
    else:
        return None
        #raise ValueError(f"Can't convert to a Cryptol prop: {obj!r}")


def argument_types(obj : Union[CryptolTypeSchema, CryptolType]) -> List[CryptolType]:
    """List the argument types of a (possibly polymorphic) function type."""
    if isinstance(obj, CryptolTypeSchema):
        return argument_types(obj.body)
    elif isinstance(obj, Function):
        arg1 = obj.domain
        args = argument_types(obj.range)
        return [arg1] + args
    else:
        return []
7,504
1,303
<reponame>LifeOrGame/Sparky #include "sp/sp.h" #include "Panel.h" #include "Widget.h" #include "sp/app/Application.h" #include "sp/maths/maths.h" #include "sp/graphics/shaders/ShaderFactory.h" namespace sp { namespace graphics { namespace ui { using namespace events; using namespace maths; Panel::Panel() : Layer2D(maths::mat4::Orthographic(0.0f, 32.0f, 0.0f, 18.0f, -1.0f, 1.0f)) { Application::GetApplication().PushOverlay(this); } Panel::~Panel() { for (uint i = 0; i < m_Widgets.size(); i++) delete m_Widgets[i]; Application::GetApplication().PopOverlay(this); } Widget* Panel::Add(Widget* widget) { m_Widgets.push_back(widget); return widget; } void Panel::Remove(Widget* widget) { int32 index = 0; for (uint i = 0; i < m_Widgets.size(); i++) { if (m_Widgets[i] == widget) { m_Widgets.erase(m_Widgets.begin() + i); delete m_Widgets[i]; break; } } } void Panel::Clear() { for (uint i = 0; i < m_Widgets.size(); i++) spdel m_Widgets[i]; m_Widgets.clear(); } void Panel::OnEvent(events::Event& event) { EventDispatcher dispatcher(event); dispatcher.Dispatch<MousePressedEvent>(METHOD(&Panel::OnMousePressedEvent)); dispatcher.Dispatch<MouseReleasedEvent>(METHOD(&Panel::OnMouseReleasedEvent)); dispatcher.Dispatch<MouseMovedEvent>(METHOD(&Panel::OnMouseMovedEvent)); // TODO: Temporary fix dispatcher.Dispatch<ResizeWindowEvent>([this](events::ResizeWindowEvent& e) { return Layer2D::OnResize(e.GetWidth(), e.GetHeight()); }); } bool Panel::OnMousePressedEvent(events::MousePressedEvent& e) { vec2 mouse(e.GetX() * (32.0f / Window::GetWindowClass(nullptr)->GetWidth()), 18.0f - e.GetY() * (18.0f / Window::GetWindowClass(nullptr)->GetHeight())); for (uint i = 0; i < m_Widgets.size(); i++) { Widget* widget = m_Widgets[i]; if (widget->GetBounds().Contains(mouse)) { if (widget->OnMousePressed(e)) return true; } } return false; } bool Panel::OnMouseReleasedEvent(events::MouseReleasedEvent& e) { vec2 mouse(e.GetX() * (32.0f / Window::GetWindowClass(nullptr)->GetWidth()), 18.0f - e.GetY() * 
(18.0f / Window::GetWindowClass(nullptr)->GetHeight())); for (uint i = 0; i < m_Widgets.size(); i++) { Widget* widget = m_Widgets[i]; if (widget->GetBounds().Contains(mouse)) { if (widget->OnMouseReleased(e)) return true; } } return false; } bool Panel::OnMouseMovedEvent(events::MouseMovedEvent& e) { for (uint i = 0; i < m_Widgets.size(); i++) { Widget* widget = m_Widgets[i]; if (widget->OnMouseMoved(e)) return true; } return false; } void Panel::OnUpdate(const Timestep& ts) { for (Widget* widget : m_Widgets) { if (widget->IsActive()) widget->OnUpdate(); } } void Panel::OnRender(Renderer2D& renderer) { for (Widget* widget : m_Widgets) { if (widget->IsActive()) widget->OnRender(renderer); } } } } }
1,261
1,875
<reponame>CarnegieLearningWeb/teavm /* * Copyright 2015 <NAME>. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.teavm.classlib.java.text; import org.teavm.classlib.java.io.TSerializable; import org.teavm.classlib.java.util.TMap; import org.teavm.classlib.java.util.TSet; public interface TAttributedCharacterIterator extends TCharacterIterator { class Attribute implements TSerializable { public static final Attribute INPUT_METHOD_SEGMENT = new Attribute( "input_method_segment"); public static final Attribute LANGUAGE = new Attribute("language"); public static final Attribute READING = new Attribute("reading"); private String name; protected Attribute(String name) { this.name = name; } @Override public final boolean equals(Object object) { return this == object; } protected String getName() { return name; } @Override public final int hashCode() { return super.hashCode(); } @Override public String toString() { return getClass().getName() + '(' + getName() + ')'; } } /** * Returns a set of attributes present in the {@code * AttributedCharacterIterator}. An empty set is returned if no attributes * were defined. * * @return a set of attribute keys; may be empty. */ TSet<Attribute> getAllAttributeKeys(); /** * Returns the value stored in the attribute for the current character. If * the attribute was not defined then {@code null} is returned. * * @param attribute the attribute for which the value should be returned. 
* @return the value of the requested attribute for the current character or * {@code null} if it was not defined. */ Object getAttribute(Attribute attribute); /** * Returns a map of all attributes of the current character. If no * attributes were defined for the current character then an empty map is * returned. * * @return a map of all attributes for the current character or an empty * map. */ TMap<Attribute, Object> getAttributes(); /** * Returns the index of the last character in the run having the same * attributes as the current character. * * @return the index of the last character of the current run. */ int getRunLimit(); /** * Returns the index of the last character in the run that has the same * attribute value for the given attribute as the current character. * * @param attribute * the attribute which the run is based on. * @return the index of the last character of the current run. */ int getRunLimit(Attribute attribute); /** * Returns the index of the last character in the run that has the same * attribute values for the attributes in the set as the current character. * * @param attributes * the set of attributes which the run is based on. * @return the index of the last character of the current run. */ int getRunLimit(TSet<? extends Attribute> attributes); /** * Returns the index of the first character in the run that has the same * attributes as the current character. * * @return the index of the last character of the current run. */ int getRunStart(); /** * Returns the index of the first character in the run that has the same * attribute value for the given attribute as the current character. * * @param attribute * the attribute which the run is based on. * @return the index of the last character of the current run. */ int getRunStart(Attribute attribute); /** * Returns the index of the first character in the run that has the same * attribute values for the attributes in the set as the current character. 
* * @param attributes * the set of attributes which the run is based on. * @return the index of the last character of the current run. */ int getRunStart(TSet<? extends Attribute> attributes); }
1,628
351
/*** ____ __ ____ __ ( _ \ / \( _ \( ) ) __/( O )) __// (_/\ (__) \__/(__) \____/ version 1.2.90 https://github.com/badaix/popl This file is part of popl (program options parser lib) Copyright (C) 2015-2019 <NAME> This software may be modified and distributed under the terms of the MIT license. See the LICENSE file for details. ***/ /// checked with clang-tidy: /// run-clang-tidy-3.8.py -header-filter='.*' /// -checks='*,-misc-definitions-in-headers,-google-readability-braces-around-statements,-readability-braces-around-statements,-cppcoreguidelines-pro-bounds-pointer-arithmetic,-google-build-using-namespace,-google-build-using-namespace' #ifndef POPL_HPP #define POPL_HPP #ifndef NOMINMAX #define NOMINMAX #endif // NOMINMAX #include <algorithm> #include <cstdio> #include <cstring> #include <fstream> #include <iostream> #include <memory> #include <sstream> #include <stdexcept> #include <vector> namespace popl { #define POPL_VERSION "1.2.90" /// Option's argument type /** * Switch has "no" argument * Value has "required" argument * Implicit has "optional" argument */ enum class Argument { no = 0, // option never takes an argument required, // option always requires an argument optional // option may take an argument }; /// Option's attribute /** * inactive: Option is not set and will not be parsed * hidden: Option is active, but will not show up in the help message * required: Option must be set on the command line. Otherwise an exception will be thrown * optional: Option must not be set. Default attribute. * advanced: Option is advanced and will only show up in the advanced help message * expoert: Option is expert and will only show up in the expert help message */ enum class Attribute { inactive = 0, hidden = 1, required = 2, optional = 3, advanced = 4, expert = 5 }; /// Option name type. Used in invalid_option exception. 
/** * unspecified: not specified * short_name: The option's short name * long_name: The option's long name */ enum class OptionName { unspecified, short_name, long_name }; /// Abstract Base class for Options /** * Base class for Options * holds just configuration data, no runtime data. * Option is not bound to a special type "T" */ class Option { friend class OptionParser; public: /// Construct an Option /// @param short_name the options's short name. Must be empty or one character. /// @param long_name the option's long name. Can be empty. /// @param description the Option's description that will be shown in the help message Option(const std::string& short_name, const std::string& long_name, std::string description); /// Destructor virtual ~Option() = default; /// default copy constructor Option(const Option&) = default; /// default move constructor Option(Option&&) = default; /// default assignement operator Option& operator=(const Option&) = default; /// default move assignement operator Option& operator=(Option&&) = default; /// Get the Option's short name /// @return character of the options's short name or 0 if no short name is defined char short_name() const; /// Get the Option's long name /// @return the long name of the Option. Empty string if no long name is defined std::string long_name() const; /// Get the Option's long or short name /// @param what_name the option's name to return /// @param what_hyphen preced the returned name with (double-)hypen /// @return the requested name of the Option. Empty string if not defined. 
std::string name(OptionName what_name, bool with_hypen = false) const; /// Get the Option's description /// @return the description std::string description() const; /// Get the Option's default value /// @param out stream to write the default value to /// @return true if a default value is available, false if not virtual bool get_default(std::ostream& out) const = 0; /// Set the Option's attribute /// @param attribute void set_attribute(const Attribute& attribute); /// Get the Option's attribute /// @return the Options's attribute Attribute attribute() const; /// Get the Option's argument type /// @return argument type (no, required, optional) virtual Argument argument_type() const = 0; /// Check how often the Option is set on command line /// @return the Option's count on command line virtual size_t count() const = 0; /// Check if the Option is set /// @return true if set at least once virtual bool is_set() const = 0; protected: /// Parse the command line option and fill the internal data structure /// @param what_name short or long option name /// @param value the value as given on command line virtual void parse(OptionName what_name, const char* value) = 0; /// Clear the internal data structure virtual void clear() = 0; std::string short_name_; std::string long_name_; std::string description_; Attribute attribute_; }; /// Value option with optional default value /** * Value option with optional default value * If set, it requires an argument */ template <class T> class Value : public Option { public: /// Construct an Value Option /// @param short_name the option's short name. Must be empty or one character. /// @param long_name the option's long name. Can be empty. /// @param description the Option's description that will be shown in the help message Value(const std::string& short_name, const std::string& long_name, const std::string& description); /// Construct an Value Option /// @param short_name the option's short name. Must be empty or one character. 
/// @param long_name the option's long name. Can be empty. /// @param description the Option's description that will be shown in the help message /// @param default_val the Option's default value /// @param assign_to pointer to a variable to assign the parsed command line value to Value(const std::string& short_name, const std::string& long_name, const std::string& description, const T& default_val, T* assign_to = nullptr); size_t count() const override; bool is_set() const override; /// Assign the last parsed command line value to "var" /// @param var pointer to the variable where is value is written to void assign_to(T* var); /// Manually set the Option's value. Deletes current value(s) /// @param value the new value of the option void set_value(const T& value); /// Get the Option's value. Will throw if option at index idx is not available /// @param idx the zero based index of the value (if set multiple times) /// @return the Option's value at index "idx" T value(size_t idx = 0) const; /// Get the Option's value, return default_value if not set. /// @param default_value return value if value is not set /// @param idx the zero based index of the value (if set multiple times) /// @return the Option's value at index "idx" or the default value or default_value T value_or(const T& default_value, size_t idx = 0) const; /// Set the Option's default value /// @param value the default value if not specified on command line void set_default(const T& value); /// Check if the Option has a default value /// @return true if the Option has a default value bool has_default() const; /// Get the Option's default value. Will throw if no default is set. 
/// @return the Option's default value T get_default() const; bool get_default(std::ostream& out) const override; Argument argument_type() const override; protected: void parse(OptionName what_name, const char* value) override; std::unique_ptr<T> default_; virtual void update_reference(); virtual void add_value(const T& value); void clear() override; T* assign_to_; std::vector<T> values_; }; /// Value option with implicit default value /** * Value option with implicit default value * If set, an argument is optional * -without argument it carries the implicit default value * -with argument it carries the explicit value */ template <class T> class Implicit : public Value<T> { public: Implicit(const std::string& short_name, const std::string& long_name, const std::string& description, const T& implicit_val, T* assign_to = nullptr); Argument argument_type() const override; protected: void parse(OptionName what_name, const char* value) override; }; /// Value option without value /** * Value option without value * Does not require an argument * Can be either set or not set */ class Switch : public Value<bool> { public: Switch(const std::string& short_name, const std::string& long_name, const std::string& description, bool* assign_to = nullptr); void set_default(const bool& value) = delete; Argument argument_type() const override; protected: void parse(OptionName what_name, const char* value) override; }; /// Bounded value option /** * Bounded value option * Checks if the value meets boundary predicate, * for example ( x > 3 && x < 99 ) */ template <class T> class BoundedValue : public Value<T> { public: using Predicate = bool(*)(T); /// Construct a BoundedValue Option /// @param short_name the option's short name. Must be empty or one character. /// @param long_name the option's long name. Can be empty. 
/// @param description the Option's description that will be shown in the help message /// @param predicate the option's correctness predicate, returns true if option is correct BoundedValue(const std::string& short_name, const std::string& long_name, const std::string& description, Predicate predicate); /// Construct a BoundedValue Option /// @param short_name the option's short name. Must be empty or one character. /// @param long_name the option's long name. Can be empty. /// @param description the Option's description that will be shown in the help message /// @param predicate the option's correctness predicate, returns true if option is correct /// @param default_val the Option's default value /// @param assign_to pointer to a variable to assign the parsed command line value to BoundedValue(const std::string& short_name, const std::string& long_name, const std::string& description, Predicate predicate, const T& default_val, T* assign_to = nullptr); protected: void parse(OptionName what_name, const char* value) override; private: Predicate predicate_; }; using Option_ptr = std::shared_ptr<Option>; /// OptionParser manages all Options /** * OptionParser manages all Options * Add Options (Option_Type = Value<T>, Implicit<T> or Switch) with "add<Option_Type>(option params)" * Call "parse(argc, argv)" to trigger parsing of the options and to * fill "non_option_args" and "unknown_options" */ class OptionParser { public: /// Construct the OptionParser /// @param description used for the help message explicit OptionParser(std::string description = ""); /// Destructor virtual ~OptionParser() = default; /// Add an Option e.g. 'add<Value<int>>("i", "int", "description for the -i option")' /// @param T the option type (Value, Switch, Implicit) /// @param attribute the Option's attribute (inactive, hidden, required, optional, ...) /// @param Ts the Option's parameter template <typename T, Attribute attribute, typename... Ts> std::shared_ptr<T> add(Ts&&... 
params); /// Add an Option e.g. 'add<Value<int>>("i", "int", "description for the -i option")' /// @param T the option type (Value, Switch, Implicit) /// @param Ts the Option's parameter template <typename T, typename... Ts> std::shared_ptr<T> add(Ts&&... params); /// Parse an ini file into the added Options /// @param ini_filename full path of the ini file void parse(const std::string& ini_filename); /// Parse the command line into the added Options /// @param argc command line argument count /// @param argv command line arguments /// @param start_index index of starting argument void parse(int argc, const char * const argv[], int start_index = 1); /// Produce a help message /// @param max_attribute show options up to this level (optional, advanced, expert) /// @return the help message std::string help(const Attribute& max_attribute = Attribute::optional) const; /// Get the OptionParser's description /// @return the description as given during construction std::string description() const; /// Get all options that where added with "add" /// @return a vector of the contained Options const std::vector<Option_ptr>& options() const; /// Get command line arguments without option /// e.g. "-i 5 hello" => hello /// e.g. "-i 5 -- from here non option args" => "from", "here", "non", "option", "args" /// @return vector to "stand-alone" command line arguments const std::vector<std::string>& non_option_args() const; /// Get unknown command options /// e.g. 
'--some_unknown_option="hello"' /// @return vector to "stand-alone" command line arguments const std::vector<std::string>& unknown_options() const; /// Get an Option by it's long name /// @param the Option's long name /// @return a pointer of type "Value, Switch, Implicit" to the Option or nullptr template <typename T> std::shared_ptr<T> get_option(const std::string& long_name) const; /// Get an Option by it's short name /// @param the Option's short name /// @return a pointer of type "Value, Switch, Implicit" to the Option or nullptr template <typename T> std::shared_ptr<T> get_option(char short_name) const; protected: std::vector<Option_ptr> options_; std::string description_; std::vector<std::string> non_option_args_; std::vector<std::string> unknown_options_; Option_ptr find_option(const std::string& long_name) const; Option_ptr find_option(char short_name) const; }; class invalid_option : public std::invalid_argument { public: enum class Error { missing_argument, invalid_argument, too_many_arguments, argument_out_of_bound, missing_option }; invalid_option(const Option* option, invalid_option::Error error, OptionName what_name, std::string value, const std::string& text) : std::invalid_argument(text.c_str()), option_(option), error_(error), what_name_(what_name), value_(std::move(value)) { } invalid_option(const Option* option, invalid_option::Error error, const std::string& text) : invalid_option(option, error, OptionName::unspecified, "", text) { } const Option* option() const { return option_; } Error error() const { return error_; } OptionName what_name() const { return what_name_; } std::string value() const { return value_; } private: const Option* option_; Error error_; OptionName what_name_; std::string value_; }; /// Base class for an OptionPrinter /** * OptionPrinter creates a help message for a given OptionParser */ class OptionPrinter { public: /// Constructor /// @param option_parser the OptionParser to create the help message from explicit 
OptionPrinter(const OptionParser* option_parser) : option_parser_(option_parser) { } /// Destructor virtual ~OptionPrinter() = default; /// Create a help message /// @param max_attribute show options up to this level (optional, advanced, expert) /// @return the help message virtual std::string print(const Attribute& max_attribute = Attribute::optional) const = 0; protected: const OptionParser* option_parser_; }; /// Option printer for the console /** * Standard console option printer * Creates a human readable help message */ class ConsoleOptionPrinter : public OptionPrinter { public: explicit ConsoleOptionPrinter(const OptionParser* option_parser); ~ConsoleOptionPrinter() override = default; std::string print(const Attribute& max_attribute = Attribute::optional) const override; private: std::string to_string(Option_ptr option) const; }; /// Option printer for man pages /** * Creates help messages in groff format that can be used in man pages */ class GroffOptionPrinter : public OptionPrinter { public: explicit GroffOptionPrinter(const OptionParser* option_parser); ~GroffOptionPrinter() override = default; std::string print(const Attribute& max_attribute = Attribute::optional) const override; private: std::string to_string(Option_ptr option) const; }; /// Option printer for bash completion /** * Creates a script with all options (short and long) that can be used for bash completion */ class BashCompletionOptionPrinter : public OptionPrinter { public: BashCompletionOptionPrinter(const OptionParser* option_parser, std::string program_name); ~BashCompletionOptionPrinter() override = default; std::string print(const Attribute& max_attribute = Attribute::optional) const override; private: std::string program_name_; }; /// Option implementation ///////////////////////////////// inline Option::Option(const std::string& short_name, const std::string& long_name, std::string description) : short_name_(short_name), long_name_(long_name), description_(std::move(description)), 
attribute_(Attribute::optional) { if (short_name.size() > 1) throw std::invalid_argument("length of short name must be <= 1: '" + short_name + "'"); if (short_name.empty() && long_name.empty()) throw std::invalid_argument("short and long name are empty"); } inline char Option::short_name() const { if (!short_name_.empty()) return short_name_[0]; return 0; } inline std::string Option::long_name() const { return long_name_; } inline std::string Option::name(OptionName what_name, bool with_hypen) const { if (what_name == OptionName::short_name) return short_name_.empty() ? "" : ((with_hypen ? "-" : "") + short_name_); if (what_name == OptionName::long_name) return long_name_.empty() ? "" : ((with_hypen ? "--" : "") + long_name_); return ""; } inline std::string Option::description() const { return description_; } inline void Option::set_attribute(const Attribute& attribute) { attribute_ = attribute; } inline Attribute Option::attribute() const { return attribute_; } /// Value implementation ///////////////////////////////// template <class T> inline Value<T>::Value(const std::string& short_name, const std::string& long_name, const std::string& description) : Option(short_name, long_name, description), assign_to_(nullptr) { } template <class T> inline Value<T>::Value(const std::string& short_name, const std::string& long_name, const std::string& description, const T& default_val, T* assign_to) : Value<T>(short_name, long_name, description) { assign_to_ = assign_to; set_default(default_val); } template <class T> inline size_t Value<T>::count() const { return values_.size(); } template <class T> inline bool Value<T>::is_set() const { return !values_.empty(); } template <class T> inline void Value<T>::assign_to(T* var) { assign_to_ = var; update_reference(); } template <class T> inline void Value<T>::set_value(const T& value) { clear(); add_value(value); } template <class T> inline T Value<T>::value_or(const T& default_value, size_t idx) const { if (idx < values_.size()) 
return values_[idx]; else if (default_) return *default_; else return default_value; } template <class T> inline T Value<T>::value(size_t idx) const { if (!this->is_set() && default_) return *default_; if (!is_set() || (idx >= count())) { std::stringstream optionStr; if (!is_set()) optionStr << "option not set: \""; else optionStr << "index out of range (" << idx << ") for \""; if (short_name() != 0) optionStr << "-" << short_name(); else optionStr << "--" << long_name(); optionStr << "\""; throw std::out_of_range(optionStr.str()); } return values_[idx]; } template <class T> inline void Value<T>::set_default(const T& value) { this->default_.reset(new T); *this->default_ = value; update_reference(); } template <class T> inline bool Value<T>::has_default() const { return (this->default_ != nullptr); } template <class T> inline T Value<T>::get_default() const { if (!has_default()) throw std::runtime_error("no default value set"); return *this->default_; } template <class T> inline bool Value<T>::get_default(std::ostream& out) const { if (!has_default()) return false; out << *this->default_; return true; } template <class T> inline Argument Value<T>::argument_type() const { return Argument::required; } template <> inline void Value<std::string>::parse(OptionName what_name, const char* value) { if (strlen(value) == 0) throw invalid_option(this, invalid_option::Error::missing_argument, what_name, value, "missing argument for " + name(what_name, true)); add_value(value); } template <> inline void Value<bool>::parse(OptionName /*what_name*/, const char* value) { bool val = ((value != nullptr) && ((strcmp(value, "1") == 0) || (strcmp(value, "true") == 0) || (strcmp(value, "True") == 0) || (strcmp(value, "TRUE") == 0))); add_value(val); } template <class T> inline void Value<T>::parse(OptionName what_name, const char* value) { T parsed_value; std::string strValue; if (value != nullptr) strValue = value; std::istringstream is(strValue); int valuesRead = 0; while (is.good()) { 
if (is.peek() != EOF) is >> parsed_value; else break; valuesRead++; } if (is.fail()) throw invalid_option(this, invalid_option::Error::invalid_argument, what_name, value, "invalid argument for " + name(what_name, true) + ": '" + strValue + "'"); if (valuesRead > 1) throw invalid_option(this, invalid_option::Error::too_many_arguments, what_name, value, "too many arguments for " + name(what_name, true) + ": '" + strValue + "'"); if (strValue.empty()) throw invalid_option(this, invalid_option::Error::missing_argument, what_name, "", "missing argument for " + name(what_name, true)); this->add_value(parsed_value); } template <class T> inline void Value<T>::update_reference() { if (this->assign_to_) { if (this->is_set() || default_) *this->assign_to_ = value(); } } template <class T> inline void Value<T>::add_value(const T& value) { values_.push_back(value); update_reference(); } template <class T> inline void Value<T>::clear() { values_.clear(); update_reference(); } /// Implicit implementation ///////////////////////////////// template <class T> inline Implicit<T>::Implicit(const std::string& short_name, const std::string& long_name, const std::string& description, const T& implicit_val, T* assign_to) : Value<T>(short_name, long_name, description, implicit_val, assign_to) { } template <class T> inline Argument Implicit<T>::argument_type() const { return Argument::optional; } template <class T> inline void Implicit<T>::parse(OptionName what_name, const char* value) { if ((value != nullptr) && (strlen(value) > 0)) Value<T>::parse(what_name, value); else this->add_value(*this->default_); } /// Switch implementation ///////////////////////////////// inline Switch::Switch(const std::string& short_name, const std::string& long_name, const std::string& description, bool* assign_to) : Value<bool>(short_name, long_name, description, false, assign_to) { } inline void Switch::parse(OptionName /*what_name*/, const char* /*value*/) { add_value(true); } inline Argument 
Switch::argument_type() const { return Argument::no; } /// BoundedValue implementation ///////////////////////////////// template <class T> BoundedValue<T>::BoundedValue(const std::string& short_name, const std::string& long_name, const std::string& description, Predicate predicate) : Value<T>(short_name, long_name, description), predicate_(predicate) { } template <class T> BoundedValue<T>::BoundedValue(const std::string& short_name, const std::string& long_name, const std::string& description, Predicate predicate, const T& default_val, T* assign_to) : Value<T>(short_name, long_name, description, default_val, assign_to), predicate_(predicate) { } template <class T> inline void BoundedValue<T>::parse(OptionName what_name, const char* value) { Value<T>::parse(what_name, value); for ( const auto& v : this->values_) if ( !predicate_( v)) throw invalid_option(this, invalid_option::Error::argument_out_of_bound, what_name, value, "argument is out of bound for " + this->name(what_name, true) + ": '" + value + "'"); } /// OptionParser implementation ///////////////////////////////// inline OptionParser::OptionParser(std::string description) : description_(std::move(description)) { } template <typename T, typename... Ts> inline std::shared_ptr<T> OptionParser::add(Ts&&... params) { return add<T, Attribute::optional>(std::forward<Ts>(params)...); } template <typename T, Attribute attribute, typename... Ts> inline std::shared_ptr<T> OptionParser::add(Ts&&... 
params) { static_assert(std::is_base_of<Option, typename std::decay<T>::type>::value, "type T must be Switch, Value or Implicit"); std::shared_ptr<T> option = std::make_shared<T>(std::forward<Ts>(params)...); for (const auto& o : options_) { if ((option->short_name() != 0) && (option->short_name() == o->short_name())) throw std::invalid_argument("duplicate short option name '-" + std::string(1, option->short_name()) + "'"); if (!option->long_name().empty() && (option->long_name() == (o->long_name()))) throw std::invalid_argument("duplicate long option name '--" + option->long_name() + "'"); } option->set_attribute(attribute); options_.push_back(option); return option; } inline std::string OptionParser::description() const { return description_; } inline const std::vector<Option_ptr>& OptionParser::options() const { return options_; } inline const std::vector<std::string>& OptionParser::non_option_args() const { return non_option_args_; } inline const std::vector<std::string>& OptionParser::unknown_options() const { return unknown_options_; } inline Option_ptr OptionParser::find_option(const std::string& long_name) const { for (const auto& option : options_) if (option->long_name() == long_name) return option; return nullptr; } inline Option_ptr OptionParser::find_option(char short_name) const { for (const auto& option : options_) if (option->short_name() == short_name) return option; return nullptr; } template <typename T> inline std::shared_ptr<T> OptionParser::get_option(const std::string& long_name) const { Option_ptr option = find_option(long_name); if (!option) throw std::invalid_argument("option not found: " + long_name); auto result = std::dynamic_pointer_cast<T>(option); if (!result) throw std::invalid_argument("cannot cast option to T: " + long_name); return result; } template <typename T> inline std::shared_ptr<T> OptionParser::get_option(char short_name) const { Option_ptr option = find_option(short_name); if (!option) throw std::invalid_argument("option 
not found: " + std::string(1, short_name)); auto result = std::dynamic_pointer_cast<T>(option); if (!result) throw std::invalid_argument("cannot cast option to T: " + std::string(1, short_name)); return result; } inline void OptionParser::parse(const std::string& ini_filename) { std::ifstream file(ini_filename.c_str()); std::string line; auto trim = [](std::string& s) { s.erase(s.begin(), std::find_if(s.begin(), s.end(), [](int ch) { return !std::isspace(ch); })); s.erase(std::find_if(s.rbegin(), s.rend(), [](int ch) { return !std::isspace(ch); }).base(), s.end()); return s; }; auto trim_copy = [trim](const std::string& s) { std::string copy(s); return trim(copy); }; auto split = [trim_copy](const std::string& s) -> std::pair<std::string, std::string> { size_t pos = s.find('='); if (pos == std::string::npos) return {"", ""}; return {trim_copy(s.substr(0, pos)), trim_copy(s.substr(pos + 1, std::string::npos))}; }; std::string section; while (std::getline(file, line)) { trim(line); if (line.empty()) continue; if (line.front() == '#') continue; if ((line.front() == '[') && (line.back() == ']')) { section = trim_copy(line.substr(1, line.size() - 2)); continue; } auto key_value = split(line); if (key_value.first.empty()) continue; std::string key = section.empty() ? key_value.first : section + "." 
+ key_value.first; Option_ptr option = find_option(key); if (option && (option->attribute() == Attribute::inactive)) option = nullptr; // if (option && (option->argument_type() != Argument::required)) // option = nullptr; if (option) option->parse(OptionName::long_name, key_value.second.c_str()); else unknown_options_.push_back(key); } } inline void OptionParser::parse(int argc, const char* const argv[], int start_index) { unknown_options_.clear(); non_option_args_.clear(); for (auto& opt : options_) opt->clear(); for (int n = start_index; n < argc; ++n) { const std::string arg(argv[n]); if (arg == "--") { /// from here on only non opt args for (int m = n + 1; m < argc; ++m) non_option_args_.emplace_back(argv[m]); } else if (arg.find("--") == 0) { /// long option arg std::string opt = arg.substr(2); std::string optarg; size_t equalIdx = opt.find('='); if (equalIdx != std::string::npos) { optarg = opt.substr(equalIdx + 1); opt.resize(equalIdx); } Option_ptr option = find_option(opt); if (option && (option->attribute() == Attribute::inactive)) option = nullptr; if (option) { if (option->argument_type() == Argument::no) { if (!optarg.empty()) option = nullptr; } else if (option->argument_type() == Argument::required) { if (optarg.empty() && n < argc - 1) optarg = argv[++n]; } } if (option) option->parse(OptionName::long_name, optarg.c_str()); else unknown_options_.push_back(arg); } else if (arg.find('-') == 0) { /// short option arg std::string opt = arg.substr(1); bool unknown = false; for (size_t m = 0; m < opt.size(); ++m) { char c = opt[m]; std::string optarg; Option_ptr option = find_option(c); if (option && (option->attribute() == Attribute::inactive)) option = nullptr; if (option) { if (option->argument_type() == Argument::required) { /// use the rest of the current argument as optarg optarg = opt.substr(m + 1); /// or the next arg if (optarg.empty() && n < argc - 1) optarg = argv[++n]; m = opt.size(); } else if (option->argument_type() == Argument::optional) { 
/// use the rest of the current argument as optarg optarg = opt.substr(m + 1); m = opt.size(); } } if (option) option->parse(OptionName::short_name, optarg.c_str()); else unknown = true; } if (unknown) unknown_options_.push_back(arg); } else { non_option_args_.push_back(arg); } } for (auto& opt : options_) { if ((opt->attribute() == Attribute::required) && !opt->is_set()) { std::string option = opt->long_name().empty() ? std::string(1, opt->short_name()) : opt->long_name(); throw invalid_option(opt.get(), invalid_option::Error::missing_option, "option \"" + option + "\" is required"); } } } inline std::string OptionParser::help(const Attribute& max_attribute) const { ConsoleOptionPrinter option_printer(this); return option_printer.print(max_attribute); } /// ConsoleOptionPrinter implementation ///////////////////////////////// inline ConsoleOptionPrinter::ConsoleOptionPrinter(const OptionParser* option_parser) : OptionPrinter(option_parser) { } inline std::string ConsoleOptionPrinter::to_string(Option_ptr option) const { std::stringstream line; if (option->short_name() != 0) { line << " -" << option->short_name(); if (!option->long_name().empty()) line << ", "; } else line << " "; if (!option->long_name().empty()) line << "--" << option->long_name(); if (option->argument_type() == Argument::required) { line << " arg"; std::stringstream defaultStr; if (option->get_default(defaultStr)) { if (!defaultStr.str().empty()) line << " (=" << defaultStr.str() << ")"; } } else if (option->argument_type() == Argument::optional) { std::stringstream defaultStr; if (option->get_default(defaultStr)) line << " [=arg(=" << defaultStr.str() << ")]"; } return line.str(); } inline std::string ConsoleOptionPrinter::print(const Attribute& max_attribute) const { if (option_parser_ == nullptr) return ""; if (max_attribute < Attribute::optional) throw std::invalid_argument("attribute must be 'optional', 'advanced', or 'default'"); std::stringstream s; if 
(!option_parser_->description().empty()) s << option_parser_->description() << ":\n"; size_t optionRightMargin(20); const size_t maxDescriptionLeftMargin(40); // const size_t descriptionRightMargin(80); for (const auto& option : option_parser_->options()) optionRightMargin = std::max(optionRightMargin, to_string(option).size() + 2); optionRightMargin = std::min(maxDescriptionLeftMargin - 2, optionRightMargin); for (const auto& option : option_parser_->options()) { if ((option->attribute() <= Attribute::hidden) || (option->attribute() > max_attribute)) continue; std::string optionStr = to_string(option); if (optionStr.size() < optionRightMargin) optionStr.resize(optionRightMargin, ' '); else optionStr += "\n" + std::string(optionRightMargin, ' '); s << optionStr; std::string line; std::vector<std::string> lines; std::stringstream description(option->description()); while (std::getline(description, line, '\n')) lines.push_back(line); std::string empty(optionRightMargin, ' '); for (size_t n = 0; n < lines.size(); ++n) { if (n > 0) s << "\n" << empty; s << lines[n]; } s << "\n"; } return s.str(); } /// GroffOptionPrinter implementation ///////////////////////////////// inline GroffOptionPrinter::GroffOptionPrinter(const OptionParser* option_parser) : OptionPrinter(option_parser) { } inline std::string GroffOptionPrinter::to_string(Option_ptr option) const { std::stringstream line; if (option->short_name() != 0) { line << "-" << option->short_name(); if (!option->long_name().empty()) line << ", "; } if (!option->long_name().empty()) line << "--" << option->long_name(); if (option->argument_type() == Argument::required) { line << " arg"; std::stringstream defaultStr; if (option->get_default(defaultStr)) { if (!defaultStr.str().empty()) line << " (=" << defaultStr.str() << ")"; } } else if (option->argument_type() == Argument::optional) { std::stringstream defaultStr; if (option->get_default(defaultStr)) line << " [=arg(=" << defaultStr.str() << ")]"; } return line.str(); 
} inline std::string GroffOptionPrinter::print(const Attribute& max_attribute) const { if (option_parser_ == nullptr) return ""; if (max_attribute < Attribute::optional) throw std::invalid_argument("attribute must be 'optional', 'advanced', or 'default'"); std::stringstream s; if (!option_parser_->description().empty()) s << ".SS " << option_parser_->description() << ":\n"; for (const auto& option : option_parser_->options()) { if ((option->attribute() <= Attribute::hidden) || (option->attribute() > max_attribute)) continue; s << ".TP\n\\fB" << to_string(option) << "\\fR\n"; if (!option->description().empty()) s << option->description() << "\n"; } return s.str(); } /// BashCompletionOptionPrinter implementation ///////////////////////////////// inline BashCompletionOptionPrinter::BashCompletionOptionPrinter(const OptionParser* option_parser, std::string program_name) : OptionPrinter(option_parser), program_name_(std::move(program_name)) { } inline std::string BashCompletionOptionPrinter::print(const Attribute& /*max_attribute*/) const { if (option_parser_ == nullptr) return ""; std::stringstream s; s << "_" << program_name_ << "()\n"; s << R"({ local cur prev opts COMPREPLY=() cur="${COMP_WORDS[COMP_CWORD]}" prev="${COMP_WORDS[COMP_CWORD-1]}" opts=")"; for (const auto& option : option_parser_->options()) { if (option->attribute() > Attribute::hidden) { if (option->short_name() != 0) s << "-" << option->short_name() << " "; if (!option->long_name().empty()) s << "--" << option->long_name() << " "; } } s << R"(" if [[ ${cur} == -* ]] ; then COMPREPLY=( $(compgen -W "${opts}" -- ${cur}) ) return 0 fi } complete -F )"; s << "_" << program_name_ << " " << program_name_ << "\n"; return s.str(); } static inline std::ostream& operator<<(std::ostream& out, const OptionParser& op) { return out << op.help(); } } // namespace popl #endif // POPL_HPP
14,882
347
package org.ovirt.engine.ui.uicommonweb.models.gluster; import java.util.List; import org.ovirt.engine.core.common.businessentities.gluster.GlusterBrickEntity; import org.ovirt.engine.core.common.businessentities.gluster.GlusterVolumeEntity; import org.ovirt.engine.core.common.businessentities.gluster.GlusterVolumeTaskStatusEntity; import org.ovirt.engine.core.common.job.JobExecutionStatus; import org.ovirt.engine.ui.uicommonweb.UICommand; import org.ovirt.engine.ui.uicommonweb.dataprovider.AsyncDataProvider; public class RemoveBrickStatusModel extends VolumeRebalanceStatusModel { private List<GlusterBrickEntity> bricks; private UICommand stopRemoveBricksCommand; private UICommand commitRemoveBricksCommand; private UICommand retainBricksCommand; public RemoveBrickStatusModel(GlusterVolumeEntity volumeEntity, List<GlusterBrickEntity> bricks) { super(volumeEntity); setBricks(bricks); } public void setBricks(List<GlusterBrickEntity> bricks) { this.bricks = bricks; } public List<GlusterBrickEntity> getBricks() { return this.bricks; } public void addStopRemoveBricksCommand(UICommand command) { getCommands().add(command); this.stopRemoveBricksCommand = command; } public UICommand getStopRemoveBricksCommand() { return this.stopRemoveBricksCommand; } public void addRetainBricksCommand(UICommand command) { getCommands().add(command); this.retainBricksCommand = command; } public UICommand getRetainBricksCommand() { return this.retainBricksCommand; } public void addCommitRemoveBricksCommand(UICommand command) { getCommands().add(command); this.commitRemoveBricksCommand = command; } public UICommand getCommitRemoveBricksCommand() { return this.commitRemoveBricksCommand; } @Override public void showStatus(GlusterVolumeTaskStatusEntity statusEntity) { super.showStatus(statusEntity); getStopRemoveBricksCommand().setIsExecutionAllowed(statusEntity.getStatusSummary().getStatus() == JobExecutionStatus.STARTED); 
getCommitRemoveBricksCommand().setIsExecutionAllowed(statusEntity.getStatusSummary() .getStatus() == JobExecutionStatus.FINISHED); getRetainBricksCommand().setIsExecutionAllowed(statusEntity.getStatusSummary() .getStatus() == JobExecutionStatus.FINISHED); } @Override public void refreshDetails(GlusterVolumeEntity volumeEntity) { AsyncDataProvider.getInstance().getGlusterRemoveBricksStatus(new AsyncQuery<>(returnValue -> { GlusterVolumeTaskStatusEntity statusEntity = returnValue.getReturnValue(); if (statusEntity != null) { showStatus(statusEntity); } }), volumeEntity.getClusterId(), volumeEntity.getId(), getBricks()); } }
1,072
1,907
// // This source file is part of appleseed. // Visit https://appleseedhq.net/ for additional information and resources. // // This software is released under the MIT license. // // Copyright (c) 2010-2013 <NAME>, Jupiter Jazz Limited // Copyright (c) 2014-2018 <NAME>, The appleseedhq Organization // // Permission is hereby granted, free of charge, to any person obtaining a copy // of this software and associated documentation files (the "Software"), to deal // in the Software without restriction, including without limitation the rights // to use, copy, modify, merge, publish, distribute, sublicense, and/or sell // copies of the Software, and to permit persons to whom the Software is // furnished to do so, subject to the following conditions: // // The above copyright notice and this permission notice shall be included in // all copies or substantial portions of the Software. // // THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR // IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, // FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE // AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER // LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, // OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN // THE SOFTWARE. // #pragma once // appleseed.renderer headers. #include "renderer/global/globaltypes.h" #include "renderer/modeling/color/colorspace.h" #include "renderer/modeling/input/source.h" #include "renderer/modeling/input/sourceinputs.h" #include "renderer/modeling/scene/textureinstance.h" // appleseed.foundation headers. #include "foundation/image/canvasproperties.h" #include "foundation/image/color.h" #include "foundation/math/transform.h" #include "foundation/math/vector.h" #include "foundation/platform/compiler.h" #include "foundation/utility/uid.h" // Standard headers. 
#include <cstddef> #include <cstdint> // Forward declarations. namespace renderer { class TextureCache; } namespace renderer { // // Texture source. // class TextureSource : public Source { public: // Constructor. TextureSource( const foundation::UniqueID assembly_uid, const TextureInstance& texture_instance); // Retrieve the texture instance used by this source. const TextureInstance& get_texture_instance() const; // Compute a signature unique to this source. std::uint64_t compute_signature() const override; // Return hints allowing to treat this source as one of another type. Hints get_hints() const override; // Evaluate the source at a given shading point. void evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, float& scalar) const override; void evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, foundation::Color3f& linear_rgb) const override; void evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, Spectrum& spectrum) const override; void evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, Alpha& alpha) const override; void evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, foundation::Color3f& linear_rgb, Alpha& alpha) const override; void evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, Spectrum& spectrum, Alpha& alpha) const override; private: const foundation::UniqueID m_assembly_uid; const TextureInstance& m_texture_instance; const foundation::UniqueID m_texture_uid; const foundation::CanvasProperties m_texture_props; const foundation::Transformf m_texture_transform; const float m_scalar_canvas_width; const float m_scalar_canvas_height; const float m_max_x; const float m_max_y; // Apply the texture instance transform to UV coordinates. foundation::Vector2f apply_transform( const foundation::Vector2f& uv) const; // Retrieve a given texel. Return a color in the linear RGB color space. 
foundation::Color4f get_texel( TextureCache& texture_cache, const size_t ix, const size_t iy) const; // Retrieve a 2x2 block of texels. Texels are expressed in the linear RGB color space. void get_texels_2x2( TextureCache& texture_cache, const int ix, const int iy, foundation::Color4f& t00, foundation::Color4f& t10, foundation::Color4f& t01, foundation::Color4f& t11) const; // Sample the texture. Return a color in the linear RGB color space. foundation::Color4f sample_texture( TextureCache& texture_cache, const foundation::Vector2f& uv) const; // Compute an alpha value given a linear RGBA color and the alpha mode of the texture instance. void evaluate_alpha( const foundation::Color4f& color, Alpha& alpha) const; }; // // TextureSource class implementation. // inline const TextureInstance& TextureSource::get_texture_instance() const { return m_texture_instance; } inline void TextureSource::evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, float& scalar) const { const foundation::Color4f color = sample_texture(texture_cache, foundation::Vector2f(source_inputs.m_uv_x, source_inputs.m_uv_y)); scalar = color[0]; } inline void TextureSource::evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, foundation::Color3f& linear_rgb) const { const foundation::Color4f color = sample_texture(texture_cache, foundation::Vector2f(source_inputs.m_uv_x, source_inputs.m_uv_y)); linear_rgb = color.rgb(); } inline void TextureSource::evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, Spectrum& spectrum) const { const foundation::Color4f color = sample_texture(texture_cache, foundation::Vector2f(source_inputs.m_uv_x, source_inputs.m_uv_y)); spectrum.set(color.rgb(), g_std_lighting_conditions, Spectrum::Reflectance); } inline void TextureSource::evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, Alpha& alpha) const { const foundation::Color4f color = sample_texture(texture_cache, 
foundation::Vector2f(source_inputs.m_uv_x, source_inputs.m_uv_y)); evaluate_alpha(color, alpha); } inline void TextureSource::evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, foundation::Color3f& linear_rgb, Alpha& alpha) const { const foundation::Color4f color = sample_texture(texture_cache, foundation::Vector2f(source_inputs.m_uv_x, source_inputs.m_uv_y)); linear_rgb = color.rgb(); evaluate_alpha(color, alpha); } inline void TextureSource::evaluate( TextureCache& texture_cache, const SourceInputs& source_inputs, Spectrum& spectrum, Alpha& alpha) const { const foundation::Color4f color = sample_texture(texture_cache, foundation::Vector2f(source_inputs.m_uv_x, source_inputs.m_uv_y)); spectrum.set(color.rgb(), g_std_lighting_conditions, Spectrum::Reflectance); evaluate_alpha(color, alpha); } inline void TextureSource::evaluate_alpha( const foundation::Color4f& color, Alpha& alpha) const { switch (m_texture_instance.get_effective_alpha_mode()) { case TextureAlphaModeAlphaChannel: alpha.set(color.a); break; case TextureAlphaModeLuminance: alpha.set(average_value(color.rgb())); break; assert_otherwise; } } } // namespace renderer
4,098
679
<reponame>Grosskopf/openoffice<filename>main/starmath/source/smmod.cxx /************************************************************** * * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, * software distributed under the License is distributed on an * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY * KIND, either express or implied. See the License for the * specific language governing permissions and limitations * under the License. * *************************************************************/ // MARKER(update_precomp.py): autogen include statement, do not remove #include "precompiled_starmath.hxx" #include <tools/globname.hxx> #include <vcl/status.hxx> #include <sfx2/msg.hxx> #include <sfx2/app.hxx> #include <sfx2/objface.hxx> #include <svl/whiter.hxx> #include <sfx2/request.hxx> #include <sfx2/sfx.hrc> #include <sfx2/viewsh.hxx> #include <vcl/wrkwin.hxx> #ifndef _SVX_SVXIDS_HRC //autogen #include <svx/svxids.hrc> #endif #include <vcl/msgbox.hxx> #include <vcl/virdev.hxx> #include <unotools/syslocale.hxx> #include <tools/rtti.hxx> #include "smmod.hxx" #include "symbol.hxx" #include "config.hxx" #ifndef _DIALOG_HXX #include "dialog.hxx" #endif #include "edit.hxx" #include "view.hxx" #include "starmath.hrc" TYPEINIT1( SmModule, SfxModule ); #define SmModule #include "smslots.hxx" #include <svx/xmlsecctrl.hxx> SmResId::SmResId( sal_uInt16 nId ) : ResId(nId, *SM_MOD()->GetResMgr()) { } ///////////////////////////////////////////////////////////////// 
SmLocalizedSymbolData::SmLocalizedSymbolData() : Resource( SmResId(RID_LOCALIZED_NAMES) ), aUiSymbolNamesAry ( SmResId(RID_UI_SYMBOL_NAMES) ), aExportSymbolNamesAry ( SmResId(RID_EXPORT_SYMBOL_NAMES) ), aUiSymbolSetNamesAry ( SmResId(RID_UI_SYMBOLSET_NAMES) ), aExportSymbolSetNamesAry( SmResId(RID_EXPORT_SYMBOLSET_NAMES) ), p50NamesAry ( 0 ), p60NamesAry ( 0 ), n50NamesLang ( LANGUAGE_NONE ), n60NamesLang ( LANGUAGE_NONE ) { FreeResource(); } SmLocalizedSymbolData::~SmLocalizedSymbolData() { delete p50NamesAry; delete p60NamesAry; } const String SmLocalizedSymbolData::GetUiSymbolName( const String &rExportName ) const { String aRes; const SmLocalizedSymbolData &rData = SM_MOD()->GetLocSymbolData(); const ResStringArray &rUiNames = rData.GetUiSymbolNamesArray(); const ResStringArray &rExportNames = rData.GetExportSymbolNamesArray(); sal_uInt16 nCount = sal::static_int_cast< xub_StrLen >(rExportNames.Count()); for (sal_uInt16 i = 0; i < nCount && !aRes.Len(); ++i) { if (rExportName == rExportNames.GetString(i)) { aRes = rUiNames.GetString(i); break; } } return aRes; } const String SmLocalizedSymbolData::GetExportSymbolName( const String &rUiName ) const { String aRes; const SmLocalizedSymbolData &rData = SM_MOD()->GetLocSymbolData(); const ResStringArray &rUiNames = rData.GetUiSymbolNamesArray(); const ResStringArray &rExportNames = rData.GetExportSymbolNamesArray(); sal_uInt16 nCount = sal::static_int_cast< xub_StrLen >(rUiNames.Count()); for (sal_uInt16 i = 0; i < nCount && !aRes.Len(); ++i) { if (rUiName == rUiNames.GetString(i)) { aRes = rExportNames.GetString(i); break; } } return aRes; } const String SmLocalizedSymbolData::GetUiSymbolSetName( const String &rExportName ) const { String aRes; const SmLocalizedSymbolData &rData = SM_MOD()->GetLocSymbolData(); const ResStringArray &rUiNames = rData.GetUiSymbolSetNamesArray(); const ResStringArray &rExportNames = rData.GetExportSymbolSetNamesArray(); sal_uInt16 nCount = sal::static_int_cast< xub_StrLen 
>(rExportNames.Count()); for (sal_uInt16 i = 0; i < nCount && !aRes.Len(); ++i) { if (rExportName == rExportNames.GetString(i)) { aRes = rUiNames.GetString(i); break; } } return aRes; } const String SmLocalizedSymbolData::GetExportSymbolSetName( const String &rUiName ) const { String aRes; const SmLocalizedSymbolData &rData = SM_MOD()->GetLocSymbolData(); const ResStringArray &rUiNames = rData.GetUiSymbolSetNamesArray(); const ResStringArray &rExportNames = rData.GetExportSymbolSetNamesArray(); sal_uInt16 nCount = sal::static_int_cast< xub_StrLen >(rUiNames.Count()); for (sal_uInt16 i = 0; i < nCount && !aRes.Len(); ++i) { if (rUiName == rUiNames.GetString(i)) { aRes = rExportNames.GetString(i); break; } } return aRes; } const ResStringArray* SmLocalizedSymbolData::Get50NamesArray( LanguageType nLang ) { if (nLang != n50NamesLang) { int nRID; switch (nLang) { case LANGUAGE_FRENCH : nRID = RID_FRENCH_50_NAMES; break; case LANGUAGE_ITALIAN : nRID = RID_ITALIAN_50_NAMES; break; case LANGUAGE_SWEDISH : nRID = RID_SWEDISH_50_NAMES; break; case LANGUAGE_SPANISH : nRID = RID_SPANISH_50_NAMES; break; default : nRID = -1; break; } delete p50NamesAry; p50NamesAry = 0; n50NamesLang = nLang; if (-1 != nRID) p50NamesAry = new SmNamesArray( n50NamesLang, nRID ); } return p50NamesAry ? &p50NamesAry->GetNamesArray() : 0; } const ResStringArray* SmLocalizedSymbolData::Get60NamesArray( LanguageType nLang ) { if (nLang != n60NamesLang) { int nRID; switch (nLang) { case LANGUAGE_FRENCH : nRID = RID_FRENCH_60_NAMES; break; case LANGUAGE_ITALIAN : nRID = RID_ITALIAN_60_NAMES; break; case LANGUAGE_SWEDISH : nRID = RID_SWEDISH_60_NAMES; break; case LANGUAGE_SPANISH : nRID = RID_SPANISH_60_NAMES; break; default : nRID = -1; break; } delete p60NamesAry; p60NamesAry = 0; n60NamesLang = nLang; if (-1 != nRID) p60NamesAry = new SmNamesArray( n60NamesLang, nRID ); } return p60NamesAry ? 
&p60NamesAry->GetNamesArray() : 0; } ///////////////////////////////////////////////////////////////// SFX_IMPL_INTERFACE(SmModule, SfxModule, SmResId(RID_APPLICATION)) { SFX_STATUSBAR_REGISTRATION(SmResId(RID_STATUSBAR)); } SmModule::SmModule(SfxObjectFactory* pObjFact) : SfxModule(SfxApplication::CreateResManager("sm"), sal_False, pObjFact, NULL), pColorConfig( 0 ), pConfig( 0 ), pLocSymbolData( 0 ), pSysLocale( 0 ), pVirtualDev( 0 ) { SetName( C2S("StarMath" )); } SmModule::~SmModule() { delete pConfig; if (pColorConfig) pColorConfig->RemoveListener(this); delete pColorConfig; delete pLocSymbolData; delete pSysLocale; delete pVirtualDev; } void SmModule::_CreateSysLocale() const { SmModule* pThis = (SmModule*)this; pThis->pSysLocale = new SvtSysLocale; } void SmModule::_CreateVirtualDev() const { SmModule* pThis = (SmModule*)this; pThis->pVirtualDev = new VirtualDevice; pThis->pVirtualDev->SetReferenceDevice( VirtualDevice::REFDEV_MODE_MSO1 ); } void SmModule::ApplyColorConfigValues( const svtools::ColorConfig &rColorCfg ) { //invalidate all graphic and edit windows const TypeId aSmViewTypeId = TYPE(SmViewShell); SfxViewShell* pViewShell = SfxViewShell::GetFirst(); while (pViewShell) { if ((pViewShell->IsA(aSmViewTypeId))) { SmViewShell *pSmView = (SmViewShell *) pViewShell; pSmView->GetGraphicWindow().ApplyColorConfigValues( rColorCfg ); SmEditWindow *pEditWin = pSmView->GetEditWindow(); if (pEditWin) pEditWin->ApplyColorConfigValues( rColorCfg ); } pViewShell = SfxViewShell::GetNext( *pViewShell ); } } svtools::ColorConfig & SmModule::GetColorConfig() { if(!pColorConfig) { pColorConfig = new svtools::ColorConfig; ApplyColorConfigValues( *pColorConfig ); pColorConfig->AddListener(this); } return *pColorConfig; } void SmModule::ConfigurationChanged( utl::ConfigurationBroadcaster*, sal_uInt32 ) { ApplyColorConfigValues(*pColorConfig); } SmConfig * SmModule::GetConfig() { if(!pConfig) pConfig = new SmConfig; return pConfig; } SmSymbolManager & 
SmModule::GetSymbolManager() { return GetConfig()->GetSymbolManager(); } SmLocalizedSymbolData & SmModule::GetLocSymbolData() const { if (!pLocSymbolData) ((SmModule *) this)->pLocSymbolData = new SmLocalizedSymbolData; return *pLocSymbolData; } void SmModule::GetState(SfxItemSet &rSet) { SfxWhichIter aIter(rSet); for (sal_uInt16 nWh = aIter.FirstWhich(); 0 != nWh; nWh = aIter.NextWhich()) switch (nWh) { case SID_CONFIGEVENT : rSet.DisableItem(SID_CONFIGEVENT); break; } } /* -----------------15.02.99 12:45------------------- * * --------------------------------------------------*/ SfxItemSet* SmModule::CreateItemSet( sal_uInt16 nId ) { SfxItemSet* pRet = 0; if(nId == SID_SM_EDITOPTIONS) { pRet = new SfxItemSet(GetPool(), //TP_SMPRINT SID_PRINTSIZE, SID_PRINTSIZE, SID_PRINTZOOM, SID_PRINTZOOM, SID_PRINTTITLE, SID_PRINTTITLE, SID_PRINTTEXT, SID_PRINTTEXT, SID_PRINTFRAME, SID_PRINTFRAME, SID_NO_RIGHT_SPACES, SID_NO_RIGHT_SPACES, SID_SAVE_ONLY_USED_SYMBOLS, SID_SAVE_ONLY_USED_SYMBOLS, 0 ); GetConfig()->ConfigToItemSet(*pRet); } return pRet; } /* -----------------15.02.99 12:45------------------- * * --------------------------------------------------*/ void SmModule::ApplyItemSet( sal_uInt16 nId, const SfxItemSet& rSet ) { if(nId == SID_SM_EDITOPTIONS) { GetConfig()->ItemSetToConfig(rSet); } } /* -----------------15.02.99 12:45------------------- * * --------------------------------------------------*/ SfxTabPage* SmModule::CreateTabPage( sal_uInt16 nId, Window* pParent, const SfxItemSet& rSet ) { SfxTabPage* pRet = 0; if(nId == SID_SM_TP_PRINTOPTIONS) pRet = SmPrintOptionsTabPage::Create( pParent, rSet ); return pRet; }
4,554
5,169
<reponame>Gantios/Specs<filename>Specs/3/6/2/MGActionStageSwift/0.0.2/MGActionStageSwift.podspec.json { "name": "MGActionStageSwift", "version": "0.0.2", "summary": "ActionStageSwift. 升级Swift4.0", "description": "TODO: Add long description of the pod here.", "homepage": "https://github.com/mgzf/MGActionStageSwift", "license": { "type": "MIT", "file": "LICENSE" }, "authors": { "spf": "<EMAIL>" }, "source": { "git": "https://github.com/mgzf/MGActionStageSwift.git", "tag": "0.0.2" }, "platforms": { "ios": "8.0" }, "default_subspecs": "ActionStageSwift", "pushed_with_swift_version": "4.0", "subspecs": [ { "name": "ActionStageSwift", "source_files": "MGActionStageSwift/Classes/ActionStageSwift/*.{swift,m,h}" }, { "name": "MGActionStageSwift_Extension", "source_files": "MGActionStageSwift/Classes/Extension/*.{swift,m,h}", "dependencies": { "MGActionStageSwift/ActionStageSwift": [ ] } } ] }
485
4,262
/* * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.camel.component.mina; import java.nio.charset.Charset; import org.apache.mina.core.session.IoSession; import org.apache.mina.filter.codec.ProtocolCodecFactory; import org.apache.mina.filter.codec.ProtocolDecoder; import org.apache.mina.filter.codec.ProtocolEncoder; import org.apache.mina.filter.codec.textline.LineDelimiter; import org.apache.mina.filter.codec.textline.TextLineDecoder; import org.apache.mina.filter.codec.textline.TextLineEncoder; /** * Text line codec that supports setting charset and delimiter. * <p/> * Uses Mina's default TextLineEncoder and TextLineDncoder. 
*/ public class MinaTextLineCodecFactory implements ProtocolCodecFactory { private TextLineEncoder encoder; private TextLineDecoder decoder; public MinaTextLineCodecFactory(Charset charset, LineDelimiter delimiter) { if (delimiter.equals(LineDelimiter.AUTO)) { // AUTO not supported by encoder encoder = new TextLineEncoder(charset); } else { encoder = new TextLineEncoder(charset, delimiter); } decoder = new TextLineDecoder(charset, delimiter); } @Override public ProtocolEncoder getEncoder(IoSession session) throws Exception { return encoder; } @Override public ProtocolDecoder getDecoder(IoSession session) throws Exception { return decoder; } public void setEncoderMaxLineLength(int encoderMaxLineLength) { encoder.setMaxLineLength(encoderMaxLineLength); } public int getEncoderMaxLineLength() { return encoder.getMaxLineLength(); } public void setDecoderMaxLineLength(int decoderMaxLineLength) { decoder.setMaxLineLength(decoderMaxLineLength); } public int getDecoderMaxLineLength() { return decoder.getMaxLineLength(); } }
878
538
<reponame>e16din/AndroidBucket package com.wangjie.androidbucket.support.recyclerview.layoutmanager; import android.content.Context; import android.support.v7.widget.GridLayoutManager; import android.support.v7.widget.RecyclerView; import com.wangjie.androidbucket.support.recyclerview.listener.OnRecyclerViewScrollLocationListener; /** * Author: wangjie * Email: <EMAIL> * Date: 1/19/15. */ public class ABaseGridLayoutManager extends GridLayoutManager implements RecyclerViewScrollManager.OnScrollManagerLocation { private static final String TAG = ABaseGridLayoutManager.class.getSimpleName(); private RecyclerViewScrollManager recyclerViewScrollManager; public void setOnRecyclerViewScrollListener(RecyclerView recyclerView, OnRecyclerViewScrollLocationListener onRecyclerViewScrollLocationListener) { ensureRecyclerViewScrollManager(); recyclerViewScrollManager.setOnRecyclerViewScrollLocationListener(onRecyclerViewScrollLocationListener); recyclerViewScrollManager.setOnScrollManagerLocation(this); recyclerViewScrollManager.registerScrollListener(recyclerView); } public ABaseGridLayoutManager(Context context, int spanCount) { super(context, spanCount); } public ABaseGridLayoutManager(Context context, int spanCount, int orientation, boolean reverseLayout) { super(context, spanCount, orientation, reverseLayout); } public boolean isScrolling() { if (null != recyclerViewScrollManager) { return recyclerViewScrollManager.isScrolling(); } return false; } public RecyclerViewScrollManager getRecyclerViewScrollManager() { ensureRecyclerViewScrollManager(); return recyclerViewScrollManager; } private void ensureRecyclerViewScrollManager() { if (null == recyclerViewScrollManager) { recyclerViewScrollManager = new RecyclerViewScrollManager(); } } @Override public boolean isTop(RecyclerView recyclerView) { return 0 == findFirstVisibleItemPosition(); } @Override public boolean isBottom(RecyclerView recyclerView) { int lastVisiblePosition = findLastCompletelyVisibleItemPosition(); int 
lastPosition = recyclerView.getAdapter().getItemCount() - 1; return lastVisiblePosition == lastPosition; } }
783
1,342
/* * Copyright 2021 Google LLC * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * https://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package com.google.android.gms.example.appopendemo; import android.app.Activity; import android.app.Application; import android.app.Application.ActivityLifecycleCallbacks; import android.content.Context; import android.os.Bundle; import android.util.Log; import android.widget.Toast; import androidx.annotation.NonNull; import androidx.annotation.Nullable; import androidx.lifecycle.Lifecycle.Event; import androidx.lifecycle.LifecycleObserver; import androidx.lifecycle.OnLifecycleEvent; import androidx.lifecycle.ProcessLifecycleOwner; import com.google.android.gms.ads.AdError; import com.google.android.gms.ads.FullScreenContentCallback; import com.google.android.gms.ads.LoadAdError; import com.google.android.gms.ads.MobileAds; import com.google.android.gms.ads.admanager.AdManagerAdRequest; import com.google.android.gms.ads.appopen.AppOpenAd; import com.google.android.gms.ads.appopen.AppOpenAd.AppOpenAdLoadCallback; import com.google.android.gms.ads.initialization.InitializationStatus; import com.google.android.gms.ads.initialization.OnInitializationCompleteListener; import java.util.Date; /** Application class that initializes, loads and show ads when activities change states. 
*/ public class MyApplication extends Application implements ActivityLifecycleCallbacks, LifecycleObserver { private AppOpenAdManager appOpenAdManager; private Activity currentActivity; @Override public void onCreate() { super.onCreate(); this.registerActivityLifecycleCallbacks(this); MobileAds.initialize( this, new OnInitializationCompleteListener() { @Override public void onInitializationComplete( @NonNull InitializationStatus initializationStatus) {} }); ProcessLifecycleOwner.get().getLifecycle().addObserver(this); appOpenAdManager = new AppOpenAdManager(); } /** LifecycleObserver method that shows the app open ad when the app moves to foreground. */ @OnLifecycleEvent(Event.ON_START) protected void onMoveToForeground() { // Show the ad (if available) when the app moves to foreground. appOpenAdManager.showAdIfAvailable(currentActivity); } /** ActivityLifecycleCallback methods. */ @Override public void onActivityCreated(@NonNull Activity activity, @Nullable Bundle savedInstanceState) {} @Override public void onActivityStarted(@NonNull Activity activity) { // An ad activity is started when an ad is showing, which could be AdActivity class from Google // SDK or another activity class implemented by a third party mediation partner. Updating the // currentActivity only when an ad is not showing will ensure it is not an ad activity, but the // one that shows the ad. if (!appOpenAdManager.isShowingAd) { currentActivity = activity; } } @Override public void onActivityResumed(@NonNull Activity activity) {} @Override public void onActivityPaused(@NonNull Activity activity) {} @Override public void onActivityStopped(@NonNull Activity activity) {} @Override public void onActivitySaveInstanceState(@NonNull Activity activity, @NonNull Bundle outState) {} @Override public void onActivityDestroyed(@NonNull Activity activity) {} /** * Shows an app open ad. 
* * @param activity the activity that shows the app open ad * @param onShowAdCompleteListener the listener to be notified when an app open ad is complete */ public void showAdIfAvailable( @NonNull Activity activity, @NonNull OnShowAdCompleteListener onShowAdCompleteListener) { // We wrap the showAdIfAvailable to enforce that other classes only interact with MyApplication // class. appOpenAdManager.showAdIfAvailable(activity, onShowAdCompleteListener); } /** * Interface definition for a callback to be invoked when an app open ad is complete * (i.e. dismissed or fails to show). */ public interface OnShowAdCompleteListener { void onShowAdComplete(); } /** Inner class that loads and shows app open ads. */ private class AppOpenAdManager { private static final String LOG_TAG = "AppOpenAdManager"; private static final String AD_UNIT_ID = "/6499/example/app-open"; private AppOpenAd appOpenAd = null; private boolean isLoadingAd = false; private boolean isShowingAd = false; /** Keep track of the time an app open ad is loaded to ensure you don't show an expired ad. */ private long loadTime = 0; /** Constructor. */ public AppOpenAdManager() {} /** * Load an ad. * * @param context the context of the activity that loads the ad */ private void loadAd(Context context) { // Do not load ad if there is an unused ad or one is already loading. if (isLoadingAd || isAdAvailable()) { return; } isLoadingAd = true; AdManagerAdRequest request = new AdManagerAdRequest.Builder().build(); AppOpenAd.load( context, AD_UNIT_ID, request, AppOpenAd.APP_OPEN_AD_ORIENTATION_PORTRAIT, new AppOpenAdLoadCallback() { /** * Called when an app open ad has loaded. * * @param ad the loaded app open ad. */ @Override public void onAdLoaded(AppOpenAd ad) { appOpenAd = ad; isLoadingAd = false; loadTime = (new Date()).getTime(); Log.d(LOG_TAG, "onAdLoaded."); Toast.makeText(context, "onAdLoaded", Toast.LENGTH_SHORT).show(); } /** * Called when an app open ad has failed to load. * * @param loadAdError the error. 
*/ @Override public void onAdFailedToLoad(LoadAdError loadAdError) { isLoadingAd = false; Log.d(LOG_TAG, "onAdFailedToLoad: " + loadAdError.getMessage()); Toast.makeText(context, "onAdFailedToLoad", Toast.LENGTH_SHORT).show(); } }); } /** Check if ad was loaded more than n hours ago. */ private boolean wasLoadTimeLessThanNHoursAgo(long numHours) { long dateDifference = (new Date()).getTime() - loadTime; long numMilliSecondsPerHour = 3600000; return (dateDifference < (numMilliSecondsPerHour * numHours)); } /** Check if ad exists and can be shown. */ private boolean isAdAvailable() { // Ad references in the app open beta will time out after four hours, but this time limit // may change in future beta versions. For details, see: // https://support.google.com/admob/answer/9341964?hl=en return appOpenAd != null && wasLoadTimeLessThanNHoursAgo(4); } /** * Show the ad if one isn't already showing. * * @param activity the activity that shows the app open ad */ private void showAdIfAvailable(@NonNull final Activity activity) { showAdIfAvailable( activity, new OnShowAdCompleteListener() { @Override public void onShowAdComplete() { // Empty because the user will go back to the activity that shows the ad. } }); } /** * Show the ad if one isn't already showing. * * @param activity the activity that shows the app open ad * @param onShowAdCompleteListener the listener to be notified when an app open ad is complete */ private void showAdIfAvailable( @NonNull final Activity activity, @NonNull OnShowAdCompleteListener onShowAdCompleteListener) { // If the app open ad is already showing, do not show the ad again. if (isShowingAd) { Log.d(LOG_TAG, "The app open ad is already showing."); return; } // If the app open ad is not available yet, invoke the callback then load the ad. 
if (!isAdAvailable()) { Log.d(LOG_TAG, "The app open ad is not ready yet."); onShowAdCompleteListener.onShowAdComplete(); loadAd(activity); return; } Log.d(LOG_TAG, "Will show ad."); appOpenAd.setFullScreenContentCallback( new FullScreenContentCallback() { /** Called when full screen content is dismissed. */ @Override public void onAdDismissedFullScreenContent() { // Set the reference to null so isAdAvailable() returns false. appOpenAd = null; isShowingAd = false; Log.d(LOG_TAG, "onAdDismissedFullScreenContent."); Toast.makeText(activity, "onAdDismissedFullScreenContent", Toast.LENGTH_SHORT).show(); onShowAdCompleteListener.onShowAdComplete(); loadAd(activity); } /** Called when fullscreen content failed to show. */ @Override public void onAdFailedToShowFullScreenContent(AdError adError) { appOpenAd = null; isShowingAd = false; Log.d(LOG_TAG, "onAdFailedToShowFullScreenContent: " + adError.getMessage()); Toast.makeText(activity, "onAdFailedToShowFullScreenContent", Toast.LENGTH_SHORT) .show(); onShowAdCompleteListener.onShowAdComplete(); loadAd(activity); } /** Called when fullscreen content is shown. */ @Override public void onAdShowedFullScreenContent() { Log.d(LOG_TAG, "onAdShowedFullScreenContent."); Toast.makeText(activity, "onAdShowedFullScreenContent", Toast.LENGTH_SHORT).show(); } }); isShowingAd = true; appOpenAd.show(activity); } } }
3,802
435
// // AutoInch.h // AutoInch // // Created by 李响 on 2019/3/30. // Copyright © 2019 swift. All rights reserved. // #import <UIKit/UIKit.h> //! Project version number for AutoInch. FOUNDATION_EXPORT double AutoInchVersionNumber; //! Project version string for AutoInch. FOUNDATION_EXPORT const unsigned char AutoInchVersionString[]; // In this header, you should import all the public headers of your framework using statements like #import <AutoInch/PublicHeader.h>
151
473
/* * Copyright (c) 2015 Kaprica Security, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
* */ #pragma once #include "cgc_instruction.h" class Emulator { private: inline bool in_stack(int mem) { #ifdef PATCHED_1 if ((unsigned char *)mem < d_stack) return false; #endif return (unsigned char *)mem - d_stack < STACK_SIZE; } inline bool in_heap(int mem) { return (mem >> 30) == 0; } inline unsigned char *heap_addr(int mem) { return mem + d_heap; } inline unsigned int heap_length(int mem) { if (!in_heap(mem)) return 0; return 0x40000000 - mem; } inline void set_operand(const Operand &opr, int value, bool carry, bool do_flags) { if (do_flags) set_flags(value, carry); set_operand(opr, value); } inline void set_flags(int value, bool carry) { d_zf = value == 0; d_cf = carry; } public: Emulator(void *sp, void *heap); ~Emulator(); void reset(); void set_ip(int ip); bool step(); void print_state(); int get_operand_mem(const Operand &opr); int get_operand(const Operand &opr); void set_operand(const Operand &opr, int value); const Instruction &last_instruction() { return d_instruction; } template <typename F> void traverse_dirty(F f) { for (unsigned int i = 0; i < sizeof(d_dirty_pages); i++) { if (d_dirty_pages[i] == 0) continue; for (unsigned int j = 0; j < 8; j++) { if (d_dirty_pages[i] & (1 << j)) f((i * 8 + j) * 0x1000); } } } static const int HEAP_SIZE = 0x40000000; static const int STACK_SIZE = 65536; private: bool d_fault; unsigned char *d_stack; unsigned char *d_heap; unsigned char d_dirty_pages[HEAP_SIZE / 0x8000]; int d_reg[REG__count]; int d_ip; unsigned int d_zf : 1; unsigned int d_cf : 1; Instruction d_instruction; };
1,242
1,840
<reponame>RaulGracia/pravega<filename>common/src/main/java/io/pravega/common/io/serialization/RevisionDataOutputStream.java /** * Copyright Pravega Authors. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package io.pravega.common.io.serialization; import com.google.common.base.Preconditions; import io.pravega.common.io.DirectDataOutput; import io.pravega.common.io.SerializationException; import io.pravega.common.util.BitConverter; import io.pravega.common.util.BufferView; import java.io.DataOutputStream; import java.io.FilterOutputStream; import java.io.IOException; import java.io.OutputStream; import java.util.Arrays; import java.util.Collection; import java.util.Map; import java.util.UUID; import java.util.function.ToIntFunction; import javax.annotation.concurrent.NotThreadSafe; import lombok.Getter; /** * RevisionDataOutput implementation that mimics the encoding in {@link DataOutputStream}. */ @NotThreadSafe abstract class RevisionDataOutputStream extends FilterOutputStream implements RevisionDataOutput { //region Members private DirectDataOutput structuredWriter; @Getter private int size; //endregion //region Constructor private RevisionDataOutputStream(OutputStream outputStream) { super(outputStream); this.structuredWriter = (out instanceof DirectDataOutput) ? 
(DirectDataOutput) out : new IndirectWriter(); this.size = 0; } protected final void setOut(OutputStream out, int length) throws IOException { super.out = out; this.structuredWriter = (out instanceof DirectDataOutput) ? (DirectDataOutput) out : new IndirectWriter(); this.structuredWriter.writeInt(length); } /** * Wraps the given OutputStream into a specific implementation of RevisionDataOutputStream. * * @param outputStream The OutputStream to wrap. * @return A new instance of a RevisionDataOutputStream sub-class, depending on whether the given OutputStream is a * {@link RandomAccessOutputStream} (supports seeking) or not. * @throws IOException If an IO Exception occurred. This is because if the given OutputStream is a {@link RandomAccessOutputStream}, * this will pre-allocate 4 bytes for the length. */ public static RevisionDataOutputStream wrap(OutputStream outputStream) throws IOException { if (outputStream instanceof RandomAccessOutputStream) { return new RandomRevisionDataOutput(outputStream); } else { return new NonSeekableRevisionDataOutput(outputStream); } } //endregion //region DataOutput Implementation @Override public void flush() throws IOException { this.out.flush(); } @Override public void write(int b) throws IOException { this.out.write(b); this.size++; } @Override public void write(byte[] array, int off, int len) throws IOException { this.out.write(array, off, len); this.size += len; } @Override public final void writeBoolean(boolean value) throws IOException { write(value ? 
1 : 0); } @Override public final void writeByte(int b) throws IOException { write(b); } @Override public void writeChar(int c) throws IOException { writeShort(c); } @Override public void writeShort(int s) throws IOException { this.structuredWriter.writeShort(s); this.size += Short.BYTES; } @Override public void writeInt(int i) throws IOException { this.structuredWriter.writeInt(i); this.size += Integer.BYTES; } @Override public void writeLong(long l) throws IOException { this.structuredWriter.writeLong(l); this.size += Long.BYTES; } @Override public void writeFloat(float f) throws IOException { writeInt(Float.floatToIntBits(f)); } @Override public void writeDouble(double d) throws IOException { writeLong(Double.doubleToLongBits(d)); } @Override public void writeBytes(String s) throws IOException { int len = s.length(); for (int i = 0; i < len; ++i) { this.out.write((byte) s.charAt(i)); } this.size += len; } @Override public final void writeChars(String s) throws IOException { int len = s.length(); for (int i = 0; i < len; ++i) { writeChar(s.charAt(i)); } } @Override public final void writeUTF(String str) throws IOException { // Note: this method was borrowed from DataOutputStream without any changes. 
final int stringLength = str.length(); final int utfLength = getUTFLength(str) - 2; Preconditions.checkArgument(utfLength <= 65535, "Encoded string too long: %s bytes", utfLength); byte[] byteArray = new byte[utfLength + 2]; int index = 0; byteArray[index++] = (byte) ((utfLength >>> 8) & 0xFF); byteArray[index++] = (byte) ((utfLength >>> 0) & 0xFF); int c; int i; for (i = 0; i < stringLength; i++) { c = str.charAt(i); if (c < 1 || c > 127) { break; } byteArray[index++] = (byte) c; } for (; i < stringLength; i++) { c = str.charAt(i); if (c >= 1 && c <= 127) { byteArray[index++] = (byte) c; } else if (c > 2047) { byteArray[index++] = (byte) (0xE0 | ((c >> 12) & 0x0F)); byteArray[index++] = (byte) (0x80 | ((c >> 6) & 0x3F)); byteArray[index++] = (byte) (0x80 | ((c >> 0) & 0x3F)); } else { byteArray[index++] = (byte) (0xC0 | ((c >> 6) & 0x1F)); byteArray[index++] = (byte) (0x80 | ((c >> 0) & 0x3F)); } } write(byteArray); } //endregion //region RevisionDataOutput Implementation @Override public int getUTFLength(String s) { // This code is extracted out of DataOutputStream.writeUTF(). If we change the underlying implementation, this // needs to change as well. int charCount = s.length(); int length = 2; // writeUTF() will also encode a 2-byte length. for (int i = 0; i < charCount; ++i) { char c = s.charAt(i); if (c >= 1 && c <= 127) { length++; } else if (c > 2047) { length += 3; } else { length += 2; } } return length; } @Override public int getCompactLongLength(long value) { if (value < COMPACT_LONG_MIN || value > COMPACT_LONG_MAX) { throw new IllegalArgumentException(badArgRange("writeCompactLong", "longs", "[0, 2^62)", value)); } else if (value > 0x3FFF_FFFF) { return 8; } else if (value > 0x3FFF) { return 4; } else if (value > 0x3F) { return 2; } else { return 1; } } /** * {@inheritDoc} * Encodes the given value as a compact long, using the following scheme (MSB=Most Significant Bits): * * MSB = 00 for values in [0, 0x3F], with a 1-byte encoding. 
* * MSB = 01 for values in (0x3F, 0x3FFF], with a 2-byte encoding (2 MSB are reserved, leaving 14 bits usable). * * MSB = 10 for values in (0x3FFF, 0x3FFF_FFFF], with a 4-byte encoding (2 MSB are reserved, leaving 30 bits usable). * * MSB = 11 for values in (0x3FFF_FFFF, 0x3FFF_FFFF_FFFF_FFFFL], with an 8-byte encoding (2 MSB are reserved, leaving 62 bits usable). * * @param value The value to encode. */ @Override public void writeCompactLong(long value) throws IOException { if (value < COMPACT_LONG_MIN || value > COMPACT_LONG_MAX) { throw new IllegalArgumentException(badArgRange("writeCompactLong", "longs", "[0, 2^62)", value)); } else if (value > 0x3FFF_FFFF) { // All 8 bytes writeInt((int) (value >>> 32 | 0xC000_0000)); writeInt((int) value); } else if (value > 0x3FFF) { // Only 4 bytes. writeInt((int) (value | 0x8000_0000)); } else if (value > 0x3F) { // Only 2 bytes. writeShort((short) (value | 0x4000)); } else { // 1 byte. writeByte((byte) value); } } @Override public int getCompactSignedLongLength(long value) { if (value < COMPACT_SIGNED_LONG_MIN || value > COMPACT_SIGNED_LONG_MAX) { throw new IllegalArgumentException(badArgRange("writeCompactSignedLong", "longs", "[-2^61, 2^61)", value)); } if (value < 0) { value = negateSignedNumber(value); } if (value > 0x1FFF_FFFF) { return 8; } else if (value > 0x1FFF) { return 4; } else if (value > 0x1F) { return 2; } else { return 1; } } /** * {@inheritDoc} * Encodes the given value as a compact long, using the following scheme (MSB=Most Significant Bits). * * MSB[0] = 1 for negative values and 0 for positive values. * * MSB[1-2] = 00 if abs(value) in [0, 0x1F], with a 1-byte encoding. * * MSB[1-2] = 01 if abs(value) in (0x1F, 0x1FFF], with a 2-byte encoding (3 MSB are reserved, leaving 13 bits usable). * * MSB[1-2] = 10 if abs(value) in (0x1FFF, 0x1FFF_FFFF], with a 4-byte encoding (3 MSB are reserved, leaving 29 bits usable). 
* * MSB[1-2] = 11 if abs(value) in (0x1FFF_FFFF, 0x1FFF_FFFF_FFFF_FFFFL], with an 8-byte encoding (3 MSB are reserved, leaving 61 bits usable). * * @param value The value to encode. */ @Override public void writeCompactSignedLong(long value) throws IOException { if (value < COMPACT_SIGNED_LONG_MIN || value > COMPACT_SIGNED_LONG_MAX) { throw new IllegalArgumentException(badArgRange("writeCompactSignedLong", "longs", "[-2^61, 2^61)", value)); } else { boolean negative = value < 0; if (negative) { // Transform the value into a positive one. value = negateSignedNumber(value); } if (value > 0x1FFF_FFFF) { // All 8 bytes writeInt((int) (value >>> 32 | (negative ? 0xE000_0000 : 0x6000_0000))); writeInt((int) value); } else if (value > 0x1FFF) { // Only 4 bytes. writeInt((int) (value | (negative ? 0xC000_0000 : 0x4000_0000))); } else if (value > 0x1F) { // Only 2 bytes. writeShort((short) (value | (negative ? 0xA000 : 0x2000))); } else if (negative) { // 1 byte. writeByte((byte) value | 0x80); } else { // 1 byte. writeByte((byte) value); } } } @Override public int getCompactIntLength(int value) { if (value < COMPACT_INT_MIN || value > COMPACT_INT_MAX) { throw new IllegalArgumentException(badArgRange("writeCompactInt", "ints", "[0, 2^30)", value)); } else if (value > 0x3FFF) { return 4; } else if (value > 0x7F) { return 2; } else { return 1; } } /** * {@inheritDoc} * Encodes the given value as a compact integer, using the following scheme (MSB=Most Significant Bits): * * MSB = 0 for values in [0, 0x7F], with a 1-byte encoding. * * MSB = 10 for values in (0x7F, 0x3FFF], with a 2-byte encoding (2 MSB are reserved, leaving 14 bits usable). * * MSB = 11 for values in (0x3FFF, 0x3FFF_FFFF], with a 4-byte encoding (2 MSB are reserved, leaving 30 bits usable). * * @param value The value to encode. 
*/ @Override public void writeCompactInt(int value) throws IOException { // MSB: 0 -> 1 byte with the remaining 7 bits // MSB: 10 -> 2 bytes with the remaining 6+8 bits // MSB: 11 -> 4 bytes with the remaining 6+8+8+8 bits if (value < COMPACT_INT_MIN || value > COMPACT_INT_MAX) { throw new IllegalArgumentException(badArgRange("writeCompactInt", "ints", "[0, 2^30)", value)); } else if (value > 0x3FFF) { // All 4 bytes writeInt(value | 0xC000_0000); } else if (value > 0x7F) { // 2 Bytes. writeShort((short) (value | 0x8000)); } else { // 1 byte. writeByte((byte) value); } } /** * {@inheritDoc} * Encodes the given UUID as a sequence of 2 Longs, withe the Most Significant Bits first, followed by Least * Significant bits. * * @param uuid The value to encode. */ @Override public void writeUUID(UUID uuid) throws IOException { writeLong(uuid.getMostSignificantBits()); writeLong(uuid.getLeastSignificantBits()); } @Override public int getCollectionLength(int elementCount, int elementLength) { return getCompactIntLength(elementCount) + elementCount * elementLength; } @Override public <T> int getCollectionLength(Collection<T> collection, ToIntFunction<T> elementLengthProvider) { if (collection == null) { return getCompactIntLength(0); } return getCompactIntLength(collection.size()) + collection.stream().mapToInt(elementLengthProvider).sum(); } @Override public <T> int getCollectionLength(T[] array, ToIntFunction<T> elementLengthProvider) { if (array == null) { return getCompactIntLength(0); } return getCompactIntLength(array.length) + Arrays.stream(array).mapToInt(elementLengthProvider).sum(); } @Override public <T> void writeCollection(Collection<T> collection, ElementSerializer<T> elementSerializer) throws IOException { if (collection == null) { writeCompactInt(0); return; } writeCompactInt(collection.size()); for (T e : collection) { elementSerializer.accept(this, e); } } @Override public <T> void writeArray(T[] array, ElementSerializer<T> elementSerializer) throws 
IOException { if (array == null) { writeCompactInt(0); return; } writeCompactInt(array.length); for (T e : array) { elementSerializer.accept(this, e); } } @Override public void writeArray(byte[] array, int offset, int length) throws IOException { if (array == null) { // We ignore offset and length in this case, as per the method's contract. writeCompactInt(0); return; } else if (offset < 0 || offset > array.length || length < 0 || offset + length > array.length) { throw new ArrayIndexOutOfBoundsException("offset and length must refer to a range within the given array."); } writeCompactInt(length); write(array, offset, length); } @Override public void writeBuffer(BufferView buf) throws IOException { if (buf == null) { // Null will be deserialized as an empty array, so write 0 as length. writeCompactInt(0); return; } // Write Length. writeCompactInt(buf.getLength()); // Copy the buffer contents to this OutputStream. This will write all its bytes. this.structuredWriter.writeBuffer(buf); // Increase our size here regardless of what structuredWriter does. See IndirectWriter.writerBuffer for how // this is offset in certain cases. 
this.size += buf.getLength(); } @Override public int getMapLength(int elementCount, int keyLength, int valueLength) { return getCompactIntLength(elementCount) + elementCount * (keyLength + valueLength); } @Override public <K, V> int getMapLength(Map<K, V> map, ToIntFunction<K> keyLengthProvider, ToIntFunction<V> valueLengthProvider) { if (map == null) { return getCompactIntLength(0); } return getCompactIntLength(map.size()) + map.entrySet().stream() .mapToInt(e -> keyLengthProvider.applyAsInt(e.getKey()) + valueLengthProvider.applyAsInt(e.getValue())) .sum(); } @Override public <K, V> void writeMap(Map<K, V> map, ElementSerializer<K> keySerializer, ElementSerializer<V> valueSerializer) throws IOException { if (map == null) { writeCompactInt(0); return; } writeCompactInt(map.size()); for (Map.Entry<K, V> e : map.entrySet()) { keySerializer.accept(this, e.getKey()); valueSerializer.accept(this, e.getValue()); } } private <T> String badArgRange(String methodName, String type, String interval, T arg) { return String.format("%s can only serialize %s in the interval %s, given %s.", methodName, type, interval, arg); } /** * Transforms a number belonging to a range of [A, B] into an equivalent number in the range [-B-1, -A-1]. This * transformation is reversible (X=negate(negate(X)) and is useful for encoding negative compacted numbers and will * not work for {@link Long#MIN_VALUE}. * * @param value The value to encode. * @return The negated value. */ static long negateSignedNumber(long value) { return -value - 1; } //endregion //region IndirectWriter /** * Structured data writer for cases when the underlying {@link OutputStream} does not support {@link DirectDataOutput}. 
*/ private class IndirectWriter implements DirectDataOutput { @Override public void writeShort(int shortValue) throws IOException { BitConverter.writeShort(out, (short) shortValue); } @Override public void writeInt(int intValue) throws IOException { BitConverter.writeInt(out, intValue); } @Override public void writeLong(long longValue) throws IOException { BitConverter.writeLong(out, longValue); } @Override public void writeBuffer(BufferView buffer) throws IOException { buffer.copyTo(RevisionDataOutputStream.this); // BufferView.copyTo will cause our "size" to increase, but an external DirectDataOutput will not do that. // To avoid wrapping the external DirectDataOutput with another wrapper, we increase the size in // RevisionDataOutputStream.writeBuffer() regardless of case, so we need to make an adjustment here to offset // for that. size -= buffer.getLength(); } } //endregion //region Implementations /** * RevisionDataOutput implementation that writes to a RandomAccessOutputStream OutputStream. This does not force the caller to * explicitly declare the length prior to serialization as it can be back-filled upon closing. */ private static class RandomRevisionDataOutput extends RevisionDataOutputStream { private final int initialPosition; /** * Creates a new instance of the RandomRevisionDataOutput class. Upon a successful call to this constructor, 4 bytes * will have been written to the OutputStream representing a placeholder for the length. These 4 bytes will be populated * upon closing this OutputStream. * * @param outputStream The OutputStream to wrap. * @throws IOException If an IO Exception occurred. */ RandomRevisionDataOutput(OutputStream outputStream) throws IOException { super(outputStream); // Pre-allocate 4 bytes so we can write the length later, but remember this position. 
RandomAccessOutputStream ros = (RandomAccessOutputStream) outputStream; this.initialPosition = ros.size(); ros.writeInt(0); } @Override public void close() throws IOException { // Calculate the number of bytes written, making sure to exclude the bytes for the length encoding. RandomAccessOutputStream ros = (RandomAccessOutputStream) this.out; int length = ros.size() - this.initialPosition - Integer.BYTES; // Write the length at the appropriate position. ros.writeInt(length, this.initialPosition); } @Override public OutputStream getBaseStream() { // We need to return an OutputStream that implements RandomAccessOutputStream, which is our underlying OutputStream (and not us). return this.out; } @Override public boolean requiresExplicitLength() { return false; } @Override public void length(int length) throws IOException { // Nothing to do. } } /** * RevisionDataOutput implementation that writes to a general OutputStream. This will force the caller to explicitly * calculate and declare the length prior to serialization as it cannot be back-filled upon closing. */ @NotThreadSafe private static class NonSeekableRevisionDataOutput extends RevisionDataOutputStream { private final OutputStream realStream; private int length; NonSeekableRevisionDataOutput(OutputStream outputStream) { super(new LengthRequiredOutputStream()); this.realStream = outputStream; this.length = 0; } @Override public void close() throws IOException { // We do not want to close the underlying Stream as it may be reused. if (this.length != getSize()) { // Check if we wrote the number of bytes we declared, otherwise we will have problems upon deserializing. throw new SerializationException(String.format("Unexpected number of bytes written. Declared: %d, written: %d.", this.length, getSize())); } else if (requiresExplicitLength()) { // We haven't written anything nor declared a length. Write the length prior to exiting. 
length(0); } } @Override public OutputStream getBaseStream() { return this; } @Override public boolean requiresExplicitLength() { // We only require the Length to be declared once; after it's been set there's no need to set it again. return this.out instanceof LengthRequiredOutputStream; } @Override public void length(int length) throws IOException { if (requiresExplicitLength()) { setOut(this.realStream, length); this.length = length; } } private static class LengthRequiredOutputStream extends OutputStream { @Override public void write(int i) { throw new IllegalStateException("Length must be declared prior to writing anything."); } @Override public void write(byte[] buffer, int index, int length) { throw new IllegalStateException("Length must be declared prior to writing anything."); } } } //endregion }
9,902
348
{"nom":"Villacerf","circ":"1ère circonscription","dpt":"Aube","inscrits":437,"abs":204,"votants":233,"blancs":14,"nuls":1,"exp":218,"res":[{"nuance":"REM","nom":"<NAME>","voix":88},{"nuance":"FN","nom":"<NAME>","voix":73},{"nuance":"LR","nom":"<NAME>","voix":57}]}
103