/*=========================================================================
 *
 *  Copyright NumFOCUS
 *
 *  Licensed under the Apache License, Version 2.0 (the "License");
 *  you may not use this file except in compliance with the License.
 *  You may obtain a copy of the License at
 *
 *         http://www.apache.org/licenses/LICENSE-2.0.txt
 *
 *  Unless required by applicable law or agreed to in writing, software
 *  distributed under the License is distributed on an "AS IS" BASIS,
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 *  See the License for the specific language governing permissions and
 *  limitations under the License.
 *
 *=========================================================================*/
#ifndef itkQuadrilateralCell_h
#define itkQuadrilateralCell_h

#include "itkLineCell.h"
#include "itkQuadrilateralCellTopology.h"

namespace itk
{
/** \class QuadrilateralCell
 * \brief Represents a quadrilateral for a Mesh.
 *
 * \tparam TPixelType The type associated with a point, cell, or boundary
 * for use in storing its data.
 *
 * \tparam TCellTraits Type information of mesh containing cell.
 *
 * \ingroup MeshObjects
 * \ingroup ITKCommon
 */
template <typename TCellInterface>
class ITK_TEMPLATE_EXPORT QuadrilateralCell
  : public TCellInterface
  , private QuadrilateralCellTopology
{
public:
  ITK_DISALLOW_COPY_AND_ASSIGN(QuadrilateralCell);

  /** Standard class type aliases. */
  itkCellCommonTypedefs(QuadrilateralCell);
  itkCellInheritedTypedefs(TCellInterface);

  /** Standard part of every itk Object. */
  itkTypeMacro(QuadrilateralCell, CellInterface);

  /** The type of boundary for this quadrilateral's vertices. */
  using VertexType = VertexCell<TCellInterface>;
  using VertexAutoPointer = typename VertexType::SelfAutoPointer;

  /** The type of boundary for this quadrilateral's edges. */
  using EdgeType = LineCell<TCellInterface>;
  using EdgeAutoPointer = typename EdgeType::SelfAutoPointer;

  /** Quadrilateral-specific topology numbers. */
  static constexpr unsigned int NumberOfPoints = 4;
  static constexpr unsigned int NumberOfVertices = 4;
  static constexpr unsigned int NumberOfEdges = 4;
  static constexpr unsigned int CellDimension = 2;
  static constexpr unsigned int NumberOfDerivatives = 8;

  /** Implement the standard CellInterface. */
  CellGeometryEnum
  GetType() const override
  {
    return CellGeometryEnum::QUADRILATERAL_CELL;
  }

  void
  MakeCopy(CellAutoPointer &) const override;

  unsigned int
  GetDimension() const override;

  unsigned int
  GetNumberOfPoints() const override;

  CellFeatureCount
  GetNumberOfBoundaryFeatures(int dimension) const override;

  bool
  GetBoundaryFeature(int dimension, CellFeatureIdentifier, CellAutoPointer &) override;

  void
  SetPointIds(PointIdConstIterator first) override;

  void
  SetPointIds(PointIdConstIterator first, PointIdConstIterator last) override;

  void
  SetPointId(int localId, PointIdentifier) override;

  PointIdIterator
  PointIdsBegin() override;

  PointIdConstIterator
  PointIdsBegin() const override;

  PointIdIterator
  PointIdsEnd() override;

  PointIdConstIterator
  PointIdsEnd() const override;

  /** Quadrilateral-specific interface. */
  virtual CellFeatureCount
  GetNumberOfVertices() const;

  virtual CellFeatureCount
  GetNumberOfEdges() const;

  virtual bool
  GetVertex(CellFeatureIdentifier, VertexAutoPointer &);

  virtual bool
  GetEdge(CellFeatureIdentifier, EdgeAutoPointer &);

  /** Evaluate the position inside the cell. */
  bool
  EvaluatePosition(CoordRepType *            position,
                   PointsContainer *         points,
                   CoordRepType *            closestPoint,
                   CoordRepType[CellDimension],
                   double *                  dist2,
                   InterpolationWeightType * weight) override;

  /** Visitor interface. */
  itkCellVisitMacro(CellGeometryEnum::QUADRILATERAL_CELL);

  /** Constructor and destructor. */
  QuadrilateralCell()
  {
    for (PointIdentifier i = 0; i < Self::NumberOfPoints; i++)
    {
      m_PointIds[i] = NumericTraits<PointIdentifier>::max();
    }
  }

#if defined(__GNUC__)
  // A bug in some versions of the GCC and Clang compilers
  // results in an ICE or linker error when "= default" is requested.
  // This was observed in at least gcc 4.8 and 5.4.0, and
  // AppleClang 7.0.2 and 8.0.0. Probably others too.
  // "= default" doesn't gain us much, so just don't use it here.
  ~QuadrilateralCell() override{};
#else
  ~QuadrilateralCell() override = default;
#endif

protected:
  /** Store the number of points needed for a quadrilateral. */
  PointIdentifier m_PointIds[NumberOfPoints];

  void
  InterpolationDerivs(const CoordRepType pointCoords[CellDimension], CoordRepType derivs[NumberOfDerivatives]);
  void
  InterpolationFunctions(const CoordRepType pointCoords[CellDimension],
                         InterpolationWeightType weights[NumberOfPoints]);
  void
  EvaluateLocation(int &                   itkNotUsed(subId),
                   const PointsContainer * points,
                   const CoordRepType      pointCoords[PointDimension],
                   CoordRepType            x[PointDimension],
                   InterpolationWeightType * weights);
};
} // end namespace itk

#ifndef ITK_MANUAL_INSTANTIATION
#  include "itkQuadrilateralCell.hxx"
#endif

#endif
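// Example usage (a sketch, not part of this header): adding a quadrilateral
// cell to an itk::Mesh. The mesh type and the point ids 0..3 are illustrative;
// the points must already exist in the mesh.
//
//   using MeshType = itk::Mesh<float, 3>;
//   using CellType = MeshType::CellType;
//   using QuadCellType = itk::QuadrilateralCell<CellType>;
//
//   CellType::CellAutoPointer cell;
//   cell.TakeOwnership(new QuadCellType);
//   for (unsigned int i = 0; i < QuadCellType::NumberOfPoints; ++i)
//   {
//     cell->SetPointId(i, i);
//   }
//   mesh->SetCell(0, cell);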
{ "pile_set_name": "Github" }
// Code generated by smithy-go-codegen DO NOT EDIT.

package rds

import (
	"context"

	awsmiddleware "github.com/aws/aws-sdk-go-v2/aws/middleware"
	"github.com/aws/aws-sdk-go-v2/aws/retry"
	"github.com/aws/aws-sdk-go-v2/aws/signer/v4"
	"github.com/aws/aws-sdk-go-v2/service/rds/types"
	smithy "github.com/awslabs/smithy-go"
	"github.com/awslabs/smithy-go/middleware"
	smithyhttp "github.com/awslabs/smithy-go/transport/http"
)

// Creates a new DB instance that acts as a read replica for an existing source DB
// instance. You can create a read replica for a DB instance running MySQL,
// MariaDB, Oracle, PostgreSQL, or SQL Server. For more information, see Working
// with Read Replicas
// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_ReadRepl.html) in
// the Amazon RDS User Guide. Amazon Aurora doesn't support this action. Call the
// CreateDBInstance action to create a DB instance for an Aurora DB cluster. All
// read replica DB instances are created with backups disabled. All other DB
// instance attributes (including DB security groups and DB parameter groups) are
// inherited from the source DB instance, except as specified. Your source DB
// instance must have backup retention enabled.
func (c *Client) CreateDBInstanceReadReplica(ctx context.Context, params *CreateDBInstanceReadReplicaInput, optFns ...func(*Options)) (*CreateDBInstanceReadReplicaOutput, error) {
	stack := middleware.NewStack("CreateDBInstanceReadReplica", smithyhttp.NewStackRequest)
	options := c.options.Copy()
	for _, fn := range optFns {
		fn(&options)
	}
	addawsAwsquery_serdeOpCreateDBInstanceReadReplicaMiddlewares(stack)
	awsmiddleware.AddRequestInvocationIDMiddleware(stack)
	smithyhttp.AddContentLengthMiddleware(stack)
	AddResolveEndpointMiddleware(stack, options)
	v4.AddComputePayloadSHA256Middleware(stack)
	retry.AddRetryMiddlewares(stack, options)
	addHTTPSignerV4Middleware(stack, options)
	awsmiddleware.AddAttemptClockSkewMiddleware(stack)
	addClientUserAgent(stack)
	smithyhttp.AddErrorCloseResponseBodyMiddleware(stack)
	smithyhttp.AddCloseResponseBodyMiddleware(stack)
	addOpCreateDBInstanceReadReplicaValidationMiddleware(stack)
	stack.Initialize.Add(newServiceMetadataMiddleware_opCreateDBInstanceReadReplica(options.Region), middleware.Before)
	addRequestIDRetrieverMiddleware(stack)
	addResponseErrorMiddleware(stack)

	for _, fn := range options.APIOptions {
		if err := fn(stack); err != nil {
			return nil, err
		}
	}
	handler := middleware.DecorateHandler(smithyhttp.NewClientHandler(options.HTTPClient), stack)
	result, metadata, err := handler.Handle(ctx, params)
	if err != nil {
		return nil, &smithy.OperationError{
			ServiceID:     ServiceID,
			OperationName: "CreateDBInstanceReadReplica",
			Err:           err,
		}
	}
	out := result.(*CreateDBInstanceReadReplicaOutput)
	out.ResultMetadata = metadata
	return out, nil
}

type CreateDBInstanceReadReplicaInput struct {

	// A value that indicates whether to copy all tags from the read replica to
	// snapshots of the read replica. By default, tags are not copied.
	CopyTagsToSnapshot *bool

	// The URL that contains a Signature Version 4 signed request for the
	// CreateDBInstanceReadReplica API action in the source AWS Region that contains
	// the source DB instance. You must specify this parameter when you create an
	// encrypted read replica from another AWS Region by using the Amazon RDS API.
	// Don't specify PreSignedUrl when you are creating an encrypted read replica in
	// the same AWS Region. The presigned URL must be a valid request for the
	// CreateDBInstanceReadReplica API action that can be executed in the source AWS
	// Region that contains the encrypted source DB instance. The presigned URL
	// request must contain the following parameter values:
	//
	//     * DestinationRegion - The AWS Region that the encrypted read replica is
	//     created in. This AWS Region is the same one where the
	//     CreateDBInstanceReadReplica action is called that contains this presigned
	//     URL. For example, if you create an encrypted read replica in the us-west-1
	//     AWS Region from a source DB instance in the us-east-2 AWS Region, then you
	//     call the CreateDBInstanceReadReplica action in the us-west-1 AWS Region and
	//     provide a presigned URL that contains a call to the
	//     CreateDBInstanceReadReplica action in the us-east-2 AWS Region. For this
	//     example, the DestinationRegion in the presigned URL must be set to the
	//     us-west-1 AWS Region.
	//
	//     * KmsKeyId - The AWS KMS key identifier for the key to use to encrypt the
	//     read replica in the destination AWS Region. This is the same identifier for
	//     both the CreateDBInstanceReadReplica action that is called in the
	//     destination AWS Region, and the action contained in the presigned URL.
	//
	//     * SourceDBInstanceIdentifier - The DB instance identifier for the encrypted
	//     DB instance to be replicated. This identifier must be in the Amazon
	//     Resource Name (ARN) format for the source AWS Region. For example, if you
	//     are creating an encrypted read replica from a DB instance in the us-west-2
	//     AWS Region, then your SourceDBInstanceIdentifier looks like the following
	//     example:
	//     arn:aws:rds:us-west-2:123456789012:instance:mysql-instance1-20161115.
	//
	// To learn how to generate a Signature Version 4 signed request, see
	// Authenticating Requests: Using Query Parameters (AWS Signature Version 4)
	// (https://docs.aws.amazon.com/AmazonS3/latest/API/sigv4-query-string-auth.html)
	// and Signature Version 4 Signing Process
	// (https://docs.aws.amazon.com/general/latest/gr/signature-version-4.html). If
	// you are using an AWS SDK tool or the AWS CLI, you can specify SourceRegion (or
	// --source-region for the AWS CLI) instead of specifying PreSignedUrl manually.
	// Specifying SourceRegion autogenerates a presigned URL that is a valid request
	// for the operation that can be executed in the source AWS Region. SourceRegion
	// isn't supported for SQL Server, because SQL Server on Amazon RDS doesn't
	// support cross-region read replicas.
	PreSignedUrl *string

	// A list of EC2 VPC security groups to associate with the read replica. Default:
	// The default EC2 VPC security group for the DB subnet group's VPC.
	VpcSecurityGroupIds []*string

	// A value that indicates whether the DB instance class of the DB instance uses
	// its default processor features.
	UseDefaultProcessorFeatures *bool

	// The ARN for the IAM role that permits RDS to send enhanced monitoring metrics
	// to Amazon CloudWatch Logs. For example,
	// arn:aws:iam::123456789012:role/emaccess.
	// For information on creating a monitoring role, go to To create an IAM role for
	// Amazon RDS Enhanced Monitoring
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Monitoring.html#USER_Monitoring.OS.IAMRole)
	// in the Amazon RDS User Guide. If MonitoringInterval is set to a value other
	// than 0, then you must supply a MonitoringRoleArn value.
	MonitoringRoleArn *string

	// A value that indicates whether to enable Performance Insights for the read
	// replica. For more information, see Using Amazon Performance Insights
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_PerfInsights.html)
	// in the Amazon RDS User Guide.
	EnablePerformanceInsights *bool

	// A value that indicates whether the read replica is in a Multi-AZ deployment.
	// You can create a read replica as a Multi-AZ DB instance. RDS creates a standby
	// of your replica in another Availability Zone for failover support for the
	// replica. Creating your read replica as a Multi-AZ DB instance is independent
	// of whether the source database is a Multi-AZ DB instance.
	MultiAZ *bool

	// Specifies a DB subnet group for the DB instance. The new DB instance is
	// created in the VPC associated with the DB subnet group. If no DB subnet group
	// is specified, then the new DB instance isn't created in a VPC. Constraints:
	//
	//     * Can only be specified if the source DB instance identifier specifies a
	//     DB instance in another AWS Region.
	//
	//     * If supplied, must match the name of an existing DBSubnetGroup.
	//
	//     * The specified DB subnet group must be in the same AWS Region in which
	//     the operation is running.
	//
	//     * All read replicas in one AWS Region that are created from the same
	//     source DB instance must either:
	//
	//         * Specify DB subnet groups from the same VPC. All these read replicas
	//         are created in the same VPC.
	//
	//         * Not specify a DB subnet group. All these read replicas are created
	//         outside of any VPC.
	//
	// Example: mySubnetgroup
	DBSubnetGroupName *string

	// The compute and memory capacity of the read replica, for example, db.m4.large.
	// Not all DB instance classes are available in all AWS Regions, or for all
	// database engines. For the full list of DB instance classes, and availability
	// for your engine, see DB Instance Class
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/Concepts.DBInstanceClass.html)
	// in the Amazon RDS User Guide. Default: Inherits from the source DB instance.
	DBInstanceClass *string

	// A value that indicates whether minor engine upgrades are applied automatically
	// to the read replica during the maintenance window. Default: Inherits from the
	// source DB instance.
	AutoMinorVersionUpgrade *bool

	// The name of the DB parameter group to associate with this DB instance. If you
	// do not specify a value for DBParameterGroupName, then Amazon RDS uses the
	// DBParameterGroup of the source DB instance for a same-region read replica, or
	// the default DBParameterGroup for the specified DB engine for a cross-region
	// read replica. Currently, specifying a parameter group for this operation is
	// only supported for Oracle DB instances. Constraints:
	//
	//     * Must be 1 to 255 letters, numbers, or hyphens.
	//
	//     * First character must be a letter.
	//
	//     * Can't end with a hyphen or contain two consecutive hyphens.
	DBParameterGroupName *string

	// The interval, in seconds, between points when Enhanced Monitoring metrics are
	// collected for the read replica. To disable collecting Enhanced Monitoring
	// metrics, specify 0. The default is 0.
	// If MonitoringRoleArn is specified, then you must also set MonitoringInterval
	// to a value other than 0. Valid Values: 0, 1, 5, 10, 15, 30, 60
	MonitoringInterval *int32

	// The amount of Provisioned IOPS (input/output operations per second) to be
	// initially allocated for the DB instance.
	Iops *int32

	// A value that indicates whether the DB instance is publicly accessible. When
	// the DB instance is publicly accessible, its DNS endpoint resolves to the
	// private IP address from within the DB instance's VPC, and to the public IP
	// address from outside of the DB instance's VPC. Access to the DB instance is
	// ultimately controlled by the security group it uses, and that public access
	// is not permitted if the security group assigned to the DB instance doesn't
	// permit it. When the DB instance isn't publicly accessible, it is an internal
	// DB instance with a DNS name that resolves to a private IP address. For more
	// information, see CreateDBInstance.
	PubliclyAccessible *bool

	// A list of tags. For more information, see Tagging Amazon RDS Resources
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.html) in
	// the Amazon RDS User Guide.
	Tags []*types.Tag

	// The option group the DB instance is associated with. If omitted, the option
	// group associated with the source instance is used. For SQL Server, you must
	// use the option group associated with the source instance.
	OptionGroupName *string

	// The Availability Zone (AZ) where the read replica will be created. Default: A
	// random, system-chosen Availability Zone in the endpoint's AWS Region.
	// Example: us-east-1d
	AvailabilityZone *string

	// A value that indicates whether the DB instance has deletion protection
	// enabled. The database can't be deleted when deletion protection is enabled.
	// By default, deletion protection is disabled. For more information, see
	// Deleting a DB Instance
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_DeleteInstance.html).
	DeletionProtection *bool

	// The AWS KMS key ID for an encrypted read replica. The KMS key ID is the
	// Amazon Resource Name (ARN), KMS key identifier, or the KMS key alias for the
	// KMS encryption key. If you create an encrypted read replica in the same AWS
	// Region as the source DB instance, then you do not have to specify a value for
	// this parameter. The read replica is encrypted with the same KMS key as the
	// source DB instance. If you create an encrypted read replica in a different
	// AWS Region, then you must specify a KMS key for the destination AWS Region.
	// KMS encryption keys are specific to the AWS Region that they are created in,
	// and you can't use encryption keys from one AWS Region in another AWS Region.
	// You can't create an encrypted read replica from an unencrypted DB instance.
	KmsKeyId *string

	// A value that indicates whether to enable mapping of AWS Identity and Access
	// Management (IAM) accounts to database accounts. By default, mapping is
	// disabled. For information about the supported DB engines, see
	// CreateDBInstance. For more information about IAM database authentication, see
	// IAM Database Authentication for MySQL and PostgreSQL
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/UsingWithRDS.IAMDBAuth.html)
	// in the Amazon RDS User Guide.
	EnableIAMDatabaseAuthentication *bool

	// The AWS KMS key identifier for encryption of Performance Insights data.
	// The KMS key ID is the Amazon Resource Name (ARN), KMS key identifier, or the
	// KMS key alias for the KMS encryption key. If you do not specify a value for
	// PerformanceInsightsKMSKeyId, then Amazon RDS uses your default encryption
	// key. AWS KMS creates the default encryption key for your AWS account. Your
	// AWS account has a different default encryption key for each AWS Region.
	PerformanceInsightsKMSKeyId *string

	// Specify the name of the IAM role to be used when making API calls to the
	// Directory Service.
	DomainIAMRoleName *string

	// The Active Directory directory ID to create the DB instance in. For Oracle DB
	// instances, Amazon RDS can use Kerberos authentication to authenticate users
	// that connect to the DB instance. For more information, see Using Kerberos
	// Authentication with Amazon RDS for Oracle
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-kerberos.html)
	// in the Amazon RDS User Guide. For Microsoft SQL Server DB instances, Amazon
	// RDS can use Windows Authentication to authenticate users that connect to the
	// DB instance. For more information, see Using Windows Authentication with an
	// Amazon RDS DB Instance Running Microsoft SQL Server
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_SQLServerWinAuth.html)
	// in the Amazon RDS User Guide.
	Domain *string

	// The list of logs that the new DB instance is to export to CloudWatch Logs.
	// The values in the list depend on the DB engine being used. For more
	// information, see Publishing Database Logs to Amazon CloudWatch Logs
	// (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_LogAccess.html#USER_LogAccess.Procedural.UploadtoCloudWatch)
	// in the Amazon RDS User Guide.
	EnableCloudwatchLogsExports []*string

	// The amount of time, in days, to retain Performance Insights data. Valid
	// values are 7 or 731 (2 years).
	PerformanceInsightsRetentionPeriod *int32

	// The number of CPU cores and the number of threads per core for the DB
	// instance class of the DB instance.
	ProcessorFeatures []*types.ProcessorFeature

	// The port number that the DB instance uses for connections. Default: Inherits
	// from the source DB instance. Valid Values: 1150-65535
	Port *int32

	// Specifies the storage type to be associated with the read replica. Valid
	// values: standard | gp2 | io1 If you specify io1, you must also include a
	// value for the Iops parameter. Default: io1 if the Iops parameter is
	// specified, otherwise gp2
	StorageType *string

	// The DB instance identifier of the read replica. This identifier is the unique
	// key that identifies a DB instance. This parameter is stored as a lowercase
	// string.
	DBInstanceIdentifier *string

	// The identifier of the DB instance that will act as the source for the read
	// replica. Each DB instance can have up to five read replicas. Constraints:
	//
	//     * Must be the identifier of an existing MySQL, MariaDB, Oracle,
	//     PostgreSQL, or SQL Server DB instance.
	//
	//     * Can specify a DB instance that is a MySQL read replica only if the
	//     source is running MySQL 5.6 or later.
	//
	//     * For the limitations of Oracle read replicas, see Read Replica
	//     Limitations with Oracle
	//     (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/oracle-read-replicas.html)
	//     in the Amazon RDS User Guide.
	//
	//     * For the limitations of SQL Server read replicas, see Read Replica
	//     Limitations with Microsoft SQL Server
	//     (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/SQLServer.ReadReplicas.Limitations.html)
	//     in the Amazon RDS User Guide.
	//
	//     * Can specify a PostgreSQL DB instance only if the source is running
	//     PostgreSQL 9.3.5 or later (9.4.7 and higher for cross-region replication).
	//
	//     * The specified DB instance must have automatic backups enabled, that is,
	//     its backup retention period must be greater than 0.
	//
	//     * If the source DB instance is in the same AWS Region as the read
	//     replica, specify a valid DB instance identifier.
	//
	//     * If the source DB instance is in a different AWS Region from the read
	//     replica, specify a valid DB instance ARN. For more information, see
	//     Constructing an ARN for Amazon RDS
	//     (https://docs.aws.amazon.com/AmazonRDS/latest/UserGuide/USER_Tagging.ARN.html#USER_Tagging.ARN.Constructing)
	//     in the Amazon RDS User Guide. This doesn't apply to SQL Server, which
	//     doesn't support cross-region replicas.
	SourceDBInstanceIdentifier *string
}

type CreateDBInstanceReadReplicaOutput struct {

	// Contains the details of an Amazon RDS DB instance. This data type is used as
	// a response element in the DescribeDBInstances action.
	DBInstance *types.DBInstance

	// Metadata pertaining to the operation's result.
	ResultMetadata middleware.Metadata
}

func addawsAwsquery_serdeOpCreateDBInstanceReadReplicaMiddlewares(stack *middleware.Stack) {
	stack.Serialize.Add(&awsAwsquery_serializeOpCreateDBInstanceReadReplica{}, middleware.After)
	stack.Deserialize.Add(&awsAwsquery_deserializeOpCreateDBInstanceReadReplica{}, middleware.After)
}

func newServiceMetadataMiddleware_opCreateDBInstanceReadReplica(region string) awsmiddleware.RegisterServiceMetadata {
	return awsmiddleware.RegisterServiceMetadata{
		Region:        region,
		ServiceID:     ServiceID,
		SigningName:   "rds",
		OperationName: "CreateDBInstanceReadReplica",
	}
}
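// Example usage (a sketch, not part of the generated file): calling this
// operation from application code with the v2 SDK. The instance identifiers are
// placeholders, and the exact config-loading helper names vary slightly across
// the preview releases this file was generated for.
//
//	import (
//		"context"
//		"log"
//
//		"github.com/aws/aws-sdk-go-v2/aws"
//		"github.com/aws/aws-sdk-go-v2/config"
//		"github.com/aws/aws-sdk-go-v2/service/rds"
//	)
//
//	func createReplica() {
//		cfg, err := config.LoadDefaultConfig(context.TODO())
//		if err != nil {
//			log.Fatal(err)
//		}
//		client := rds.NewFromConfig(cfg)
//		out, err := client.CreateDBInstanceReadReplica(context.TODO(), &rds.CreateDBInstanceReadReplicaInput{
//			DBInstanceIdentifier:       aws.String("mydb-replica"), // placeholder
//			SourceDBInstanceIdentifier: aws.String("mydb"),         // placeholder
//			CopyTagsToSnapshot:         aws.Bool(true),
//		})
//		if err != nil {
//			log.Fatal(err)
//		}
//		log.Println("created replica:", out.DBInstance)
//	}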
{ "pile_set_name": "Github" }
<template>
  <el-date-picker
    :class="desc.class"
    :style="desc.style"
    class="ele-form-full-line"
    v-bind="attrs"
    v-model="newValue"
    v-on="onEvents"
  />
</template>

<script>
import formMixin from '../mixins/formMixin'
import timeMixin from '../mixins/timeMixin'

export default {
  name: 'EleFormYear',
  mixins: [formMixin, timeMixin],
  data () {
    return {
      mockRule: '@date',
      type: ['Date', 'String', 'Number']
    }
  },
  computed: {
    defaultAttrs () {
      return {
        type: 'year',
        placeholder: this.t('ele-form.year')
      }
    }
  }
}
</script>
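<!-- Example usage (a sketch, not part of this component): with the usual
ele-form registration, a field whose type is 'year' in the form description
is rendered by this component. The field name and label are hypothetical.

  formDesc: {
    startYear: {
      type: 'year',
      label: 'Start year'
    }
  }
-->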
{ "pile_set_name": "Github" }
from typing import Any, Callable, List, Optional, Set, Tuple, Union

from django.db import DefaultConnectionProxy
from django.db.backends.base.base import BaseDatabaseWrapper
from django.db.migrations.migration import Migration

from .loader import MigrationLoader
from .recorder import MigrationRecorder
from .state import ProjectState

class MigrationExecutor:
    connection: Any = ...
    loader: MigrationLoader = ...
    recorder: MigrationRecorder = ...
    progress_callback: Callable = ...
    def __init__(
        self,
        connection: Optional[Union[DefaultConnectionProxy, BaseDatabaseWrapper]],
        progress_callback: Optional[Callable] = ...,
    ) -> None: ...
    def migration_plan(
        self, targets: Union[List[Tuple[str, Optional[str]]], Set[Tuple[str, str]]], clean_start: bool = ...
    ) -> List[Tuple[Migration, bool]]: ...
    def migrate(
        self,
        targets: Optional[List[Tuple[str, Optional[str]]]],
        plan: Optional[List[Tuple[Migration, bool]]] = ...,
        state: Optional[ProjectState] = ...,
        fake: bool = ...,
        fake_initial: bool = ...,
    ) -> ProjectState: ...
    def collect_sql(self, plan: List[Tuple[Migration, bool]]) -> List[str]: ...
    def apply_migration(
        self, state: ProjectState, migration: Migration, fake: bool = ..., fake_initial: bool = ...
    ) -> ProjectState: ...
    def unapply_migration(self, state: ProjectState, migration: Migration, fake: bool = ...) -> ProjectState: ...
    def check_replacements(self) -> None: ...
    def detect_soft_applied(
        self, project_state: Optional[ProjectState], migration: Migration
    ) -> Tuple[bool, ProjectState]: ...
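# Example usage (a sketch, not part of the stub): a common pattern for checking
# and applying outstanding migrations at runtime with the API typed above.
#
#   from django.db import connection
#   from django.db.migrations.executor import MigrationExecutor
#
#   executor = MigrationExecutor(connection)
#   targets = executor.loader.graph.leaf_nodes()
#   plan = executor.migration_plan(targets)
#   if plan:
#       executor.migrate(targets)  # apply anything not yet recorded as run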
{ "pile_set_name": "Github" }
---
title: "Step 1: Install PhoneGap"
url: getting-started/1-install-phonegap/cli
layout: subpage
tabs:
  - label: Desktop App
    url: getting-started/1-install-phonegap/desktop
  - label: CLI
    url: getting-started/1-install-phonegap/cli
next: 1-getting-started/2-install-mobile-app.html.md
---

The PhoneGap CLI provides a command line interface for creating PhoneGap apps as an alternative to using the [PhoneGap Desktop App](/getting-started/1-install-phonegap/desktop) for those who prefer working at the command line. The PhoneGap CLI currently has some additional features over the PhoneGap Desktop for building, running and packaging your PhoneGap applications on multiple platforms. If you're comfortable using a CLI, this option may be the best one going forward.

<div class="alert--info">**NOTE:** The getting started steps vary based on the initial PhoneGap tool you install here in step 1. For the rest of the steps be sure to choose either the **Desktop** or **CLI** tab at the top accordingly.</div>

## Requirements

There are a few simple requirements you'll need prior to installing the PhoneGap CLI:

- [node.js](https://nodejs.org/) - a JavaScript runtime to build your JavaScript code
- [git](http://git-scm.com) - used in the background by the CLI to download assets. It comes pre-installed on some operating systems. To see if you already have it installed, type `git` from the command line.

## Install Steps

1. Install the [PhoneGap CLI](https://www.npmjs.com/package/phonegap) via `npm` with the following command from the Terminal app (Mac) or Command Prompt (Win).

    ```sh
    $ npm install -g phonegap@latest
    ```

    <div class="alert--tip">**TIPS:** 1) The `$` symbol is used throughout this guide to indicate the command prompt; it should not be typed. 2) `npm` is the node package manager and is installed with node.js. The `npm` command fetches the necessary dependencies for the PhoneGap CLI to run on your local machine. It creates a *node_modules* folder with the necessary code needed to run the CLI. The `-g` flag specifies that the folder be installed at the global location so it can be accessed from anywhere on your machine (defaults to */usr/local/lib/node_modules/phonegap* on Mac).</div>

    <div class="alert--warning">**OS X Users:** You may need to prefix this command with `sudo` to allow installation to restricted directories and type the following instead: `$ sudo npm install -g phonegap@latest`<br><br>**Windows 8 Users:** If you just installed Node.js, be sure to start the *Node.js Command Prompt* application specifically.</div>

1. Test to ensure the PhoneGap CLI is properly installed by typing `phonegap` on the command line. You should see the following `help` text output displayed:

    ```sh
    $ phonegap
    Usage: phonegap [options] [commands]

    Description:

      PhoneGap command-line tool.

    Commands:

      help [command]       output usage information
      create <path>        create a phonegap project
      ...
    ```

    <div class="alert--tip">**TIP:** You can access the PhoneGap CLI usage text at any time by adding the keyword `help`, or the `-h` or `--h` attribute with any phonegap command i.e.: `$ phonegap create help`, `$ phonegap serve -h`.</div>
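With the CLI installed, you can exercise the `create` and `serve` commands shown above to scaffold and preview a starter project (the app directory name below is just a placeholder):

```sh
$ phonegap create my-app
$ cd my-app
$ phonegap serve
```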
{ "pile_set_name": "Github" }
<!doctype html>
<html lang="en">
<head>
    <title>Code coverage report for tcb-admin-node/lib/wx</title>
    <meta charset="utf-8" />
    <link rel="stylesheet" href="../../../prettify.css" />
    <link rel="stylesheet" href="../../../base.css" />
    <meta name="viewport" content="width=device-width, initial-scale=1">
    <style type='text/css'>
        .coverage-summary .sorter {
            background-image: url(../../../sort-arrow-sprite.png);
        }
    </style>
</head>
<body>
<div class='wrapper'>
    <div class='pad1'>
        <h1>
            <a href="../../../index.html">All files</a> tcb-admin-node/lib/wx
        </h1>
        <div class='clearfix'>
            <div class='fl pad1y space-right2'>
                <span class="strong">64.29% </span>
                <span class="quiet">Statements</span>
                <span class='fraction'>9/14</span>
            </div>
            <div class='fl pad1y space-right2'>
                <span class="strong">57.14% </span>
                <span class="quiet">Branches</span>
                <span class='fraction'>4/7</span>
            </div>
            <div class='fl pad1y space-right2'>
                <span class="strong">100% </span>
                <span class="quiet">Functions</span>
                <span class='fraction'>2/2</span>
            </div>
            <div class='fl pad1y space-right2'>
                <span class="strong">64.29% </span>
                <span class="quiet">Lines</span>
                <span class='fraction'>9/14</span>
            </div>
        </div>
    </div>
    <div class='status-line medium'></div>
    <div class="pad1">
        <table class="coverage-summary">
            <thead>
            <tr>
                <th data-col="file" data-fmt="html" data-html="true" class="file">File</th>
                <th data-col="pic" data-type="number" data-fmt="html" data-html="true" class="pic"></th>
                <th data-col="statements" data-type="number" data-fmt="pct" class="pct">Statements</th>
                <th data-col="statements_raw" data-type="number" data-fmt="html" class="abs"></th>
                <th data-col="branches" data-type="number" data-fmt="pct" class="pct">Branches</th>
                <th data-col="branches_raw" data-type="number" data-fmt="html" class="abs"></th>
                <th data-col="functions" data-type="number" data-fmt="pct" class="pct">Functions</th>
                <th data-col="functions_raw" data-type="number" data-fmt="html" class="abs"></th>
                <th data-col="lines" data-type="number" data-fmt="pct" class="pct">Lines</th>
                <th data-col="lines_raw" data-type="number" data-fmt="html" class="abs"></th>
            </tr>
            </thead>
            <tbody>
            <tr>
                <td class="file medium" data-value="index.js"><a href="index.js.html">index.js</a></td>
                <td data-value="64.29" class="pic medium"><div class="chart"><div class="cover-fill" style="width: 64%;"></div><div class="cover-empty" style="width:36%;"></div></div></td>
                <td data-value="64.29" class="pct medium">64.29%</td>
                <td data-value="14" class="abs medium">9/14</td>
                <td data-value="57.14" class="pct medium">57.14%</td>
                <td data-value="7" class="abs medium">4/7</td>
                <td data-value="100" class="pct high">100%</td>
                <td data-value="2" class="abs high">2/2</td>
                <td data-value="64.29" class="pct medium">64.29%</td>
                <td data-value="14" class="abs medium">9/14</td>
            </tr>
            </tbody>
        </table>
    </div>
    <div class='push'></div><!-- for sticky footer -->
</div><!-- /wrapper -->
<div class='footer quiet pad2 space-top1 center small'>
    Code coverage generated by <a href="https://istanbul.js.org/" target="_blank">istanbul</a> at Tue Oct 30 2018 17:03:51 GMT+0800 (GMT+08:00)
</div>
<script src="../../../prettify.js"></script>
<script>
    window.onload = function () {
        if (typeof prettyPrint === 'function') {
            prettyPrint();
        }
    };
</script>
<script src="../../../sorter.js"></script>
</body>
</html>
{ "pile_set_name": "Github" }
# Requires IntelMPI to compile your own HDF5
#
# module unload bullxmpi
# module load mpi/intelmpi/5.1.3.181
# export HDF5_ROOT=${HOME}/Smilei_impi_env/hdf5-1.8.9-intelmpi
# export PATH=${HDF5_ROOT}/bin:${PATH}
# export LD_LIBRARY_PATH=${HDF5_ROOT}/lib:${LD_LIBRARY_PATH}
#
# module switch intel intel/17.0.2.174
# module load gnu
# module load python
# export PYTHONHOME=/ccc/products/ccc_python/2.7.8_201409

CXXFLAGS += -xAVX
LDFLAGS += -L/ccc/products/gcc-4.8.1/default/lib64 -lstdc++ -lgcc_s
{ "pile_set_name": "Github" }
'use strict';

angular.module("ngLocale", [], ["$provide", function($provide) {
  var PLURAL_CATEGORY = {ZERO: "zero", ONE: "one", TWO: "two", FEW: "few", MANY: "many", OTHER: "other"};
  $provide.value("$locale", {
    "DATETIME_FORMATS": {
      "AMPMS": [
        "\u0635",
        "\u0645"
      ],
      "DAY": [
        "\u0627\u0644\u0623\u062d\u062f",
        "\u0627\u0644\u0627\u062b\u0646\u064a\u0646",
        "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621",
        "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621",
        "\u0627\u0644\u062e\u0645\u064a\u0633",
        "\u0627\u0644\u062c\u0645\u0639\u0629",
        "\u0627\u0644\u0633\u0628\u062a"
      ],
      "MONTH": [
        "\u064a\u0646\u0627\u064a\u0631",
        "\u0641\u0628\u0631\u0627\u064a\u0631",
        "\u0645\u0627\u0631\u0633",
        "\u0623\u0628\u0631\u064a\u0644",
        "\u0645\u0627\u064a\u0648",
        "\u064a\u0648\u0646\u064a\u0648",
        "\u064a\u0648\u0644\u064a\u0648",
        "\u0623\u063a\u0633\u0637\u0633",
        "\u0633\u0628\u062a\u0645\u0628\u0631",
        "\u0623\u0643\u062a\u0648\u0628\u0631",
        "\u0646\u0648\u0641\u0645\u0628\u0631",
        "\u062f\u064a\u0633\u0645\u0628\u0631"
      ],
      "SHORTDAY": [
        "\u0627\u0644\u0623\u062d\u062f",
        "\u0627\u0644\u0627\u062b\u0646\u064a\u0646",
        "\u0627\u0644\u062b\u0644\u0627\u062b\u0627\u0621",
        "\u0627\u0644\u0623\u0631\u0628\u0639\u0627\u0621",
        "\u0627\u0644\u062e\u0645\u064a\u0633",
        "\u0627\u0644\u062c\u0645\u0639\u0629",
        "\u0627\u0644\u0633\u0628\u062a"
      ],
      "SHORTMONTH": [
        "\u064a\u0646\u0627\u064a\u0631",
        "\u0641\u0628\u0631\u0627\u064a\u0631",
        "\u0645\u0627\u0631\u0633",
        "\u0623\u0628\u0631\u064a\u0644",
        "\u0645\u0627\u064a\u0648",
        "\u064a\u0648\u0646\u064a\u0648",
        "\u064a\u0648\u0644\u064a\u0648",
        "\u0623\u063a\u0633\u0637\u0633",
        "\u0633\u0628\u062a\u0645\u0628\u0631",
        "\u0623\u0643\u062a\u0648\u0628\u0631",
        "\u0646\u0648\u0641\u0645\u0628\u0631",
        "\u062f\u064a\u0633\u0645\u0628\u0631"
      ],
      "fullDate": "EEEE\u060c d MMMM\u060c y",
      "longDate": "d MMMM\u060c y",
      "medium": "dd\u200f/MM\u200f/yyyy h:mm:ss a",
      "mediumDate": "dd\u200f/MM\u200f/yyyy",
      "mediumTime": "h:mm:ss a",
      "short": "d\u200f/M\u200f/yyyy h:mm a",
      "shortDate": "d\u200f/M\u200f/yyyy",
      "shortTime": "h:mm a"
    },
    "NUMBER_FORMATS": {
      "CURRENCY_SYM": "\u00a3",
      "DECIMAL_SEP": "\u066b",
      "GROUP_SEP": "\u066c",
      "PATTERNS": [
        {
          "gSize": 0,
          "lgSize": 0,
          "macFrac": 0,
          "maxFrac": 3,
          "minFrac": 0,
          "minInt": 1,
          "negPre": "",
          "negSuf": "-",
          "posPre": "",
          "posSuf": ""
        },
        {
          "gSize": 0,
          "lgSize": 0,
          "macFrac": 0,
          "maxFrac": 2,
          "minFrac": 2,
          "minInt": 1,
          "negPre": "\u00a4\u00a0",
          "negSuf": "-",
          "posPre": "\u00a4\u00a0",
          "posSuf": ""
        }
      ]
    },
    "id": "ar-sd",
    "pluralCat": function (n) {
      if (n == 0) { return PLURAL_CATEGORY.ZERO; }
      if (n == 1) { return PLURAL_CATEGORY.ONE; }
      if (n == 2) { return PLURAL_CATEGORY.TWO; }
      if (n == (n | 0) && n % 100 >= 3 && n % 100 <= 10) { return PLURAL_CATEGORY.FEW; }
      if (n == (n | 0) && n % 100 >= 11 && n % 100 <= 99) { return PLURAL_CATEGORY.MANY; }
      return PLURAL_CATEGORY.OTHER;
    }
  });
}]);
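// Example usage (a sketch, not part of the generated locale file): include this
// file after angular.js so the $locale service picks up the ar-sd rules, e.g.
//
//   <script src="angular.js"></script>
//   <script src="i18n/angular-locale_ar-sd.js"></script>
//
// The date and number filters then format according to this locale:
//
//   {{ today | date:'fullDate' }}   // uses the DATETIME_FORMATS above
//   {{ 1234.56 | number }}          // uses the Arabic decimal/group separators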
{ "pile_set_name": "Github" }
h2. Migrations

Migrations are a convenient way for you to alter your database in a structured and organised manner. You could edit fragments of SQL by hand but you would then be responsible for telling other developers that they need to go and run it. You'd also have to keep track of which changes need to be run against the production machines next time you deploy.

Active Record tracks which migrations have already been run so all you have to do is update your source and run +rake db:migrate+. Active Record will work out which migrations should be run. It will also update your +db/schema.rb+ file to match the structure of your database.

Migrations also allow you to describe these transformations using Ruby. The great thing about this is that (like most of Active Record's functionality) it is database independent: you don't need to worry about the precise syntax of +CREATE TABLE+ any more than you worry about variations on +SELECT *+ (you can drop down to raw SQL for database specific features). For example you could use SQLite3 in development, but MySQL in production.

You'll learn all about migrations including:

* The generators you can use to create them
* The methods Active Record provides to manipulate your database
* The Rake tasks that manipulate them
* How they relate to +schema.rb+

endprologue.

h3. Anatomy of a Migration

Before I dive into the details of a migration, here are a few examples of the sorts of things you can do:

<ruby>
class CreateProducts < ActiveRecord::Migration
  def self.up
    create_table :products do |t|
      t.string :name
      t.text :description

      t.timestamps
    end
  end

  def self.down
    drop_table :products
  end
end
</ruby>

This migration adds a table called +products+ with a string column called +name+ and a text column called +description+. A primary key column called +id+ will also be added, however since this is the default we do not need to ask for this. The timestamp columns +created_at+ and +updated_at+ which Active Record populates automatically will also be added.

Reversing this migration is as simple as dropping the table.

Migrations are not limited to changing the schema. You can also use them to fix bad data in the database or populate new fields:

<ruby>
class AddReceiveNewsletterToUsers < ActiveRecord::Migration
  def self.up
    change_table :users do |t|
      t.boolean :receive_newsletter, :default => false
    end
    User.update_all ["receive_newsletter = ?", true]
  end

  def self.down
    remove_column :users, :receive_newsletter
  end
end
</ruby>

This migration adds a +receive_newsletter+ column to the +users+ table. We want it to default to +false+ for new users, but existing users are considered to have already opted in, so we use the User model to set the flag to +true+ for existing users.

NOTE: Some "caveats":#using-models-in-your-migrations apply to using models in your migrations.

h4. Migrations are Classes

A migration is a subclass of <tt>ActiveRecord::Migration</tt> that implements two class methods: +up+ (perform the required transformations) and +down+ (revert them).

Active Record provides methods that perform common data definition tasks in a database independent way (you'll read about them in detail later):

* +create_table+
* +change_table+
* +drop_table+
* +add_column+
* +change_column+
* +rename_column+
* +remove_column+
* +add_index+
* +remove_index+

If you need to perform tasks specific to your database (for example create a "foreign key":#active-record-and-referential-integrity constraint) then the +execute+ function allows you to execute arbitrary SQL.
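For example, a minimal sketch (the table and constraint names here are hypothetical):

<ruby>
class AddForeignKeyToOrders < ActiveRecord::Migration
  def self.up
    # database specific DDL that Active Record has no helper for
    execute "ALTER TABLE orders ADD CONSTRAINT fk_orders_products FOREIGN KEY (product_id) REFERENCES products(id)"
  end

  def self.down
    execute "ALTER TABLE orders DROP FOREIGN KEY fk_orders_products"
  end
end
</ruby>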
A migration is just a regular Ruby class so you're not limited to these functions. For example after adding a column you could write code to set the value of that column for existing records (if necessary using your models).

On databases that support transactions with statements that change the schema (such as PostgreSQL), migrations are wrapped in a transaction. If the database does not support this (for example MySQL and SQLite) then when a migration fails the parts of it that succeeded will not be rolled back. You will have to unpick the changes that were made by hand.

h4. What's in a Name

Migrations are stored in files in +db/migrate+, one for each migration class. The name of the file is of the form +YYYYMMDDHHMMSS_create_products.rb+, that is to say a UTC timestamp identifying the migration followed by an underscore followed by the name of the migration. The migration class' name must match (the camelcased version of) the latter part of the file name. For example +20080906120000_create_products.rb+ should define +CreateProducts+ and +20080906120001_add_details_to_products.rb+ should define +AddDetailsToProducts+. If you do feel the need to change the file name then you <em>have to</em> update the name of the class inside or Rails will complain about a missing class.

Internally Rails only uses the migration's number (the timestamp) to identify them. Prior to Rails 2.1 the migration number started at 1 and was incremented each time a migration was generated. With multiple developers it was easy for these to clash, requiring you to rollback migrations and renumber them. With Rails 2.1 this is largely avoided by using the creation time of the migration to identify them. You can revert to the old numbering scheme by setting +config.active_record.timestamped_migrations+ to +false+ in +config/environment.rb+.

The combination of timestamps and recording which migrations have been run allows Rails to handle common situations that occur with multiple developers.

For example Alice adds migrations +20080906120000+ and +20080906123000+ and Bob adds +20080906124500+ and runs it. Alice finishes her changes and checks in her migrations and Bob pulls down the latest changes. Rails knows that it has not run Alice's two migrations so +rake db:migrate+ would run them (even though Bob's migration with a later timestamp has been run), and similarly migrating down would not run their +down+ methods.

Of course this is no substitute for communication within the team. For example, if Alice's migration removed a table that Bob's migration assumed to exist, then trouble would certainly strike.

h4. Changing Migrations

Occasionally you will make a mistake when writing a migration. If you have already run the migration then you cannot just edit the migration and run the migration again: Rails thinks it has already run the migration and so will do nothing when you run +rake db:migrate+. You must rollback the migration (for example with +rake db:rollback+), edit your migration and then run +rake db:migrate+ to run the corrected version.

In general editing existing migrations is not a good idea: you will be creating extra work for yourself and your co-workers and cause major headaches if the existing version of the migration has already been run on production machines. Instead you should write a new migration that performs the changes you require.
Editing a freshly generated migration that has not yet been committed to source control (or more generally which has not been propagated beyond your development machine) is relatively harmless. Just use some common sense.

h3. Creating a Migration

h4. Creating a Model

The model and scaffold generators will create migrations appropriate for adding a new model. This migration will already contain instructions for creating the relevant table. If you tell Rails what columns you want then statements for adding those will also be created. For example, running

<shell>
ruby script/generate model Product name:string description:text
</shell>

will create a migration that looks like this

<ruby>
class CreateProducts < ActiveRecord::Migration
  def self.up
    create_table :products do |t|
      t.string :name
      t.text :description

      t.timestamps
    end
  end

  def self.down
    drop_table :products
  end
end
</ruby>

You can append as many column name/type pairs as you want. By default +t.timestamps+ (which creates the +updated_at+ and +created_at+ columns that are automatically populated by Active Record) will be added for you.

h4. Creating a Standalone Migration

If you are creating migrations for other purposes (for example to add a column to an existing table) then you can use the migration generator:

<shell>
ruby script/generate migration AddPartNumberToProducts
</shell>

This will create an empty but appropriately named migration:

<ruby>
class AddPartNumberToProducts < ActiveRecord::Migration
  def self.up
  end

  def self.down
  end
end
</ruby>

If the migration name is of the form "AddXXXToYYY" or "RemoveXXXFromYYY" and is followed by a list of column names and types then a migration containing the appropriate +add_column+ and +remove_column+ statements will be created.

<shell>
ruby script/generate migration AddPartNumberToProducts part_number:string
</shell>

will generate

<ruby>
class AddPartNumberToProducts < ActiveRecord::Migration
  def self.up
    add_column :products, :part_number, :string
  end

  def self.down
    remove_column :products, :part_number
  end
end
</ruby>

Similarly,

<shell>
ruby script/generate migration RemovePartNumberFromProducts part_number:string
</shell>

generates

<ruby>
class RemovePartNumberFromProducts < ActiveRecord::Migration
  def self.up
    remove_column :products, :part_number
  end

  def self.down
    add_column :products, :part_number, :string
  end
end
</ruby>

You are not limited to one magically generated column, for example

<shell>
ruby script/generate migration AddDetailsToProducts part_number:string price:decimal
</shell>

generates

<ruby>
class AddDetailsToProducts < ActiveRecord::Migration
  def self.up
    add_column :products, :part_number, :string
    add_column :products, :price, :decimal
  end

  def self.down
    remove_column :products, :price
    remove_column :products, :part_number
  end
end
</ruby>

As always, what has been generated for you is just a starting point. You can add or remove from it as you see fit.

h3. Writing a Migration

Once you have created your migration using one of the generators it's time to get to work!

h4. Creating a Table

Migration method +create_table+ will be one of your workhorses. A typical use would be

<ruby>
create_table :products do |t|
  t.string :name
end
</ruby>

which creates a +products+ table with a column called +name+ (and as discussed below, an implicit +id+ column).

The object yielded to the block allows you to create columns on the table.
There are two ways of doing this. The first (traditional) form looks like

<ruby>
create_table :products do |t|
  t.column :name, :string, :null => false
end
</ruby>

The second form, the so-called "sexy" migration, drops the somewhat redundant +column+ method. Instead, the +string+, +integer+, etc. methods create a column of that type. Subsequent parameters are the same.

<ruby>
create_table :products do |t|
  t.string :name, :null => false
end
</ruby>

By default +create_table+ will create a primary key called +id+. You can change the name of the primary key with the +:primary_key+ option (don't forget to update the corresponding model) or if you don't want a primary key at all (for example for a HABTM join table) you can pass +:id => false+. If you need to pass database specific options you can place an SQL fragment in the +:options+ option. For example

<ruby>
create_table :products, :options => "ENGINE=BLACKHOLE" do |t|
  t.string :name, :null => false
end
</ruby>

will append +ENGINE=BLACKHOLE+ to the SQL statement used to create the table (when using MySQL the default is +ENGINE=InnoDB+).

The types supported by Active Record are +:primary_key+, +:string+, +:text+, +:integer+, +:float+, +:decimal+, +:datetime+, +:timestamp+, +:time+, +:date+, +:binary+, +:boolean+.

These will be mapped onto an appropriate underlying database type, for example with MySQL +:string+ is mapped to +VARCHAR(255)+. You can create columns of types not supported by Active Record when using the non-sexy syntax, for example

<ruby>
create_table :products do |t|
  t.column :name, 'polygon', :null => false
end
</ruby>

This may however hinder portability to other databases.

h4. Changing Tables

A close cousin of +create_table+ is +change_table+, used for changing existing tables. It is used in a similar fashion to +create_table+ but the object yielded to the block knows more tricks. For example

<ruby>
change_table :products do |t|
  t.remove :description, :name
  t.string :part_number
  t.index :part_number
  t.rename :upccode, :upc_code
end
</ruby>

removes the +description+ and +name+ columns, creates a +part_number+ column and adds an index on it. Finally it renames the +upccode+ column. This is the same as doing

<ruby>
remove_column :products, :description
remove_column :products, :name
add_column :products, :part_number, :string
add_index :products, :part_number
rename_column :products, :upccode, :upc_code
</ruby>

You don't have to keep repeating the table name and it groups all the statements related to modifying one particular table. The individual transformation names are also shorter, for example +remove_column+ becomes just +remove+ and +add_index+ becomes just +index+.

h4. Special Helpers

Active Record provides some shortcuts for common functionality. It is for example very common to add both the +created_at+ and +updated_at+ columns and so there is a method that does exactly that:

<ruby>
create_table :products do |t|
  t.timestamps
end
</ruby>

will create a new products table with those two columns (plus the +id+ column) whereas

<ruby>
change_table :products do |t|
  t.timestamps
end
</ruby>

adds those columns to an existing table.

The other helper is called +references+ (also available as +belongs_to+). In its simplest form it just adds some readability

<ruby>
create_table :products do |t|
  t.references :category
end
</ruby>

will create a +category_id+ column of the appropriate type. Note that you pass the model name, not the column name. Active Record adds the +_id+ for you.
If you have polymorphic +belongs_to+ associations then +references+ will add both of the columns required:

<ruby>
create_table :products do |t|
  t.references :attachment, :polymorphic => {:default => 'Photo'}
end
</ruby>

will add an +attachment_id+ column and a string +attachment_type+ column with a default value of 'Photo'.

NOTE: The +references+ helper does not actually create foreign key constraints for you. You will need to use +execute+ for that or a plugin that adds "foreign key support":#active-record-and-referential-integrity.

If the helpers provided by Active Record aren't enough you can use the +execute+ function to execute arbitrary SQL.

For more details and examples of individual methods check the API documentation, in particular the documentation for "<tt>ActiveRecord::ConnectionAdapters::SchemaStatements</tt>":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/SchemaStatements.html (which provides the methods available in the +up+ and +down+ methods), "<tt>ActiveRecord::ConnectionAdapters::TableDefinition</tt>":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/TableDefinition.html (which provides the methods available on the object yielded by +create_table+) and "<tt>ActiveRecord::ConnectionAdapters::Table</tt>":http://api.rubyonrails.org/classes/ActiveRecord/ConnectionAdapters/Table.html (which provides the methods available on the object yielded by +change_table+).

h4. Writing Your +down+ Method

The +down+ method of your migration should revert the transformations done by the +up+ method. In other words the database schema should be unchanged if you do an +up+ followed by a +down+. For example if you create a table in the +up+ method you should drop it in the +down+ method. It is wise to do things in precisely the reverse order to that in the +up+ method. For example

<ruby>
class ExampleMigration < ActiveRecord::Migration
  def self.up
    create_table :products do |t|
      t.references :category
    end
    #add a foreign key
    execute <<-SQL
      ALTER TABLE products
        ADD CONSTRAINT fk_products_categories
        FOREIGN KEY (category_id)
        REFERENCES categories(id)
    SQL

    add_column :users, :home_page_url, :string

    rename_column :users, :email, :email_address
  end

  def self.down
    rename_column :users, :email_address, :email
    remove_column :users, :home_page_url
    execute "ALTER TABLE products DROP FOREIGN KEY fk_products_categories"
    drop_table :products
  end
end
</ruby>

Sometimes your migration will do something which is just plain irreversible, for example it might destroy some data. In cases like those when you can't reverse the migration you can raise +IrreversibleMigration+ from your +down+ method. If someone tries to revert your migration an error message will be displayed saying that it can't be done.

h3. Running Migrations

Rails provides a set of rake tasks to work with migrations, which boil down to running certain sets of migrations. The very first migration related rake task you use will probably be +db:migrate+. In its most basic form it just runs the +up+ method for all the migrations that have not yet been run. If there are no such migrations it exits.

Note that running the +db:migrate+ task also invokes the +db:schema:dump+ task, which will update your +db/schema.rb+ file to match the structure of your database.

If you specify a target version, Active Record will run the required migrations (up or down) until it has reached the specified version. The version is the numerical prefix on the migration's filename.
For example to migrate to version 20080906120000 run

<shell>
rake db:migrate VERSION=20080906120000
</shell>

If this is greater than the current version (i.e. it is migrating upwards) this will run the +up+ method on all migrations up to and including 20080906120000, if migrating downwards this will run the +down+ method on all the migrations down to, but not including, 20080906120000.

h4. Rolling Back

A common task is to rollback the last migration, for example if you made a mistake in it and wish to correct it. Rather than tracking down the version number associated with the previous migration you can run

<shell>
rake db:rollback
</shell>

This will run the +down+ method from the latest migration. If you need to undo several migrations you can provide a +STEP+ parameter:

<shell>
rake db:rollback STEP=3
</shell>

will run the +down+ method from the last 3 migrations.

The +db:migrate:redo+ task is a shortcut for doing a rollback and then migrating back up again. As with the +db:rollback+ task you can use the +STEP+ parameter if you need to go more than one version back, for example

<shell>
rake db:migrate:redo STEP=3
</shell>

Neither of these Rake tasks do anything you could not do with +db:migrate+, they are simply more convenient since you do not need to explicitly specify the version to migrate to.

Lastly, the +db:reset+ task will drop the database, recreate it and load the current schema into it.

NOTE: This is not the same as running all the migrations - see the section on "schema.rb":#schema-dumping-and-you.

h4. Being Specific

If you need to run a specific migration up or down the +db:migrate:up+ and +db:migrate:down+ tasks will do that. Just specify the appropriate version and the corresponding migration will have its +up+ or +down+ method invoked, for example

<shell>
rake db:migrate:up VERSION=20080906120000
</shell>

will run the +up+ method from the 20080906120000 migration. These tasks check whether the migration has already run, so for example +db:migrate:up VERSION=20080906120000+ will do nothing if Active Record believes that 20080906120000 has already been run.

h4. Being Talkative

By default migrations tell you exactly what they're doing and how long it took. A migration creating a table and adding an index might produce output like this

<shell>
20080906170109 CreateProducts: migrating
-- create_table(:products)
   -> 0.0021s
-- add_index(:products, :name)
   -> 0.0026s
20080906170109 CreateProducts: migrated (0.0059s)
</shell>

Several methods are provided that allow you to control all this:

* +suppress_messages+ suppresses any output generated by its block
* +say+ outputs text (the second argument controls whether it is indented or not)
* +say_with_time+ outputs text along with how long it took to run its block. If the block returns an integer it assumes it is the number of rows affected.

For example, this migration

<ruby>
class CreateProducts < ActiveRecord::Migration
  def self.up
    suppress_messages do
      create_table :products do |t|
        t.string :name
        t.text :description
        t.timestamps
      end
    end
    say "Created a table"
    suppress_messages {add_index :products, :name}
    say "and an index!", true
    say_with_time 'Waiting for a while' do
      sleep 10
      250
    end
  end

  def self.down
    drop_table :products
  end
end
</ruby>

generates the following output

<shell>
20080906170109 CreateProducts: migrating
  Created a table
   -> and an index!
  Waiting for a while
   -> 10.0001s
   -> 250 rows
20080906170109 CreateProducts: migrated (10.0097s)
</shell>

If you just want Active Record to shut up then running +rake db:migrate VERBOSE=false+ will suppress any output.

h3. Using Models in Your Migrations

When creating or updating data in a migration it is often tempting to use one of your models. After all they exist to provide easy access to the underlying data. This can be done but some caution should be observed.

Consider for example a migration that uses the +Product+ model to update a row in the corresponding table. Alice later updates the +Product+ model, adding a new column and a validation on it. Bob comes back from holiday, updates the source and runs outstanding migrations with +rake db:migrate+, including the one that used the +Product+ model. When the migration runs the source is up to date and so the +Product+ model has the validation added by Alice. The database however is still old and so does not have that column and an error ensues because that validation is on a column that does not yet exist.

Frequently I just want to update rows in the database without writing out the SQL by hand: I'm not using anything specific to the model. One pattern for this is to define a copy of the model inside the migration itself, for example:

<ruby>
class AddPartNumberToProducts < ActiveRecord::Migration
  class Product < ActiveRecord::Base
  end

  def self.up
    ...
  end

  def self.down
    ...
  end
end
</ruby>

The migration has its own minimal copy of the +Product+ model and no longer cares about the +Product+ model defined in the application.

h4. Dealing with Changing Models

For performance reasons information about the columns a model has is cached. For example if you add a column to a table and then try and use the corresponding model to insert a new row it may try and use the old column information. You can force Active Record to re-read the column information with the +reset_column_information+ method, for example

<ruby>
class AddPartNumberToProducts < ActiveRecord::Migration
  class Product < ActiveRecord::Base
  end

  def self.up
    add_column :products, :part_number, :string
    Product.reset_column_information
    ...
  end

  def self.down
    ...
  end
end
</ruby>

h3. Schema Dumping and You

h4. What are Schema Files for?

Migrations, mighty as they may be, are not the authoritative source for your database schema. That role falls to either +db/schema.rb+ or an SQL file which Active Record generates by examining the database. They are not designed to be edited, they just represent the current state of the database.

There is no need (and it is error prone) to deploy a new instance of an app by replaying the entire migration history. It is much simpler and faster to just load into the database a description of the current schema.

For example, this is how the test database is created: the current development database is dumped (either to +db/schema.rb+ or +db/development.sql+) and then loaded into the test database.

Schema files are also useful if you want a quick look at what attributes an Active Record object has. This information is not in the model's code and is frequently spread across several migrations but is all summed up in the schema file. The "annotate_models":http://agilewebdevelopment.com/plugins/annotate_models plugin, which automatically adds (and updates) comments at the top of each model summarising the schema, may also be of interest.

h4. Types of Schema Dumps

There are two ways to dump the schema.
h4. Dealing with Changing Models

For performance reasons information about the columns a model has is cached. For example, if you add a column to a table and then try to use the corresponding model to insert a new row, it may try to use the old column information. You can force Active Record to re-read the column information with the +reset_column_information+ method, for example

<ruby>
class AddPartNumberToProducts < ActiveRecord::Migration
  class Product < ActiveRecord::Base
  end

  def self.up
    add_column :products, :part_number, :string
    Product.reset_column_information
    ...
  end

  def self.down
    ...
  end
end
</ruby>

h3. Schema Dumping and You

h4. What are Schema Files for?

Migrations, mighty as they may be, are not the authoritative source for your database schema. That role falls to either +db/schema.rb+ or an SQL file which Active Record generates by examining the database. They are not designed to be edited; they just represent the current state of the database.

There is no need (and it is error prone) to deploy a new instance of an app by replaying the entire migration history. It is much simpler and faster to just load into the database a description of the current schema.

For example, this is how the test database is created: the current development database is dumped (either to +db/schema.rb+ or +db/development.sql+) and then loaded into the test database.

Schema files are also useful if you want a quick look at what attributes an Active Record object has. This information is not in the model's code and is frequently spread across several migrations, but it is all summed up in the schema file. The "annotate_models":http://agilewebdevelopment.com/plugins/annotate_models plugin, which automatically adds (and updates) comments at the top of each model summarising the schema, may also be of interest.

h4. Types of Schema Dumps

There are two ways to dump the schema. This is set in +config/environment.rb+ by the +config.active_record.schema_format+ setting, which may be either +:sql+ or +:ruby+.

If +:ruby+ is selected, then the schema is stored in +db/schema.rb+. If you look at this file you'll find that it looks an awful lot like one very big migration:

<ruby>
ActiveRecord::Schema.define(:version => 20080906171750) do
  create_table "authors", :force => true do |t|
    t.string   "name"
    t.datetime "created_at"
    t.datetime "updated_at"
  end

  create_table "products", :force => true do |t|
    t.string   "name"
    t.text     "description"
    t.datetime "created_at"
    t.datetime "updated_at"
    t.string   "part_number"
  end
end
</ruby>

In many ways this is exactly what it is. This file is created by inspecting the database and expressing its structure using +create_table+, +add_index+, and so on. Because this is database independent, it could be loaded into any database that Active Record supports. This could be very useful if you were to distribute an application that is able to run against multiple databases.

There is, however, a trade-off: +db/schema.rb+ cannot express database-specific items such as foreign key constraints, triggers or stored procedures. While in a migration you can execute custom SQL statements, the schema dumper cannot reconstitute those statements from the database. If you are using features like this, then you should set the schema format to +:sql+.

Instead of using Active Record's schema dumper, the database's structure will be dumped using a tool specific to that database (via the +db:structure:dump+ Rake task) into +db/#{RAILS_ENV}_structure.sql+. For example, for PostgreSQL the +pg_dump+ utility is used and for MySQL this file will contain the output of +SHOW CREATE TABLE+ for the various tables. Loading this schema is simply a question of executing the SQL statements contained inside. By definition this will be a perfect copy of the database's structure, but this will usually prevent loading the schema into a database other than the one used to create it.

h4. Schema Dumps and Source Control

Because schema dumps are the authoritative source for your database schema, it is strongly recommended that you check them into source control.

h3. Active Record and Referential Integrity

The Active Record way claims that intelligence belongs in your models, not in the database. As such, features such as triggers or foreign key constraints, which push some of that intelligence back into the database, are not heavily used.

Validations such as +validates_uniqueness_of+ are one way in which models can enforce data integrity. The +:dependent+ option on associations allows models to automatically destroy child objects when the parent is destroyed. Like anything which operates at the application level, these cannot guarantee referential integrity and so some people augment them with foreign key constraints.

Although Active Record does not provide any tools for working directly with such features, the +execute+ method can be used to execute arbitrary SQL. There are also a number of plugins such as "redhillonrails":http://agilewebdevelopment.com/plugins/search?search=redhillonrails which add foreign key support to Active Record (including support for dumping foreign keys in +db/schema.rb+).

h3. Changelog

"Lighthouse ticket":http://rails.lighthouseapp.com/projects/16213-rails-guides/tickets/6

* September 14, 2008: initial version by "Frederick Cheung":credits.html#fcheung
{ "pile_set_name": "Github" }
package com.linsh.utilseverywhere;

import android.app.Activity;

/**
 * <pre>
 *    author : Senh Linsh
 *    github : https://github.com/SenhLinsh
 *    date   : 2017/11/09
 *    desc   : Utility class: Activity-related helpers
 * </pre>
 */
public class ActivityUtils {

    private ActivityUtils() {
    }

    /**
     * Sets the screen orientation.
     *
     * @param activity             the Activity
     * @param requestedOrientation the orientation, see {@link android.content.pm.ActivityInfo#screenOrientation ActivityInfo.screenOrientation}
     */
    public static void setScreenOrientation(Activity activity, int requestedOrientation) {
        activity.setRequestedOrientation(requestedOrientation);
    }
}
{ "pile_set_name": "Github" }
import React from 'react'; import TranslatedComponent from './TranslatedComponent'; import cn from 'classnames'; import { SizeMap } from '../shipyard/Constants'; import { Warning } from './SvgIcons'; /** * Ship Summary Table / Stats */ export default class ShipSummaryTable extends TranslatedComponent { static propTypes = { ship: React.PropTypes.object.isRequired }; /** * Render the table * @return {React.Component} Summary table */ render() { let ship = this.props.ship; let { language, tooltip, termtip } = this.context; let translate = language.translate; let u = language.units; let formats = language.formats; let round = formats.round; let { time, int } = formats; let armourDetails = null; let sgClassNames = cn({ warning: ship.sgSlot && !ship.shieldStrength, muted: !ship.sgSlot }); let sgRecover = '-'; let sgRecharge = '-'; let hide = tooltip.bind(null, null); if (ship.armourMultiplier > 1 || ship.armourAdded) { armourDetails = <u>({ (ship.armourMultiplier > 1 ? formats.rPct(ship.armourMultiplier) : '') + (ship.armourAdded ? ' + ' + ship.armourAdded : '') })</u>; } if (ship.shieldStrength) { sgRecover = time(ship.calcShieldRecovery()); sgRecharge = time(ship.calcShieldRecharge()); } return <div id='summary'> <table id='summaryTable'> <thead> <tr className='main'> <th rowSpan={2}>{translate('size')}</th> <th onMouseEnter={termtip.bind(null, 'maneuverability')} onMouseLeave={hide} rowSpan={2}>{translate('MNV')}</th> <th rowSpan={2} className={ cn({ 'bg-warning-disabled': !ship.canThrust() }) }>{translate('speed')}</th> <th rowSpan={2} className={ cn({ 'bg-warning-disabled': !ship.canBoost() }) }>{translate('boost')}</th> <th onMouseEnter={termtip.bind(null, 'damage per second')} onMouseLeave={hide} rowSpan={2}>{translate('DPS')}</th> <th rowSpan={2}>{translate('armour')}</th> <th colSpan={3}>{translate('shields')}</th> <th colSpan={3}>{translate('mass')}</th> <th rowSpan={2}>{translate('cargo')}</th> <th rowSpan={2}>{translate('fuel')}</th> <th colSpan={3}>{translate('jump range')}</th> <th onMouseEnter={termtip.bind(null, 'PHRASE_FASTEST_RANGE')} onMouseLeave={hide} colSpan={3}>{translate('fastest range')}</th> <th onMouseEnter={termtip.bind(null, 'mass lock factor')} onMouseLeave={hide} rowSpan={2}>{translate('MLF')}</th> </tr> <tr> <th className='lft'>{translate('strength')}</th> <th onMouseEnter={termtip.bind(null, 'PHRASE_SG_RECOVER', { cap: 0 })} onMouseLeave={hide}>{translate('recovery')}</th> <th onMouseEnter={termtip.bind(null, 'PHRASE_SG_RECHARGE', { cap: 0 })} onMouseLeave={hide}>{translate('recharge')}</th> <th className='lft'>{translate('hull')}</th> <th onMouseEnter={termtip.bind(null, 'PHRASE_UNLADEN', { cap: 0 })} onMouseLeave={hide}>{translate('unladen')}</th> <th onMouseEnter={termtip.bind(null, 'PHRASE_LADEN', { cap: 0 })} onMouseLeave={hide}>{translate('laden')}</th> <th className='lft'>{translate('max')}</th> <th>{translate('full tank')}</th> <th>{translate('laden')}</th> <th className='lft'>{translate('jumps')}</th> <th>{translate('unladen')}</th> <th>{translate('laden')}</th> </tr> </thead> <tbody> <tr> <td className='cap'>{translate(SizeMap[ship.class])}</td> <td>{ship.agility}/10</td> <td>{ ship.canThrust() ? <span>{int(ship.topSpeed)} {u['m/s']}</span> : <span className='warning'>0 <Warning/></span> }</td> <td>{ ship.canBoost() ? 
<span>{int(ship.topBoost)} {u['m/s']}</span> : <span className='warning'>0 <Warning/></span> }</td> <td>{round(ship.totalDps)}</td> <td>{int(ship.armour)} {armourDetails}</td> <td className={sgClassNames}>{int(ship.shieldStrength)} {u.MJ} { ship.shieldMultiplier > 1 && ship.shieldStrength > 0 ? <u>({formats.rPct(ship.shieldMultiplier)})</u> : null }</td> <td className={sgClassNames}>{sgRecover}</td> <td className={sgClassNames}>{sgRecharge}</td> <td>{ship.hullMass} {u.T}</td> <td>{round(ship.unladenMass)} {u.T}</td> <td>{round(ship.ladenMass)} {u.T}</td> <td>{round(ship.cargoCapacity)} {u.T}</td> <td>{round(ship.fuelCapacity)} {u.T}</td> <td>{round(ship.unladenRange)} {u.LY}</td> <td>{round(ship.fullTankRange)} {u.LY}</td> <td>{round(ship.ladenRange)} {u.LY}</td> <td>{round(ship.maxJumpCount)}</td> <td>{round(ship.unladenFastestRange)} {u.LY}</td> <td>{round(ship.ladenFastestRange)} {u.LY}</td> <td>{ship.masslock}</td> </tr> </tbody> </table> </div>; } }
{ "pile_set_name": "Github" }
# $Id: Makefile.in,v 1.5 2014/06/17 13:06:08 dtucker Exp $ sysconfdir=@sysconfdir@ piddir=@piddir@ srcdir=@srcdir@ top_srcdir=@top_srcdir@ VPATH=@srcdir@ CC=@CC@ LD=@LD@ CFLAGS=@CFLAGS@ CPPFLAGS=-I. -I.. -I$(srcdir) -I$(srcdir)/.. @CPPFLAGS@ @DEFS@ EXEEXT=@EXEEXT@ LIBCOMPAT=../libopenbsd-compat.a LIBS=@LIBS@ LDFLAGS=@LDFLAGS@ $(LIBCOMPAT) TESTPROGS=closefromtest$(EXEEXT) snprintftest$(EXEEXT) strduptest$(EXEEXT) \ strtonumtest$(EXEEXT) opensslvertest$(EXEEXT) all: t-exec ${OTHERTESTS} %$(EXEEXT): %.c $(LIBCOMPAT) $(CC) $(CFLAGS) $(CPPFLAGS) $(LDFLAGS) -o $@ $< $(LIBCOMPAT) $(LIBS) t-exec: $(TESTPROGS) @echo running compat regress tests @for TEST in ""$?; do \ echo "run test $${TEST}" ... 1>&2; \ ./$${TEST}$(EXEEXT) || exit $$? ; \ done @echo finished compat regress tests clean: rm -f *.o *.a core $(TESTPROGS) valid.out distclean: clean rm -f Makefile *~
{ "pile_set_name": "Github" }
{"text": ["l", "$", "d"], "created_at": "Wed Sep 09 21:13:07 +0000 2015", "user_id_str": "893833297"} {"text": ["l", "$", "d"], "created_at": "Wed Sep 09 04:38:14 +0000 2015", "user_id_str": "378950692"} {"text": ["l", "$", "d", "-", "a", "$", "ap", "rocky"], "created_at": "Wed Sep 09 18:24:51 +0000 2015", "user_id_str": "3370269394"} {"text": ["AT_USER", "AT_USER", "los", "humanos", "tienen", "que", "aceptar", "que", "los", "u", "$", "d", "entran", "solo", "si", "la", "rentabilidad", "es", "extraordinaria", "o", "es", "libre", "de", "impuestos"], "created_at": "Wed Sep 09 18:29:37 +0000 2015", "user_id_str": "725691528"} {"text": ["l", "$", "d"], "created_at": "Wed Sep 09 01:37:52 +0000 2015", "user_id_str": "449706817"} {"text": ["l", "$", "d"], "created_at": "Wed Sep 09 04:57:11 +0000 2015", "user_id_str": "498933028"} {"text": ["l", "$", "d", ".", "URL"], "created_at": "Wed Sep 09 08:20:02 +0000 2015", "user_id_str": "214201885"}
{ "pile_set_name": "Github" }
/*
 *
 * Licensed to the Apache Software Foundation (ASF) under one
 * or more contributor license agreements.  See the NOTICE file
 * distributed with this work for additional information
 * regarding copyright ownership.  The ASF licenses this file
 * to you under the Apache License, Version 2.0 (the
 * "License"); you may not use this file except in compliance
 * with the License.  You may obtain a copy of the License at
 *
 *   http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing,
 * software distributed under the License is distributed on an
 * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
 * KIND, either express or implied.  See the License for the
 * specific language governing permissions and limitations
 * under the License.
 *
 */

describe('Vibration (navigator.notification.vibrate)', function () {
    it("navigator.notification should exist", function() {
        expect(navigator.notification).toBeDefined();
    });

    it("should contain a vibrate function", function() {
        // Assert on the property itself rather than on typeof: typeof
        // always yields a string, so that check could never fail.
        expect(navigator.notification.vibrate).toBeDefined();
        expect(typeof navigator.notification.vibrate).toBe("function");
    });
});
{ "pile_set_name": "Github" }
// Copyright 2015 the V8 project authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // Flags: --allow-natives-syntax --stress-compaction --verify-heap // Flags: --stack-size=100 // Load the debug context to fill up code space. %GetDebugContext(); %GetDebugContext(); // Recurse and run regexp code. assertThrows(function f() { f(/./.test("a")); }, RangeError);
{ "pile_set_name": "Github" }
package regex; // Sun Microsystems Example Code @(#)Grep.java 1.1 01/05/10 // Search a list of files for lines that match a given regular-expression // pattern. Demonstrates NIO mapped byte buffers, charsets, and regular // expressions. import java.io.File; import java.io.FileInputStream; import java.io.IOException; import java.nio.CharBuffer; import java.nio.MappedByteBuffer; import java.nio.channels.FileChannel; import java.nio.charset.Charset; import java.nio.charset.CharsetDecoder; import java.util.regex.Matcher; import java.util.regex.Pattern; import java.util.regex.PatternSyntaxException; public class GrepSun { // Charset and decoder for ISO-8859-15 private static Charset charset = Charset.forName("ISO-8859-15"); private static CharsetDecoder decoder = charset.newDecoder(); // Pattern used to parse lines private static Pattern linePattern = Pattern.compile(".*\r?\n"); // The input pattern that we're looking for private static Pattern pattern; // Compile the pattern from the command line // private static void compile(String pat) { try { pattern = Pattern.compile(pat); } catch (PatternSyntaxException x) { System.err.println(x.getMessage()); System.exit(1); } } // Use the linePattern to break the given CharBuffer into lines, applying // the input pattern to each line to see if we have a match // private static void grep(File f, CharBuffer cb) { Matcher lm = linePattern.matcher(cb); // Line matcher Matcher pm = null; // Pattern matcher int lines = 0; while (lm.find()) { lines++; CharSequence cs = lm.group(); // The current line if (pm == null) pm = pattern.matcher(cs); else pm.reset(cs); if (pm.find()) System.out.print(f + ":" + lines + ":" + cs); if (lm.end() == cb.limit()) break; } } // Search for occurrences of the input pattern in the given file // private static void grep(File f) throws IOException { // Open the file and then get a channel from the stream FileInputStream fis = new FileInputStream(f); FileChannel fc = fis.getChannel(); // Get the file's size and then map it into memory int sz = (int)fc.size(); MappedByteBuffer bb = fc.map(FileChannel.MapMode.READ_ONLY, 0, sz); // Decode the file into a char buffer CharBuffer cb = decoder.decode(bb); // Perform the search grep(f, cb); // Close the channel and the stream fc.close(); fis.close(); } public static void main(String[] args) { if (args.length < 2) { System.err.println("Usage: java Grep pattern file..."); return; } compile(args[0]); for (int i = 1; i < args.length; i++) { File f = new File(args[i]); try { grep(f); } catch (IOException x) { System.err.println(f + ": " + x); } } } }
{ "pile_set_name": "Github" }
package com.kakarote.crm9.erp.admin.service;

import cn.hutool.core.util.StrUtil;
import com.kakarote.crm9.common.constant.BaseConstant;
import com.kakarote.crm9.erp.admin.entity.AdminDept;
import com.kakarote.crm9.erp.admin.entity.AdminUser;
import com.kakarote.crm9.utils.BaseUtil;
import com.kakarote.crm9.utils.R;
import com.jfinal.plugin.activerecord.Db;
import com.jfinal.plugin.activerecord.Record;

import java.util.*;
import java.util.stream.Collectors;

public class AdminDeptService {
    public R setDept(AdminDept adminDept) {
        boolean bol;
        if (adminDept.getDeptId() == null) {
            bol = adminDept.save();
        } else {
            if (adminDept.getPid() != null && adminDept.getPid() != 0) {
                List<Record> topDeptList = queryDeptTree("update", adminDept.getDeptId());
                boolean isContain = false;
                for (Record record : topDeptList) {
                    if (record.getInt("id").equals(adminDept.getPid())) {
                        isContain = true;
                        break;
                    }
                }
                if (!isContain) {
                    return R.error("A subordinate of this department cannot be set as its parent department");
                }
            }
            bol = adminDept.update();
        }
        return R.isSuccess(bol, "Failed to set department");
    }

    public List<Record> queryDeptTree(String type, Integer id) {
        List<Record> allDeptList = new ArrayList<>();
        List<Record> adminDeptList = Db.find("select dept_id as id,name,pid from 72crm_admin_dept");
        List<Record> recordList = buildTreeBy2Loop(adminDeptList, 0, allDeptList);
        if (StrUtil.isNotBlank(type) && "tree".equals(type)) {
            return recordList;
        } else if (StrUtil.isBlank(type) || "save".equals(type)) {
            return adminDeptList;
        } else if (StrUtil.isNotBlank(type) && "update".equals(type)) {
            return queryTopDeptList(id);
        } else {
            return new ArrayList<>();
        }
    }

    /**
     * Queries the departments that may be set as a parent department.
     */
    private List<Record> queryTopDeptList(Integer deptId) {
        List<Record> recordList = Db.find("select dept_id as id,name,pid from 72crm_admin_dept");
        AdminUserService adminUserService = new AdminUserService();
        List<Integer> subDeptList = adminUserService.queryChileDeptIds(deptId, BaseConstant.AUTH_DATA_RECURSION_NUM);
        recordList.removeIf(record -> subDeptList.contains(record.getInt("id")));
        recordList.removeIf(record -> record.getInt("id").equals(deptId));
        return recordList;
    }

    /**
     * Queries the departments within a user's data permission scope by userId.
     *
     * @param userId user ID
     * @return departments within the permission scope
     * @author zhangzhiwei
     */
    public List<Record> queryDeptByAuth(Long userId) {
        // Query the user's data permissions, sorted from high to low
        List<Integer> list = Db.query(Db.getSql("admin.role.queryDataTypeByUserId"), userId);
        List<Record> adminDepts = new ArrayList<>();
        if (list.size() == 0) {
            return adminDepts;
        }
        // The user holds the highest data permission
        if (list.contains(5)) {
            return Db.find("select dept_id as id,name,pid from 72crm_admin_dept");
        } else {
            adminDepts.add(Db.findFirst("select dept_id as id,name,pid from 72crm_admin_dept where dept_id=?", BaseUtil.getUser().getDeptId()));
            if (list.contains(4)) {
                adminDepts.addAll(queryDeptByParentDept(BaseUtil.getUser().getDeptId(), BaseConstant.AUTH_DATA_RECURSION_NUM));
            }
            if (list.contains(2)) {
                adminDepts.addAll(queryDeptByParentUser(userId, BaseConstant.AUTH_DATA_RECURSION_NUM));
            }
        }
        ArrayList<Record> records = new ArrayList<>();
        adminDepts.stream().collect(Collectors.groupingBy(record -> record.getInt("id"))).forEach((k, v) -> records.add(v.get(0)));
        return records;
    }

    public List<Record> queryDeptByParentDept(Integer deptId, Integer deepness) {
        List<Record> recordList = new ArrayList<>();
        if (deepness > 0) {
            List<Record> records = Db.find("select dept_id as id,name,pid from 72crm_admin_dept where pid=?", deptId);
            recordList.addAll(records);
            records.forEach(record -> {
                recordList.addAll(queryDeptByParentDept(record.getInt("id"), deepness - 1));
            });
        }
        return recordList;
    }

    private List<Record> queryDeptByParentUser(Long userId, Integer deepness) {
        List<Record> recordList = new ArrayList<>();
        if (deepness > 0) {
            List<Record> records = Db.find(Db.getSql("admin.dept.queryDeptByParentUser"), userId);
            recordList.addAll(records);
            records.forEach(record -> {
                recordList.addAll(queryDeptByParentUser(record.getLong("user_id"), deepness - 1));
            });
        }
        return recordList;
    }

    private List<Record> buildTreeBy2Loop(List<Record> treeNodes, Integer root, List<Record> allDeptList) {
        List<Record> trees = new ArrayList<>();
        for (Record node : treeNodes) {
            if (root.equals(node.getInt("pid"))) {
                node.set("level", 1);
                node.set("label", node.getStr("name"));
                trees.add(node);
                allDeptList.add(node);
            }
            List<Record> childTrees = new ArrayList<>();
            for (Record treeNode : treeNodes) {
                if (node.getInt("id").equals(treeNode.getInt("pid"))) {
                    treeNode.set("level", node.getInt("level") + 1);
                    treeNode.set("label", treeNode.getStr("name"));
                    childTrees.add(treeNode);
                    allDeptList.add(treeNode);
                }
            }
            if (childTrees.size() != 0) {
                node.set("children", childTrees);
            }
        }
        return trees;
    }

    public R deleteDept(String id) {
        Integer userCount = Db.queryInt("select count(*) from 72crm_admin_user where dept_id = ?", id);
        if (userCount > 0) {
            return R.error("This department has employees and cannot be deleted!");
        }
        Integer childDeptCount = Db.queryInt("select count(*) from 72crm_admin_dept where pid = ?", id);
        if (childDeptCount > 0) {
            return R.error("This department has sub-departments and cannot be deleted!");
        }
        int delete = Db.delete("delete from 72crm_admin_dept where dept_id = ?", id);
        return delete > 0 ? R.ok() : R.error();
    }
}
{ "pile_set_name": "Github" }
{% load astakos_tags filters %} <ul class="clearfix"> {% with object|selected_resource_groups as selected_groups %} {% for group, info in resource_groups_dict.items %} <li> <a href="#{{group}}" id="group_{{group}}" {% if group in selected_groups %}class="selected"{% endif %}> <img src="{{ IM_STATIC_URL }}images/create-{{group}}.png" alt="{{ group }}" /> </a> <input type="hidden" name="is_selected_{{group}}" id="proxy_id_is_selected_{{group}}" {% if group in selected_groups %}value="1"{% else %}value="0"{% endif %}/> <p class="msg">{{ info.help_text }}</p> </li> {% endfor %} {% endwith %} </ul>
{ "pile_set_name": "Github" }
% Generated by roxygen2: do not edit by hand % Please edit documentation in R/data.R \docType{data} \name{us_rent_income} \alias{us_rent_income} \title{US rent and income data} \format{ A dataset with variables: \describe{ \item{GEOID}{FIP state identifier} \item{NAME}{Name of state} \item{variable}{Variable name: income = median yearly income, rent = median monthly rent} \item{estimate}{Estimated value} \item{moe}{90\% margin of error} } } \usage{ us_rent_income } \description{ Captured from the 2017 American Community Survey using the tidycensus package. } \keyword{datasets}
{ "pile_set_name": "Github" }
# synergy -- mouse and keyboard sharing utility # Copyright (C) 2012-2016 Symless Ltd. # Copyright (C) 2009 Nick Bolton # # This package is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # found in the file LICENSE that should have accompanied this file. # # This package is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program. If not, see <http://www.gnu.org/licenses/>. file(GLOB_RECURSE headers "*.h") file(GLOB_RECURSE sources "*.cpp") # remove platform files (specific platform added later). file(GLOB_RECURSE remove_platform "platform/*") list(REMOVE_ITEM headers ${remove_platform}) list(REMOVE_ITEM sources ${remove_platform}) # platform if (WIN32) file(GLOB platform_sources "platform/MSWindows*.cpp") file(GLOB platform_headers "platform/MSWindows*.h") elseif (APPLE) file(GLOB platform_sources "platform/OSX*.cpp") file(GLOB platform_headers "platform/OSX*.h") elseif (UNIX) file(GLOB platform_sources "platform/XWindows*.cpp") file(GLOB platform_headers "platform/XWindows*.h") endif() list(APPEND sources ${platform_sources}) list(APPEND headers ${platform_headers}) file(GLOB_RECURSE global_headers "../../test/global/*.h") file(GLOB_RECURSE global_sources "../../test/global/*.cpp") list(APPEND headers ${global_headers}) list(APPEND sources ${global_sources}) file(GLOB_RECURSE mock_headers "../../test/mock/*.h") file(GLOB_RECURSE mock_sources "../../test/mock/*.cpp") list(APPEND headers ${mock_headers}) list(APPEND sources ${mock_sources}) if (SYNERGY_ADD_HEADERS) list(APPEND sources ${headers}) endif() include_directories( ../../ ../../lib/ ../../../ext/googletest/googletest/include ../../../ext/googletest/googlemock/include ) if (UNIX) include_directories( ../../.. ) endif() add_executable(integtests ${sources}) target_link_libraries(integtests arch base client common io ipc mt net platform server synlib gtest gmock ${libs} ${OPENSSL_LIBS})
{ "pile_set_name": "Github" }
import { getFragment } from '@vuepress/test-utils' import { Md } from './util' import preWrapper from '../lib/preWrapper.js' const md = Md() const mdP = Md().use(preWrapper) describe('preWrapper', () => { test('should wrap code with triple back quote', () => { const input = getFragment(__dirname, 'code-prewrapper-with-quotes.md') const output1 = md.render(input) const output2 = mdP.render(input) expect(output1 === output2).toBe(false) expect(output2).toMatchSnapshot() }) test('should wrap code with quadruple space', () => { const input = getFragment(__dirname, 'code-prewrapper-with-spaces.md') const output1 = md.render(input) const output2 = mdP.render(input) expect(output1 === output2).toBe(false) expect(output2).toMatchSnapshot() }) })
{ "pile_set_name": "Github" }
package loon.physics; import loon.LSysException; import loon.geom.FloatValue; public class PWorldBox { private PBody northBody, southBody, eastBody, westBody; private PPhysManager manager; private boolean build; public final FloatValue density = new FloatValue(0); private float mx, my, mw, mh; private float thick; public PWorldBox(PPhysManager world, float x, float y, float w, float h) { this.build = false; this.manager = world; this.set(x, y, w, h); this.thick = 1f; this.density.set(1f); } public void removeWorld() { if (build) { manager.world.removeBody(northBody); manager.world.removeBody(southBody); manager.world.removeBody(eastBody); manager.world.removeBody(westBody); } build = false; } public boolean isBuild() { return build; } public void set(float x, float y, float w, float h) { this.mx = x; this.my = y; this.mw = w; this.mh = h; } public void build() { if (build) { throw new LSysException("Build Error !"); } this.manager.addBox(true, 0f, 0f, mw, thick, 0, density.result()); this.manager.addBox(true, 0f, mh, mw, thick, 0, density.result()); this.manager.addBox(true, 0f, 0f, thick, mh, 0, density.result()); this.manager.addBox(true, mw, 0f, thick, mh, 0, density.result()); this.build = true; } public float getDensity() { return density.result(); } public void setDensity(float d) { this.density.set(d); } public PBody getEastBody() { return eastBody; } public void setEastBody(PBody eastBody) { this.eastBody = eastBody; } public PBody getNorthBody() { return northBody; } public void setNorthBody(PBody northBody) { this.northBody = northBody; } public PBody getSouthBody() { return southBody; } public void setSouthBody(PBody southBody) { this.southBody = southBody; } public PBody getWestBody() { return westBody; } public void setWestBody(PBody westBody) { this.westBody = westBody; } public float x() { return mx; } public void setX(float mx) { this.mx = mx; } public float y() { return my; } public void setY(float my) { this.my = my; } public float getWidth() { return mw; } public void setWidth(float mw) { this.mw = mw; } public float getHeight() { return mh; } public void setHeight(float mh) { this.mh = mh; } public float getThick() { return thick; } public void setThick(float thick) { this.thick = thick; } }
{ "pile_set_name": "Github" }
/* * The MIT License (MIT) * * Copyright (c) 2015 - present Instructure, Inc. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in all * copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE * SOFTWARE. */ const path = require('path') const fs = require('fs') const semver = require('semver') const parseMajorVersion = ({ version }) => { const semanticVersion = semver.coerce(version) return semanticVersion ? semanticVersion.major : version } module.exports = ({ name, type, version } = {}) => { const root = path.resolve(path.dirname(require.resolve('@instructure/instui-config/package.json')), type) const paths = [] fs.readdirSync(root).forEach((dirName) => { if (!version || parseMajorVersion({ version: dirName }) <= parseMajorVersion({ version })) { const filePath = path.join(root, dirName, name) if (fs.existsSync(filePath)) { paths.push(filePath) } } }) return paths }
{ "pile_set_name": "Github" }
// // ITStatusBarController.h // iTransmission // // Created by Mike Chen on 10/19/11. // Copyright (c) 2011 __MyCompanyName__. All rights reserved. // #import <UIKit/UIKit.h> @class ITStatusBar; @interface ITStatusBarController : UIViewController @property (strong, atomic) IBOutlet UIView *containerView; @property (strong, atomic) IBOutlet ITStatusBar *statusBar; @property (strong, nonatomic) UIViewController *contentViewController; - (void)fillContentView; - (void)newStatisticsArrived:(NSNotification*)notif; @end
{ "pile_set_name": "Github" }
package cc.mallet.util; import java.io.File; import java.io.FileReader; import java.io.PrintWriter; import java.util.ArrayList; import java.util.Iterator; import java.util.logging.Logger; import com.google.errorprone.annotations.Var; import cc.mallet.pipe.NGramPreprocessor; import cc.mallet.pipe.Pipe; import cc.mallet.pipe.iterator.CsvIterator; import cc.mallet.types.Instance; /** * This class replaces ngrams as specified in the configuration files. * Input: tab-delimited text, one instance per line * Output: tab-delimited text, but with deletions and text replacements. */ public class Replacer { protected static Logger logger = MalletLogger.getLogger(Replacer.class.getName()); static CommandOption.SpacedStrings inputFiles = new CommandOption.SpacedStrings (Replacer.class, "input", "FILE [FILE ...]", true, null, "The file(s) containing data, one instance per line", null); static CommandOption.File outputFile = new CommandOption.File (Replacer.class, "output", "FILE", true, new File("mallet.data"), "Write the strings with replacements applied to this file", null); static CommandOption.SpacedStrings replacementFiles = new CommandOption.SpacedStrings (Replacer.class, "replacement-files", "FILE [FILE ...]", true, null, "files containing string replacements, one per line:\n 'A B [tab] C' replaces A B with C,\n 'A B' replaces A B with A_B", null); static CommandOption.SpacedStrings deletionFiles = new CommandOption.SpacedStrings (Replacer.class, "deletion-files", "FILE [FILE ...]", true, null, "files containing strings to delete after replacements but before tokenization (ie multiword stop terms)", null); static CommandOption.String lineRegex = new CommandOption.String (Replacer.class, "line-regex", "REGEX", true, "^([^\\t]*)\\t([^\\t]*)\\t(.*)", "Regular expression containing regex-groups for label, name and data.", null); static CommandOption.Integer nameGroup = new CommandOption.Integer (Replacer.class, "name", "INTEGER", true, 1, "The index of the group containing the instance name.\n Use 0 to indicate that this field is not used.", null); static CommandOption.Integer labelGroup = new CommandOption.Integer (Replacer.class, "label", "INTEGER", true, 2, "The index of the group containing the label string.\n Use 0 to indicate that this field is not used.", null); static CommandOption.Integer dataGroup = new CommandOption.Integer (Replacer.class, "data", "INTEGER", true, 3, "The index of the group containing the data.", null); public static void main (String[] args) throws Exception { // Process the command-line options CommandOption.setSummary (Replacer.class, "Tool for modifying text with n-gram preprocessing"); CommandOption.process (Replacer.class, args); NGramPreprocessor preprocessor = new NGramPreprocessor(); if (replacementFiles.value != null) { for (String filename: replacementFiles.value) { System.out.println("including replacements from " + filename); preprocessor.loadReplacements(filename); } } if (deletionFiles.value != null) { for (String filename: deletionFiles.value) { System.out.println("including deletions from " + filename); preprocessor.loadDeletions(filename); } } ArrayList<Pipe> pipes = new ArrayList<Pipe>(); PrintWriter out = new PrintWriter(outputFile.value); for (String filename: inputFiles.value) { logger.info("Loading " + filename); CsvIterator reader = new CsvIterator(new FileReader(filename), lineRegex.value, dataGroup.value, labelGroup.value, nameGroup.value); Iterator<Instance> iterator = preprocessor.newIteratorFrom(reader); @Var int count = 0; // We're not saving 
the instance list, just writing to the out file
			while (iterator.hasNext()) {
				Instance instance = iterator.next();

				out.println(instance.getName() + "\t" + instance.getTarget() + "\t" + instance.getData());

				count++;
				if (count % 10000 == 0) {
					logger.info("instance " + count);
				}
			}
		}

		out.close();
	}
}
{ "pile_set_name": "Github" }
# Protocol Buffers for Go with Gadgets [![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf) [![GoDoc](https://godoc.org/github.com/gogo/protobuf?status.svg)](http://godoc.org/github.com/gogo/protobuf) gogoprotobuf is a fork of <a href="https://github.com/golang/protobuf">golang/protobuf</a> with extra code generation features. This code generation is used to achieve: - fast marshalling and unmarshalling - more canonical Go structures - goprotobuf compatibility - less typing by optionally generating extra helper code - peace of mind by optionally generating test and benchmark code - other serialization formats Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this <a href="https://github.com/gogo/protobuf/issues/191">issue</a> ## Users These projects use gogoprotobuf: - <a href="http://godoc.org/github.com/coreos/etcd">etcd</a> - <a href="https://blog.gopheracademy.com/advent-2015/etcd-distributed-key-value-store-with-grpc-http2/">blog</a> - <a href="https://github.com/coreos/etcd/blob/master/etcdserver/etcdserverpb/etcdserver.proto">sample proto file</a> - <a href="https://www.spacemonkey.com/">spacemonkey</a> - <a href="https://www.spacemonkey.com/blog/posts/go-space-monkey">blog</a> - <a href="http://badoo.com">badoo</a> - <a href="https://github.com/badoo/lsd/blob/32061f501c5eca9c76c596d790b450501ba27b2f/proto/lsd.proto">sample proto file</a> - <a href="https://github.com/mesos/mesos-go">mesos-go</a> - <a href="https://github.com/mesos/mesos-go/blob/f9e5fb7c2f50ab5f23299f26b6b07c5d6afdd252/api/v0/mesosproto/authentication.proto">sample proto file</a> - <a href="https://github.com/mozilla-services/heka">heka</a> - <a href="https://github.com/mozilla-services/heka/commit/eb72fbf7d2d28249fbaf8d8dc6607f4eb6f03351">the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com</a> - <a href="https://github.com/cockroachdb/cockroach">cockroachdb</a> - <a href="https://github.com/cockroachdb/cockroach/blob/651d54d393e391a30154e9117ab4b18d9ee6d845/roachpb/metadata.proto">sample proto file</a> - <a href="https://github.com/jbenet/go-ipfs">go-ipfs</a> - <a href="https://github.com/ipfs/go-ipfs/blob/2b6da0c024f28abeb16947fb452787196a6b56a2/merkledag/pb/merkledag.proto">sample proto file</a> - <a href="https://github.com/philhofer/rkive">rkive-go</a> - <a href="https://github.com/philhofer/rkive/blob/e5dd884d3ea07b341321073882ae28aa16dd11be/rpbc/riak_dt.proto">sample proto file</a> - <a href="https://www.dropbox.com">dropbox</a> - <a href="https://srclib.org/">srclib</a> - <a href="https://github.com/sourcegraph/srclib/blob/6538858f0c410cac5c63440317b8d009e889d3fb/graph/def.proto">sample proto file</a> - <a href="http://www.adyoulike.com/">adyoulike</a> - <a href="http://www.cloudfoundry.org/">cloudfoundry</a> - <a href="https://github.com/cloudfoundry/bbs/blob/d673710b8c4211037805129944ee4c5373d6588a/models/events.proto">sample proto file</a> - <a href="http://kubernetes.io/">kubernetes</a> - <a href="https://github.com/kubernetes/kubernetes/tree/88d8628137f94ee816aaa6606ae8cd045dee0bff/cmd/libs/go2idl">go2idl built on top of gogoprotobuf</a> - <a href="https://dgraph.io/">dgraph</a> - <a href="https://github.com/dgraph-io/dgraph/releases/tag/v0.4.3">release notes</a> - <a href="https://discuss.dgraph.io/t/gogoprotobuf-is-extremely-fast/639">benchmarks</a></a> - <a href="https://github.com/centrifugal/centrifugo">centrifugo</a> - <a 
href="https://forum.golangbridge.org/t/centrifugo-real-time-messaging-websocket-or-sockjs-server-v1-5-0-released/2861">release notes</a> - <a href="https://medium.com/@fzambia/centrifugo-protobuf-inside-json-outside-21d39bdabd68#.o3icmgjqd">blog</a> - <a href="https://github.com/docker/swarmkit">docker swarmkit</a> - <a href="https://github.com/docker/swarmkit/blob/63600e01af3b8da2a0ed1c9fa6e1ae4299d75edb/api/objects.proto">sample proto file</a> - <a href="https://nats.io/">nats.io</a> - <a href="https://github.com/nats-io/go-nats-streaming/blob/master/pb/protocol.proto">go-nats-streaming</a> - <a href="https://github.com/pingcap/tidb">tidb</a> - Communication between <a href="https://github.com/pingcap/tipb/blob/master/generate-go.sh#L4">tidb</a> and <a href="https://github.com/pingcap/kvproto/blob/master/generate_go.sh#L3">tikv</a> - <a href="https://github.com/AsynkronIT/protoactor-go">protoactor-go</a> - <a href="https://github.com/AsynkronIT/protoactor-go/blob/master/protobuf/protoc-gen-protoactor/main.go">vanity command</a> that also generates actors from service definitions - <a href="https://containerd.io/">containerd</a> - <a href="https://github.com/containerd/containerd/tree/master/cmd/protoc-gen-gogoctrd">vanity command with custom field names</a> that conforms to the golang convention. - <a href="https://github.com/heroiclabs/nakama">nakama</a> - <a href="https://github.com/src-d/proteus">proteus</a> - <a href="https://github.com/go-graphite">carbonzipper stack</a> - <a href="https://sendgrid.com/">sendgrid</a> - <a href="https://github.com/zero-os/0-stor">zero-os/0-stor</a> - <a href="https://github.com/spacemeshos/go-spacemesh">go-spacemesh</a> - <a href="https://github.com/weaveworks/cortex">cortex</a> - <a href="https://github.com/weaveworks/cortex/blob/fee02a59729d3771ef888f7bf0fd050e1197c56e/pkg/ingester/client/cortex.proto">sample proto file</a> - <a href="http://skywalking.apache.org/">Apache SkyWalking APM</a> - Istio telemetry receiver based on Mixer bypass protocol Please let us know if you are using gogoprotobuf by posting on our <a href="https://groups.google.com/forum/#!topic/gogoprotobuf/Brw76BxmFpQ">GoogleGroup</a>. ### Mentioned - <a href="http://www.slideshare.net/albertstrasheim/serialization-in-go">Cloudflare - go serialization talk - Albert Strasheim</a> - <a href="https://youtu.be/4xB46Xl9O9Q?t=557">GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson</a> - <a href="https://github.com/alecthomas/go_serialization_benchmarks">alecthomas' go serialization benchmarks</a> - <a href="http://agniva.me/go/2017/11/18/gogoproto.html">Go faster with gogoproto - Agniva De Sarker</a> - <a href="https://www.youtube.com/watch?v=CY9T020HLP8">Evolution of protobuf (Gource Visualization) - Landon Wilkins</a> - <a href="https://fosdem.org/2018/schedule/event/gopherjs/">Creating GopherJS Apps with gRPC-Web - Johan Brandhorst</a> - <a href="https://jbrandhorst.com/post/gogoproto/">So you want to use GoGo Protobuf - Johan Brandhorst</a> - <a href="https://jbrandhorst.com/post/grpc-errors/">Advanced gRPC Error Usage - Johan Brandhorst</a> - <a href="https://www.udemy.com/grpc-golang/?couponCode=GITHUB10">gRPC Golang Course on Udemy - Stephane Maarek</a> ## Getting Started There are several ways to use gogoprotobuf, but for all you need to install go and protoc. 
After that you can choose: - Speed - More Speed and more generated code - Most Speed and most customization ### Installation To install it, you must first have Go (at least version 1.6.3 or 1.9 if you are using gRPC) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Latest patch versions of 1.10 and 1.11 are continuously tested. Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.6.1 are continuously tested. ### Speed Install the protoc-gen-gofast binary go get github.com/gogo/protobuf/protoc-gen-gofast Use it to generate faster marshaling and unmarshaling go code for your protocol buffers. protoc --gofast_out=. myproto.proto This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). ### More Speed and more generated code Fields without pointers cause less time in the garbage collector. More code generation results in more convenient methods. Other binaries are also included: protoc-gen-gogofast (same as gofast, but imports gogoprotobuf) protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields) protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods) Installing any of these binaries is easy. Simply run: go get github.com/gogo/protobuf/proto go get github.com/gogo/protobuf/{binary} go get github.com/gogo/protobuf/gogoproto These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). You can also use your own binary. To generate the code, you also need to set the include path properly. protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/gogo/protobuf/protobuf --{binary}_out=. myproto.proto To use proto files from "google/protobuf" you need to add additional args to protoc. protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/gogo/protobuf/protobuf --{binary}_out=\ Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,\ Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,\ Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types,\ Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\ Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:. \ myproto.proto Note that in the protoc command, {binary} does not contain the initial prefix of "protoc-gen". ### Most Speed and most customization Customizing the fields of the messages to be the fields that you actually want to use removes the need to copy between the structs you use and structs you use to serialize. gogoprotobuf also offers more serialization formats and generation of tests and even more methods. Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation. Install protoc-gen-gogo: go get github.com/gogo/protobuf/proto go get github.com/gogo/protobuf/jsonpb go get github.com/gogo/protobuf/protoc-gen-gogo go get github.com/gogo/protobuf/gogoproto ## GRPC It works the same as golang/protobuf, simply specify the plugin. Here is an example using gofast: protoc --gofast_out=plugins=grpc:. my.proto See [https://github.com/gogo/grpc-example](https://github.com/gogo/grpc-example) for an example of using gRPC with gogoprotobuf and the wider grpc-ecosystem. 
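## Using the generated code

Whichever binary you pick, the generated message types implement `Marshal` and `Unmarshal` methods directly, so no reflection is needed at runtime. As a minimal sketch (assuming a hypothetical proto3 file `myproto.proto` that defines a `MyMessage` with a `name` field, generated into a package imported here as `mypb`):

    package main

    import (
        "log"

        mypb "example.com/myproject/mypb" // hypothetical generated package
    )

    func main() {
        in := &mypb.MyMessage{Name: "gogo"}

        // Marshal/Unmarshal are generated by protoc-gen-gofast and friends.
        data, err := in.Marshal()
        if err != nil {
            log.Fatal(err)
        }

        out := &mypb.MyMessage{}
        if err := out.Unmarshal(data); err != nil {
            log.Fatal(err)
        }
        log.Println(out.Name)
    }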
## License This software is licensed under the 3-Clause BSD License ("BSD License 2.0", "Revised BSD License", "New BSD License", or "Modified BSD License").
{ "pile_set_name": "Github" }
/* SPDX-License-Identifier: GPL-2.0 */ /* * NFS-private data for each "struct net". Accessed with net_generic(). */ #ifndef __NFS_NETNS_H__ #define __NFS_NETNS_H__ #include <linux/nfs4.h> #include <net/net_namespace.h> #include <net/netns/generic.h> struct bl_dev_msg { int32_t status; uint32_t major, minor; }; struct nfs_netns_client; struct nfs_net { struct cache_detail *nfs_dns_resolve; struct rpc_pipe *bl_device_pipe; struct bl_dev_msg bl_mount_reply; wait_queue_head_t bl_wq; struct mutex bl_mutex; struct list_head nfs_client_list; struct list_head nfs_volume_list; #if IS_ENABLED(CONFIG_NFS_V4) struct idr cb_ident_idr; /* Protected by nfs_client_lock */ unsigned short nfs_callback_tcpport; unsigned short nfs_callback_tcpport6; int cb_users[NFS4_MAX_MINOR_VERSION + 1]; #endif struct nfs_netns_client *nfs_client; spinlock_t nfs_client_lock; ktime_t boot_time; #ifdef CONFIG_PROC_FS struct proc_dir_entry *proc_nfsfs; #endif }; extern unsigned int nfs_net_id; #endif
{ "pile_set_name": "Github" }
#!/usr/bin/env zunit

@setup {
    function uname {
        printf "Linux"
    }

    # Mock function for passing builds where we don't
    # really need notify-send to be installed (e.g. Mac OSX)
    function notify-send {
    }
}

@test 'version exported' {
    git_version="$(git tag --list | sort -V | tail -1)"
    git tag --list

    load "../auto-notify.plugin.zsh"

    assert "$AUTO_NOTIFY_VERSION" is_not_empty
    assert "$AUTO_NOTIFY_VERSION" same_as "$git_version"
}

@test 'print warning if notify-send is not installed' {
    function type {
        return 1
    }

    run load "../auto-notify.plugin.zsh"

    assert "$lines[1]" same_as "'notify-send' must be installed for zsh-auto-notify to work"
    assert "$lines[2]" same_as "Please install it with your relevant package manager"
}

@test 'dont load auto-notify if notify-send is not installed' {
    function type {
        return 1
    }

    load "../auto-notify.plugin.zsh"

    assert "_auto_notify_track" not_in $preexec_functions
    assert "_auto_notify_send" not_in $precmd_functions
}

@test 'hook functions are loaded by default' {
    load "../auto-notify.plugin.zsh"

    assert '_auto_notify_track' in $preexec_functions
    assert '_auto_notify_send' in $precmd_functions
}

@test 'enable/disable auto-notify' {
    load "../auto-notify.plugin.zsh"

    disable_auto_notify
    assert '_auto_notify_track' not_in $preexec_functions
    assert '_auto_notify_send' not_in $precmd_functions

    enable_auto_notify
    assert '_auto_notify_track' in $preexec_functions
    assert '_auto_notify_send' in $precmd_functions
}
{ "pile_set_name": "Github" }
/************************************************************************************

Filename    :   OVR_Stereo.cpp
Content     :   Stereo rendering functions
Created     :   November 30, 2013
Authors     :   Tom Forsyth

Copyright   :   Copyright 2014 Oculus VR, LLC All Rights reserved.

Licensed under the Oculus VR Rift SDK License Version 3.2 (the "License");
you may not use the Oculus VR Rift SDK except in compliance with the License,
which is provided at the time of installation or download, or which
otherwise accompanies this software in either electronic or hard copy form.

You may obtain a copy of the License at

http://www.oculusvr.com/licenses/LICENSE-3.2

Unless required by applicable law or agreed to in writing, the Oculus VR SDK
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

*************************************************************************************/

#include "OVR_Stereo.h"
#include "OVR_Profile.h"
#include "Kernel/OVR_Log.h"
#include "Kernel/OVR_Alg.h"

//To allow custom distortion to be introduced to CatMulSpline.
float (*CustomDistortion)(float) = NULL;
float (*CustomDistortionInv)(float) = NULL;


namespace OVR {

using namespace Alg;

//-----------------------------------------------------------------------------------

// Inputs are 4 points (pFitX[0],pFitY[0]) through (pFitX[3],pFitY[3])
// Result is four coefficients in pResult[0] through pResult[3] such that
// y = pResult[0] + x * ( pResult[1] + x * ( pResult[2] + x * ( pResult[3] ) ) );
// passes through all four input points.
// Return is true if it succeeded, false if it failed (because two control points
// have the same pFitX value).
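// Worked example: with pFitX = {0, 1, 2, 3} and pFitY = {1, 2, 5, 10},
// the unique cubic through the points is y = 1 + x*x, so the routine
// fills pResult = {1, 0, 1, 0} (constant, linear, quadratic, cubic terms).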
bool FitCubicPolynomial ( float *pResult, const float *pFitX, const float *pFitY ) { float d0 = ( ( pFitX[0]-pFitX[1] ) * ( pFitX[0]-pFitX[2] ) * ( pFitX[0]-pFitX[3] ) ); float d1 = ( ( pFitX[1]-pFitX[2] ) * ( pFitX[1]-pFitX[3] ) * ( pFitX[1]-pFitX[0] ) ); float d2 = ( ( pFitX[2]-pFitX[3] ) * ( pFitX[2]-pFitX[0] ) * ( pFitX[2]-pFitX[1] ) ); float d3 = ( ( pFitX[3]-pFitX[0] ) * ( pFitX[3]-pFitX[1] ) * ( pFitX[3]-pFitX[2] ) ); if ( ( d0 == 0.0f ) || ( d1 == 0.0f ) || ( d2 == 0.0f ) || ( d3 == 0.0f ) ) { return false; } float f0 = pFitY[0] / d0; float f1 = pFitY[1] / d1; float f2 = pFitY[2] / d2; float f3 = pFitY[3] / d3; pResult[0] = -( f0*pFitX[1]*pFitX[2]*pFitX[3] + f1*pFitX[0]*pFitX[2]*pFitX[3] + f2*pFitX[0]*pFitX[1]*pFitX[3] + f3*pFitX[0]*pFitX[1]*pFitX[2] ); pResult[1] = f0*(pFitX[1]*pFitX[2] + pFitX[2]*pFitX[3] + pFitX[3]*pFitX[1]) + f1*(pFitX[0]*pFitX[2] + pFitX[2]*pFitX[3] + pFitX[3]*pFitX[0]) + f2*(pFitX[0]*pFitX[1] + pFitX[1]*pFitX[3] + pFitX[3]*pFitX[0]) + f3*(pFitX[0]*pFitX[1] + pFitX[1]*pFitX[2] + pFitX[2]*pFitX[0]); pResult[2] = -( f0*(pFitX[1]+pFitX[2]+pFitX[3]) + f1*(pFitX[0]+pFitX[2]+pFitX[3]) + f2*(pFitX[0]+pFitX[1]+pFitX[3]) + f3*(pFitX[0]+pFitX[1]+pFitX[2]) ); pResult[3] = f0 + f1 + f2 + f3; return true; } #define TPH_SPLINE_STATISTICS 0 #if TPH_SPLINE_STATISTICS static float max_scaledVal = 0; static float average_total_out_of_range = 0; static float average_out_of_range; static int num_total = 0; static int num_out_of_range = 0; static int num_out_of_range_over_1 = 0; static int num_out_of_range_over_2 = 0; static int num_out_of_range_over_3 = 0; static float percent_out_of_range; #endif float EvalCatmullRom10Spline ( float const *K, float scaledVal ) { int const NumSegments = LensConfig::NumCoefficients; #if TPH_SPLINE_STATISTICS //Value should be in range of 0 to (NumSegments-1) (typically 10) if spline is valid. Right? if (scaledVal > (NumSegments-1)) { num_out_of_range++; average_total_out_of_range+=scaledVal; average_out_of_range = average_total_out_of_range / ((float) num_out_of_range); percent_out_of_range = 100.0f*(num_out_of_range)/num_total; } if (scaledVal > (NumSegments-1+1)) num_out_of_range_over_1++; if (scaledVal > (NumSegments-1+2)) num_out_of_range_over_2++; if (scaledVal > (NumSegments-1+3)) num_out_of_range_over_3++; num_total++; if (scaledVal > max_scaledVal) { max_scaledVal = scaledVal; max_scaledVal = scaledVal; } #endif float scaledValFloor = floorf ( scaledVal ); scaledValFloor = Alg::Max ( 0.0f, Alg::Min ( (float)(NumSegments-1), scaledValFloor ) ); float t = scaledVal - scaledValFloor; int k = (int)scaledValFloor; float p0, p1; float m0, m1; switch ( k ) { case 0: // Curve starts at 1.0 with gradient K[1]-K[0] p0 = 1.0f; m0 = ( K[1] - K[0] ); // general case would have been (K[1]-K[-1])/2 p1 = K[1]; m1 = 0.5f * ( K[2] - K[0] ); break; default: // General case p0 = K[k ]; m0 = 0.5f * ( K[k+1] - K[k-1] ); p1 = K[k+1]; m1 = 0.5f * ( K[k+2] - K[k ] ); break; case NumSegments-2: // Last tangent is just the slope of the last two points. 
p0 = K[NumSegments-2]; m0 = 0.5f * ( K[NumSegments-1] - K[NumSegments-2] ); p1 = K[NumSegments-1]; m1 = K[NumSegments-1] - K[NumSegments-2]; break; case NumSegments-1: // Beyond the last segment it's just a straight line p0 = K[NumSegments-1]; m0 = K[NumSegments-1] - K[NumSegments-2]; p1 = p0 + m0; m1 = m0; break; } float omt = 1.0f - t; float res = ( p0 * ( 1.0f + 2.0f * t ) + m0 * t ) * omt * omt + ( p1 * ( 1.0f + 2.0f * omt ) - m1 * omt ) * t * t; return res; } // Converts a Profile eyecup string into an eyecup enumeration void SetEyeCup(HmdRenderInfo* renderInfo, const char* cup) { if (OVR_strcmp(cup, "A") == 0) renderInfo->EyeCups = EyeCup_DK1A; else if (OVR_strcmp(cup, "B") == 0) renderInfo->EyeCups = EyeCup_DK1B; else if (OVR_strcmp(cup, "C") == 0) renderInfo->EyeCups = EyeCup_DK1C; else if (OVR_strcmp(cup, "Orange A") == 0) renderInfo->EyeCups = EyeCup_OrangeA; else if (OVR_strcmp(cup, "Red A") == 0) renderInfo->EyeCups = EyeCup_RedA; else if (OVR_strcmp(cup, "Pink A") == 0) renderInfo->EyeCups = EyeCup_PinkA; else if (OVR_strcmp(cup, "Blue A") == 0) renderInfo->EyeCups = EyeCup_BlueA; else renderInfo->EyeCups = EyeCup_DK1A; } //----------------------------------------------------------------------------------- // The result is a scaling applied to the distance. float LensConfig::DistortionFnScaleRadiusSquared (float rsq) const { float scale = 1.0f; switch ( Eqn ) { case Distortion_Poly4: // This version is deprecated! Prefer one of the other two. scale = ( K[0] + rsq * ( K[1] + rsq * ( K[2] + rsq * K[3] ) ) ); break; case Distortion_RecipPoly4: scale = 1.0f / ( K[0] + rsq * ( K[1] + rsq * ( K[2] + rsq * K[3] ) ) ); break; case Distortion_CatmullRom10:{ // A Catmull-Rom spline through the values 1.0, K[1], K[2] ... K[10] // evenly spaced in R^2 from 0.0 to MaxR^2 // K[0] controls the slope at radius=0.0, rather than the actual value. const int NumSegments = LensConfig::NumCoefficients; OVR_ASSERT ( NumSegments <= NumCoefficients ); float scaledRsq = (float)(NumSegments-1) * rsq / ( MaxR * MaxR ); scale = EvalCatmullRom10Spline ( K, scaledRsq ); //Intercept, and overrule if needed if (CustomDistortion) { scale = CustomDistortion(rsq); } }break; default: OVR_ASSERT ( false ); break; } return scale; } // x,y,z components map to r,g,b Vector3f LensConfig::DistortionFnScaleRadiusSquaredChroma (float rsq) const { float scale = DistortionFnScaleRadiusSquared ( rsq ); Vector3f scaleRGB; scaleRGB.x = scale * ( 1.0f + ChromaticAberration[0] + rsq * ChromaticAberration[1] ); // Red scaleRGB.y = scale; // Green scaleRGB.z = scale * ( 1.0f + ChromaticAberration[2] + rsq * ChromaticAberration[3] ); // Blue return scaleRGB; } // DistortionFnInverse computes the inverse of the distortion function on an argument. float LensConfig::DistortionFnInverse(float r) const { OVR_ASSERT((r <= 20.0f)); float s, d; float delta = r * 0.25f; // Better to start guessing too low & take longer to converge than too high // and hit singularities. Empirically, r * 0.5f is too high in some cases. 
s = r * 0.25f; d = fabs(r - DistortionFn(s)); for (int i = 0; i < 20; i++) { float sUp = s + delta; float sDown = s - delta; float dUp = fabs(r - DistortionFn(sUp)); float dDown = fabs(r - DistortionFn(sDown)); if (dUp < d) { s = sUp; d = dUp; } else if (dDown < d) { s = sDown; d = dDown; } else { delta *= 0.5f; } } return s; } float LensConfig::DistortionFnInverseApprox(float r) const { float rsq = r * r; float scale = 1.0f; switch ( Eqn ) { case Distortion_Poly4: // Deprecated OVR_ASSERT ( false ); break; case Distortion_RecipPoly4: scale = 1.0f / ( InvK[0] + rsq * ( InvK[1] + rsq * ( InvK[2] + rsq * InvK[3] ) ) ); break; case Distortion_CatmullRom10:{ // A Catmull-Rom spline through the values 1.0, K[1], K[2] ... K[9] // evenly spaced in R^2 from 0.0 to MaxR^2 // K[0] controls the slope at radius=0.0, rather than the actual value. const int NumSegments = LensConfig::NumCoefficients; OVR_ASSERT ( NumSegments <= NumCoefficients ); float scaledRsq = (float)(NumSegments-1) * rsq / ( MaxInvR * MaxInvR ); scale = EvalCatmullRom10Spline ( InvK, scaledRsq ); //Intercept, and overrule if needed if (CustomDistortionInv) { scale = CustomDistortionInv(rsq); } }break; default: OVR_ASSERT ( false ); break; } return r * scale; } void LensConfig::SetUpInverseApprox() { float maxR = MaxInvR; switch ( Eqn ) { case Distortion_Poly4: // Deprecated OVR_ASSERT ( false ); break; case Distortion_RecipPoly4:{ float sampleR[4]; float sampleRSq[4]; float sampleInv[4]; float sampleFit[4]; // Found heuristically... sampleR[0] = 0.0f; sampleR[1] = maxR * 0.4f; sampleR[2] = maxR * 0.8f; sampleR[3] = maxR * 1.5f; for ( int i = 0; i < 4; i++ ) { sampleRSq[i] = sampleR[i] * sampleR[i]; sampleInv[i] = DistortionFnInverse ( sampleR[i] ); sampleFit[i] = sampleR[i] / sampleInv[i]; } sampleFit[0] = 1.0f; FitCubicPolynomial ( InvK, sampleRSq, sampleFit ); #if 0 // Should be a nearly exact match on the chosen points. OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[0] ) - DistortionFnInverseApprox ( sampleR[0] ) ) / maxR < 0.0001f ); OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[1] ) - DistortionFnInverseApprox ( sampleR[1] ) ) / maxR < 0.0001f ); OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[2] ) - DistortionFnInverseApprox ( sampleR[2] ) ) / maxR < 0.0001f ); OVR_ASSERT ( fabs ( DistortionFnInverse ( sampleR[3] ) - DistortionFnInverseApprox ( sampleR[3] ) ) / maxR < 0.0001f ); // Should be a decent match on the rest of the range. const int maxCheck = 20; for ( int i = 0; i < maxCheck; i++ ) { float checkR = (float)i * maxR / (float)maxCheck; float realInv = DistortionFnInverse ( checkR ); float testInv = DistortionFnInverseApprox ( checkR ); float error = fabsf ( realInv - testInv ) / maxR; OVR_ASSERT ( error < 0.1f ); } #endif }break; case Distortion_CatmullRom10:{ const int NumSegments = LensConfig::NumCoefficients; OVR_ASSERT ( NumSegments <= NumCoefficients ); for ( int i = 1; i < NumSegments; i++ ) { float scaledRsq = (float)i; float rsq = scaledRsq * MaxInvR * MaxInvR / (float)( NumSegments - 1); float r = sqrtf ( rsq ); float inv = DistortionFnInverse ( r ); InvK[i] = inv / r; InvK[0] = 1.0f; // TODO: fix this. 
} #if 0 const int maxCheck = 20; for ( int i = 0; i <= maxCheck; i++ ) { float checkR = (float)i * MaxInvR / (float)maxCheck; float realInv = DistortionFnInverse ( checkR ); float testInv = DistortionFnInverseApprox ( checkR ); float error = fabsf ( realInv - testInv ) / MaxR; OVR_ASSERT ( error < 0.01f ); } #endif }break; default: break; } } void LensConfig::SetToIdentity() { for ( int i = 0; i < NumCoefficients; i++ ) { K[i] = 0.0f; InvK[i] = 0.0f; } Eqn = Distortion_RecipPoly4; K[0] = 1.0f; InvK[0] = 1.0f; MaxR = 1.0f; MaxInvR = 1.0f; ChromaticAberration[0] = 0.0f; ChromaticAberration[1] = 0.0f; ChromaticAberration[2] = 0.0f; ChromaticAberration[3] = 0.0f; MetersPerTanAngleAtCenter = 0.05f; } enum LensConfigStoredVersion { LCSV_CatmullRom10Version1 = 1 }; // DO NOT CHANGE THESE ONCE THEY HAVE BEEN BAKED INTO FIRMWARE. // If something needs to change, add a new one! struct LensConfigStored_CatmullRom10Version1 { // All these items must be fixed-length integers - no "float", no "int", etc. uint16_t VersionNumber; // Must be LCSV_CatmullRom10Version1 uint16_t K[11]; uint16_t MaxR; uint16_t MetersPerTanAngleAtCenter; uint16_t ChromaticAberration[4]; // InvK and MaxInvR are calculated on load. }; uint16_t EncodeFixedPointUInt16 ( float val, uint16_t zeroVal, int fractionalBits ) { OVR_ASSERT ( ( fractionalBits >= 0 ) && ( fractionalBits < 31 ) ); float valWhole = val * (float)( 1 << fractionalBits ); valWhole += (float)zeroVal + 0.5f; valWhole = floorf ( valWhole ); OVR_ASSERT ( ( valWhole >= 0.0f ) && ( valWhole < (float)( 1 << 16 ) ) ); return (uint16_t)valWhole; } float DecodeFixedPointUInt16 ( uint16_t val, uint16_t zeroVal, int fractionalBits ) { OVR_ASSERT ( ( fractionalBits >= 0 ) && ( fractionalBits < 31 ) ); float valFloat = (float)val; valFloat -= (float)zeroVal; valFloat *= 1.0f / (float)( 1 << fractionalBits ); return valFloat; } // Returns true on success. bool LoadLensConfig ( LensConfig *presult, uint8_t const *pbuffer, int bufferSizeInBytes ) { if ( bufferSizeInBytes < 2 ) { // Can't even tell the version number! return false; } uint16_t version = DecodeUInt16 ( pbuffer + 0 ); switch ( version ) { case LCSV_CatmullRom10Version1: { if ( bufferSizeInBytes < (int)sizeof(LensConfigStored_CatmullRom10Version1) ) { return false; } LensConfigStored_CatmullRom10Version1 lcs; lcs.VersionNumber = DecodeUInt16 ( pbuffer + 0 ); for ( int i = 0; i < 11; i++ ) { lcs.K[i] = DecodeUInt16 ( pbuffer + 2 + 2*i ); } lcs.MaxR = DecodeUInt16 ( pbuffer + 24 ); lcs.MetersPerTanAngleAtCenter = DecodeUInt16 ( pbuffer + 26 ); for ( int i = 0; i < 4; i++ ) { lcs.ChromaticAberration[i] = DecodeUInt16 ( pbuffer + 28 + 2*i ); } OVR_COMPILER_ASSERT ( sizeof(lcs) == 36 ); // Convert to the real thing. LensConfig result; result.Eqn = Distortion_CatmullRom10; for ( int i = 0; i < 11; i++ ) { // K[] are mostly 1.something. They may get significantly bigger, but they never hit 0.0. result.K[i] = DecodeFixedPointUInt16 ( lcs.K[i], 0, 14 ); } // MaxR is tan(angle), so always >0, typically just over 1.0 (45 degrees half-fov), // but may get arbitrarily high. tan(76)=4 is a very reasonable limit! result.MaxR = DecodeFixedPointUInt16 ( lcs.MaxR, 0, 14 ); // MetersPerTanAngleAtCenter is also known as focal length! // Typically around 0.04 for our current screens, minimum of 0, sensible maximum of 0.125 (i.e. 
3 "extra" bits of fraction) result.MetersPerTanAngleAtCenter = DecodeFixedPointUInt16 ( lcs.MetersPerTanAngleAtCenter, 0, 16+3 ); for ( int i = 0; i < 4; i++ ) { // ChromaticAberration[] are mostly 0.0something, centered on 0.0. Largest seen is 0.04, so set max to 0.125 (i.e. 3 "extra" bits of fraction) result.ChromaticAberration[i] = DecodeFixedPointUInt16 ( lcs.ChromaticAberration[i], 0x8000, 16+3 ); } result.MaxInvR = result.DistortionFn ( result.MaxR ); result.SetUpInverseApprox(); OVR_ASSERT ( version == lcs.VersionNumber ); *presult = result; } break; default: // Unknown format. return false; break; } return true; } // Returns number of bytes needed. int SaveLensConfigSizeInBytes ( LensConfig const &config ) { OVR_UNUSED ( config ); return sizeof ( LensConfigStored_CatmullRom10Version1 ); } // Returns true on success. bool SaveLensConfig ( uint8_t *pbuffer, int bufferSizeInBytes, LensConfig const &config ) { if ( bufferSizeInBytes < (int)sizeof ( LensConfigStored_CatmullRom10Version1 ) ) { return false; } // Construct the values. LensConfigStored_CatmullRom10Version1 lcs; lcs.VersionNumber = LCSV_CatmullRom10Version1; for ( int i = 0; i < 11; i++ ) { // K[] are mostly 1.something. They may get significantly bigger, but they never hit 0.0. lcs.K[i] = EncodeFixedPointUInt16 ( config.K[i], 0, 14 ); } // MaxR is tan(angle), so always >0, typically just over 1.0 (45 degrees half-fov), // but may get arbitrarily high. tan(76)=4 is a very reasonable limit! lcs.MaxR = EncodeFixedPointUInt16 ( config.MaxR, 0, 14 ); // MetersPerTanAngleAtCenter is also known as focal length! // Typically around 0.04 for our current screens, minimum of 0, sensible maximum of 0.125 (i.e. 3 "extra" bits of fraction) lcs.MetersPerTanAngleAtCenter = EncodeFixedPointUInt16 ( config.MetersPerTanAngleAtCenter, 0, 16+3 ); for ( int i = 0; i < 4; i++ ) { // ChromaticAberration[] are mostly 0.0something, centered on 0.0. Largest seen is 0.04, so set max to 0.125 (i.e. 3 "extra" bits of fraction) lcs.ChromaticAberration[i] = EncodeFixedPointUInt16 ( config.ChromaticAberration[i], 0x8000, 16+3 ); } // Now store them out, sensitive to endianness. EncodeUInt16 ( pbuffer + 0, lcs.VersionNumber ); for ( int i = 0; i < 11; i++ ) { EncodeUInt16 ( pbuffer + 2 + 2*i, lcs.K[i] ); } EncodeUInt16 ( pbuffer + 24, lcs.MaxR ); EncodeUInt16 ( pbuffer + 26, lcs.MetersPerTanAngleAtCenter ); for ( int i = 0; i < 4; i++ ) { EncodeUInt16 ( pbuffer + 28 + 2*i, lcs.ChromaticAberration[i] ); } OVR_COMPILER_ASSERT ( 36 == sizeof(lcs) ); return true; } #ifdef OVR_BUILD_DEBUG void TestSaveLoadLensConfig ( LensConfig const &config ) { OVR_ASSERT ( config.Eqn == Distortion_CatmullRom10 ); // As a test, make sure this can be encoded and decoded correctly. 
const int bufferSize = 256; uint8_t buffer[bufferSize]; OVR_ASSERT ( SaveLensConfigSizeInBytes ( config ) < bufferSize ); bool success; success = SaveLensConfig ( buffer, bufferSize, config ); OVR_ASSERT ( success ); LensConfig testConfig; success = LoadLensConfig ( &testConfig, buffer, bufferSize ); OVR_ASSERT ( success ); OVR_ASSERT ( testConfig.Eqn == config.Eqn ); for ( int i = 0; i < 11; i++ ) { OVR_ASSERT ( fabs ( testConfig.K[i] - config.K[i] ) < 0.0001f ); } OVR_ASSERT ( fabsf ( testConfig.MaxR - config.MaxR ) < 0.0001f ); OVR_ASSERT ( fabsf ( testConfig.MetersPerTanAngleAtCenter - config.MetersPerTanAngleAtCenter ) < 0.00001f ); for ( int i = 0; i < 4; i++ ) { OVR_ASSERT ( fabsf ( testConfig.ChromaticAberration[i] - config.ChromaticAberration[i] ) < 0.00001f ); } } #endif //----------------------------------------------------------------------------------- // TBD: There is a question of whether this is the best file for CreateDebugHMDInfo. As long as there are many // constants for HmdRenderInfo here as well it is ok. The alternative would be OVR_Common_HMDDevice.cpp, but // that's specialized per platform... should probably move it there once the code is in the common base class. HMDInfo CreateDebugHMDInfo(HmdTypeEnum hmdType) { HMDInfo info; if ((hmdType != HmdType_DK1) && (hmdType != HmdType_CrystalCoveProto) && (hmdType != HmdType_DK2)) { LogText("Debug HMDInfo - HmdType not supported. Defaulting to DK1.\n"); hmdType = HmdType_DK1; } // The alternative would be to initialize info.HmdType to HmdType_None instead. If we did that, // code wouldn't be "maximally compatible" and devs wouldn't know what device we are // simulating... so if differentiation becomes necessary we had better add a Debug flag in the future. info.HmdType = hmdType; info.Manufacturer = "Oculus VR"; switch(hmdType) { case HmdType_DK1: info.ProductName = "Oculus Rift DK1"; info.ResolutionInPixels = Sizei ( 1280, 800 ); info.ScreenSizeInMeters = Sizef ( 0.1498f, 0.0936f ); info.ScreenGapSizeInMeters = 0.0f; info.CenterFromTopInMeters = 0.0468f; info.LensSeparationInMeters = 0.0635f; info.PelOffsetR = Vector2f ( 0.0f, 0.0f ); info.PelOffsetB = Vector2f ( 0.0f, 0.0f ); info.Shutter.Type = HmdShutter_RollingTopToBottom; info.Shutter.VsyncToNextVsync = ( 1.0f / 60.0f ); info.Shutter.VsyncToFirstScanline = 0.000052f; info.Shutter.FirstScanlineToLastScanline = 0.016580f; info.Shutter.PixelSettleTime = 0.015f; info.Shutter.PixelPersistence = ( 1.0f / 60.0f ); break; case HmdType_CrystalCoveProto: info.ProductName = "Oculus Rift Crystal Cove"; info.ResolutionInPixels = Sizei ( 1920, 1080 ); info.ScreenSizeInMeters = Sizef ( 0.12576f, 0.07074f ); info.ScreenGapSizeInMeters = 0.0f; info.CenterFromTopInMeters = info.ScreenSizeInMeters.h * 0.5f; info.LensSeparationInMeters = 0.0635f; info.PelOffsetR = Vector2f ( 0.0f, 0.0f ); info.PelOffsetB = Vector2f ( 0.0f, 0.0f ); info.Shutter.Type = HmdShutter_RollingRightToLeft; info.Shutter.VsyncToNextVsync = ( 1.0f / 76.0f ); info.Shutter.VsyncToFirstScanline = 0.0000273f; info.Shutter.FirstScanlineToLastScanline = 0.0131033f; info.Shutter.PixelSettleTime = 0.0f; info.Shutter.PixelPersistence = 0.18f * info.Shutter.VsyncToNextVsync; break; case HmdType_DK2: info.ProductName = "Oculus Rift DK2"; info.ResolutionInPixels = Sizei ( 1920, 1080 ); info.ScreenSizeInMeters = Sizef ( 0.12576f, 0.07074f ); info.ScreenGapSizeInMeters = 0.0f; info.CenterFromTopInMeters = info.ScreenSizeInMeters.h * 0.5f; info.LensSeparationInMeters = 0.0635f; info.PelOffsetR = Vector2f ( 0.5f, 0.5f );
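// (Assumption, not confirmed by this file: the 0.5 pel offsets model DK2's pentile-style subpixel layout, where red/blue pels sit half a pixel from green; the DK1 entries above use zero offsets.)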
info.PelOffsetB = Vector2f ( 0.5f, 0.5f ); info.Shutter.Type = HmdShutter_RollingRightToLeft; info.Shutter.VsyncToNextVsync = ( 1.0f / 76.0f ); info.Shutter.VsyncToFirstScanline = 0.0000273f; info.Shutter.FirstScanlineToLastScanline = 0.0131033f; info.Shutter.PixelSettleTime = 0.0f; info.Shutter.PixelPersistence = 0.18f * info.Shutter.VsyncToNextVsync; break; default: break; } return info; } HmdRenderInfo GenerateHmdRenderInfoFromHmdInfo ( HMDInfo const &hmdInfo, Profile const *profile, DistortionEqnType distortionType /*= Distortion_CatmullRom10*/, EyeCupType eyeCupOverride /*= EyeCup_LAST*/ ) { HmdRenderInfo renderInfo; OVR_ASSERT(profile); // profiles are required if(!profile) return renderInfo; renderInfo.HmdType = hmdInfo.HmdType; renderInfo.ResolutionInPixels = hmdInfo.ResolutionInPixels; renderInfo.ScreenSizeInMeters = hmdInfo.ScreenSizeInMeters; renderInfo.CenterFromTopInMeters = hmdInfo.CenterFromTopInMeters; renderInfo.ScreenGapSizeInMeters = hmdInfo.ScreenGapSizeInMeters; renderInfo.LensSeparationInMeters = hmdInfo.LensSeparationInMeters; renderInfo.PelOffsetR = hmdInfo.PelOffsetR; renderInfo.PelOffsetB = hmdInfo.PelOffsetB; OVR_ASSERT ( sizeof(renderInfo.Shutter) == sizeof(hmdInfo.Shutter) ); // Try to keep the files in sync! renderInfo.Shutter.Type = hmdInfo.Shutter.Type; renderInfo.Shutter.VsyncToNextVsync = hmdInfo.Shutter.VsyncToNextVsync; renderInfo.Shutter.VsyncToFirstScanline = hmdInfo.Shutter.VsyncToFirstScanline; renderInfo.Shutter.FirstScanlineToLastScanline = hmdInfo.Shutter.FirstScanlineToLastScanline; renderInfo.Shutter.PixelSettleTime = hmdInfo.Shutter.PixelSettleTime; renderInfo.Shutter.PixelPersistence = hmdInfo.Shutter.PixelPersistence; renderInfo.LensDiameterInMeters = 0.035f; renderInfo.LensSurfaceToMidplateInMeters = 0.025f; renderInfo.EyeCups = EyeCup_DK1A; #if 0 // Device settings are out of date - don't use them. if (Contents & Contents_Distortion) { memcpy(renderInfo.DistortionK, DistortionK, sizeof(float)*4); renderInfo.DistortionEqn = Distortion_RecipPoly4; } #endif // Defaults in case of no user profile. renderInfo.EyeLeft.NoseToPupilInMeters = 0.032f; renderInfo.EyeLeft.ReliefInMeters = 0.012f; // 10mm eye-relief laser numbers for DK1 lenses. // These are a decent seed for finding eye-relief and IPD. // These are NOT used for rendering! // Rendering distortions are now in GenerateLensConfigFromEyeRelief() // So, if you're hacking in new distortions, don't do it here! renderInfo.EyeLeft.Distortion.SetToIdentity(); renderInfo.EyeLeft.Distortion.MetersPerTanAngleAtCenter = 0.0449f; renderInfo.EyeLeft.Distortion.Eqn = Distortion_RecipPoly4; renderInfo.EyeLeft.Distortion.K[0] = 1.0f; renderInfo.EyeLeft.Distortion.K[1] = -0.494165344f; renderInfo.EyeLeft.Distortion.K[2] = 0.587046423f; renderInfo.EyeLeft.Distortion.K[3] = -0.841887126f; renderInfo.EyeLeft.Distortion.MaxR = 1.0f; renderInfo.EyeLeft.Distortion.ChromaticAberration[0] = -0.006f; renderInfo.EyeLeft.Distortion.ChromaticAberration[1] = 0.0f; renderInfo.EyeLeft.Distortion.ChromaticAberration[2] = 0.014f; renderInfo.EyeLeft.Distortion.ChromaticAberration[3] = 0.0f; renderInfo.EyeRight = renderInfo.EyeLeft; // Obtain data from profile. char eyecup[16]; if (profile->GetValue(OVR_KEY_EYE_CUP, eyecup, 16)) { SetEyeCup(&renderInfo, eyecup); } switch ( hmdInfo.HmdType ) { case HmdType_None: case HmdType_DKProto: case HmdType_DK1: // Slight hack to improve usability. 
// If you have a DKHD-style lens profile enabled, // but you plug in DK1 and forget to change the profile, // obviously you don't want those lens numbers. if ( ( renderInfo.EyeCups != EyeCup_DK1A ) && ( renderInfo.EyeCups != EyeCup_DK1B ) && ( renderInfo.EyeCups != EyeCup_DK1C ) ) { renderInfo.EyeCups = EyeCup_DK1A; } break; case HmdType_DKHD2Proto: renderInfo.EyeCups = EyeCup_DKHD2A; break; case HmdType_CrystalCoveProto: renderInfo.EyeCups = EyeCup_PinkA; break; case HmdType_DK2: renderInfo.EyeCups = EyeCup_DK2A; break; default: break; } if ( eyeCupOverride != EyeCup_LAST ) { renderInfo.EyeCups = eyeCupOverride; } switch ( renderInfo.EyeCups ) { case EyeCup_DK1A: case EyeCup_DK1B: case EyeCup_DK1C: renderInfo.LensDiameterInMeters = 0.035f; renderInfo.LensSurfaceToMidplateInMeters = 0.02357f; // Not strictly lens-specific, but still wise to set a reasonable default for relief. renderInfo.EyeLeft.ReliefInMeters = 0.010f; renderInfo.EyeRight.ReliefInMeters = 0.010f; break; case EyeCup_DKHD2A: renderInfo.LensDiameterInMeters = 0.035f; renderInfo.LensSurfaceToMidplateInMeters = 0.02357f; // Not strictly lens-specific, but still wise to set a reasonable default for relief. renderInfo.EyeLeft.ReliefInMeters = 0.010f; renderInfo.EyeRight.ReliefInMeters = 0.010f; break; case EyeCup_PinkA: case EyeCup_DK2A: renderInfo.LensDiameterInMeters = 0.04f; // approximate renderInfo.LensSurfaceToMidplateInMeters = 0.01965f; // Not strictly lens-specific, but still wise to set a reasonable default for relief. renderInfo.EyeLeft.ReliefInMeters = 0.012f; renderInfo.EyeRight.ReliefInMeters = 0.012f; break; default: OVR_ASSERT ( false ); break; } Profile* def = ProfileManager::GetInstance()->GetDefaultProfile(hmdInfo.HmdType); // Set the eye position // Use the user profile value unless they have elected to use the defaults if (!profile->GetBoolValue(OVR_KEY_CUSTOM_EYE_RENDER, true)) profile = def; // use the default char user[32]; profile->GetValue(OVR_KEY_USER, user, 32); // for debugging purposes // TBD: Maybe we should separate custom camera positioning from custom distortion rendering ?? float eye2nose[2] = { OVR_DEFAULT_IPD / 2, OVR_DEFAULT_IPD / 2 }; if (profile->GetFloatValues(OVR_KEY_EYE_TO_NOSE_DISTANCE, eye2nose, 2) == 2) { renderInfo.EyeLeft.NoseToPupilInMeters = eye2nose[0]; renderInfo.EyeRight.NoseToPupilInMeters = eye2nose[1]; } else { // Legacy profiles may not include half-ipd, so use the regular IPD value instead float ipd = profile->GetFloatValue(OVR_KEY_IPD, OVR_DEFAULT_IPD); renderInfo.EyeLeft.NoseToPupilInMeters = 0.5f * ipd; renderInfo.EyeRight.NoseToPupilInMeters = 0.5f * ipd; } float eye2plate[2]; if ((profile->GetFloatValues(OVR_KEY_MAX_EYE_TO_PLATE_DISTANCE, eye2plate, 2) == 2) || (def->GetFloatValues(OVR_KEY_MAX_EYE_TO_PLATE_DISTANCE, eye2plate, 2) == 2)) { // Subtract the eye-cup height from the plate distance to get the eye-to-lens distance // This measurement should be the distance at maximum dial setting // We still need to adjust with the dial offset renderInfo.EyeLeft.ReliefInMeters = eye2plate[0] - renderInfo.LensSurfaceToMidplateInMeters; renderInfo.EyeRight.ReliefInMeters = eye2plate[1] - renderInfo.LensSurfaceToMidplateInMeters; // Adjust the eye relief with the dial setting (from the assumed max eye relief) int dial = profile->GetIntValue(OVR_KEY_EYE_RELIEF_DIAL, OVR_DEFAULT_EYE_RELIEF_DIAL); renderInfo.EyeLeft.ReliefInMeters -= ((10 - dial) * 0.001f); renderInfo.EyeRight.ReliefInMeters -= ((10 - dial) * 0.001f); } else { // We shouldn't be here.
The user or default profile should have the eye relief OVR_ASSERT(false); // Set the eye relief with the user configured dial setting //int dial = profile->GetIntValue(OVR_KEY_EYE_RELIEF_DIAL, OVR_DEFAULT_EYE_RELIEF_DIAL); // Assume a default of 7 to 17 mm eye relief based on the dial. This corresponds // to the sampled and tuned distortion range on the DK1. //renderInfo.EyeLeft.ReliefInMeters = 0.007f + (dial * 0.001f); //renderInfo.EyeRight.ReliefInMeters = 0.007f + (dial * 0.001f); } def->Release(); // Now we know where the eyes are relative to the lenses, we can compute a distortion for each. // TODO: incorporate lateral offset in distortion generation. // TODO: we used a distortion to calculate eye-relief, and now we're making a distortion from that eye-relief. Close the loop! for ( int eyeNum = 0; eyeNum < 2; eyeNum++ ) { HmdRenderInfo::EyeConfig *pHmdEyeConfig = ( eyeNum == 0 ) ? &(renderInfo.EyeLeft) : &(renderInfo.EyeRight); float eye_relief = pHmdEyeConfig->ReliefInMeters; LensConfig distortionConfig = GenerateLensConfigFromEyeRelief ( eye_relief, renderInfo, distortionType ); pHmdEyeConfig->Distortion = distortionConfig; } return renderInfo; } LensConfig GenerateLensConfigFromEyeRelief ( float eyeReliefInMeters, HmdRenderInfo const &hmd, DistortionEqnType distortionType /*= Distortion_CatmullRom10*/ ) { struct DistortionDescriptor { float EyeRelief; // The three places we're going to sample & lerp the curve at. // One sample is always at 0.0, and the distortion scale should be 1.0 or else! // Only use for poly4 numbers - CR has an implicit scale. float SampleRadius[3]; // Where the distortion has actually been measured/calibrated out to. // Don't try to hallucinate data out beyond here. float MaxRadius; // The config itself. LensConfig Config; }; static const int MaxDistortions = 10; DistortionDescriptor distortions[MaxDistortions]; for (int i = 0; i < MaxDistortions; i++) { distortions[i].EyeRelief = 0.0f; memset(distortions[i].SampleRadius, 0, sizeof(distortions[i].SampleRadius)); distortions[i].MaxRadius = 1.0f; distortions[i].Config.SetToIdentity(); // Note: This line causes a false Microsoft static analysis error -cat } int numDistortions = 0; int defaultDistortion = 0; // index of the default distortion curve to use if zero eye relief supplied if ( ( hmd.EyeCups == EyeCup_DK1A ) || ( hmd.EyeCups == EyeCup_DK1B ) || ( hmd.EyeCups == EyeCup_DK1C ) ) { numDistortions = 0; // Tuned at minimum dial setting - extended to r^2 == 1.8 distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; distortions[numDistortions].EyeRelief = 0.012760465f - 0.005f; distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.0425f; distortions[numDistortions].Config.K[0] = 1.0000f; distortions[numDistortions].Config.K[1] = 1.06505f; distortions[numDistortions].Config.K[2] = 1.14725f; distortions[numDistortions].Config.K[3] = 1.2705f; distortions[numDistortions].Config.K[4] = 1.48f; distortions[numDistortions].Config.K[5] = 1.87f; distortions[numDistortions].Config.K[6] = 2.534f; distortions[numDistortions].Config.K[7] = 3.6f; distortions[numDistortions].Config.K[8] = 5.1f; distortions[numDistortions].Config.K[9] = 7.4f; distortions[numDistortions].Config.K[10] = 11.0f; distortions[numDistortions].MaxRadius = sqrt(1.8f); defaultDistortion = numDistortions; // this is the default numDistortions++; // Tuned at middle dial setting distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; distortions[numDistortions].EyeRelief = 0.012760465f; // my average eye-relief 
distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.0425f; distortions[numDistortions].Config.K[0] = 1.0f; distortions[numDistortions].Config.K[1] = 1.032407264f; distortions[numDistortions].Config.K[2] = 1.07160462f; distortions[numDistortions].Config.K[3] = 1.11998388f; distortions[numDistortions].Config.K[4] = 1.1808606f; distortions[numDistortions].Config.K[5] = 1.2590494f; distortions[numDistortions].Config.K[6] = 1.361915f; distortions[numDistortions].Config.K[7] = 1.5014339f; distortions[numDistortions].Config.K[8] = 1.6986004f; distortions[numDistortions].Config.K[9] = 1.9940577f; distortions[numDistortions].Config.K[10] = 2.4783147f; distortions[numDistortions].MaxRadius = 1.0f; numDistortions++; // Tuned at maximum dial setting distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; distortions[numDistortions].EyeRelief = 0.012760465f + 0.005f; distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.0425f; distortions[numDistortions].Config.K[0] = 1.0102f; distortions[numDistortions].Config.K[1] = 1.0371f; distortions[numDistortions].Config.K[2] = 1.0831f; distortions[numDistortions].Config.K[3] = 1.1353f; distortions[numDistortions].Config.K[4] = 1.2f; distortions[numDistortions].Config.K[5] = 1.2851f; distortions[numDistortions].Config.K[6] = 1.3979f; distortions[numDistortions].Config.K[7] = 1.56f; distortions[numDistortions].Config.K[8] = 1.8f; distortions[numDistortions].Config.K[9] = 2.25f; distortions[numDistortions].Config.K[10] = 3.0f; distortions[numDistortions].MaxRadius = 1.0f; numDistortions++; // Chromatic aberration doesn't seem to change with eye relief. for ( int i = 0; i < numDistortions; i++ ) { distortions[i].Config.ChromaticAberration[0] = -0.006f; distortions[i].Config.ChromaticAberration[1] = 0.0f; distortions[i].Config.ChromaticAberration[2] = 0.014f; distortions[i].Config.ChromaticAberration[3] = 0.0f; } } else if ( hmd.EyeCups == EyeCup_DKHD2A ) { // Tuned DKHD2 lens numDistortions = 0; distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; distortions[numDistortions].EyeRelief = 0.010f; distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.0425f; distortions[numDistortions].Config.K[0] = 1.0f; distortions[numDistortions].Config.K[1] = 1.0425f; distortions[numDistortions].Config.K[2] = 1.0826f; distortions[numDistortions].Config.K[3] = 1.130f; distortions[numDistortions].Config.K[4] = 1.185f; distortions[numDistortions].Config.K[5] = 1.250f; distortions[numDistortions].Config.K[6] = 1.338f; distortions[numDistortions].Config.K[7] = 1.455f; distortions[numDistortions].Config.K[8] = 1.620f; distortions[numDistortions].Config.K[9] = 1.840f; distortions[numDistortions].Config.K[10] = 2.200f; distortions[numDistortions].MaxRadius = 1.0f; defaultDistortion = numDistortions; // this is the default numDistortions++; distortions[numDistortions] = distortions[0]; distortions[numDistortions].EyeRelief = 0.020f; numDistortions++; // Chromatic aberration doesn't seem to change with eye relief. 
for ( int i = 0; i < numDistortions; i++ ) { distortions[i].Config.ChromaticAberration[0] = -0.006f; distortions[i].Config.ChromaticAberration[1] = 0.0f; distortions[i].Config.ChromaticAberration[2] = 0.014f; distortions[i].Config.ChromaticAberration[3] = 0.0f; } } else if ( hmd.EyeCups == EyeCup_PinkA || hmd.EyeCups == EyeCup_DK2A ) { // Tuned Crystal Cove & DK2 Lens (CES & GDC) numDistortions = 0; distortions[numDistortions].EyeRelief = 0.008f; distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.036f; // TODO: Need to retune this distortion for minimum eye relief distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; distortions[numDistortions].Config.K[0] = 1.003f; distortions[numDistortions].Config.K[1] = 1.02f; distortions[numDistortions].Config.K[2] = 1.042f; distortions[numDistortions].Config.K[3] = 1.066f; distortions[numDistortions].Config.K[4] = 1.094f; distortions[numDistortions].Config.K[5] = 1.126f; distortions[numDistortions].Config.K[6] = 1.162f; distortions[numDistortions].Config.K[7] = 1.203f; distortions[numDistortions].Config.K[8] = 1.25f; distortions[numDistortions].Config.K[9] = 1.31f; distortions[numDistortions].Config.K[10] = 1.38f; distortions[numDistortions].MaxRadius = 1.0f; distortions[numDistortions].Config.ChromaticAberration[0] = -0.0112f; distortions[numDistortions].Config.ChromaticAberration[1] = -0.015f; distortions[numDistortions].Config.ChromaticAberration[2] = 0.0187f; distortions[numDistortions].Config.ChromaticAberration[3] = 0.015f; numDistortions++; distortions[numDistortions].EyeRelief = 0.018f; distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.036f; distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; distortions[numDistortions].Config.K[0] = 1.003f; distortions[numDistortions].Config.K[1] = 1.02f; distortions[numDistortions].Config.K[2] = 1.042f; distortions[numDistortions].Config.K[3] = 1.066f; distortions[numDistortions].Config.K[4] = 1.094f; distortions[numDistortions].Config.K[5] = 1.126f; distortions[numDistortions].Config.K[6] = 1.162f; distortions[numDistortions].Config.K[7] = 1.203f; distortions[numDistortions].Config.K[8] = 1.25f; distortions[numDistortions].Config.K[9] = 1.31f; distortions[numDistortions].Config.K[10] = 1.38f; distortions[numDistortions].MaxRadius = 1.0f; distortions[numDistortions].Config.ChromaticAberration[0] = -0.015f; distortions[numDistortions].Config.ChromaticAberration[1] = -0.02f; distortions[numDistortions].Config.ChromaticAberration[2] = 0.025f; distortions[numDistortions].Config.ChromaticAberration[3] = 0.02f; defaultDistortion = numDistortions; // this is the default numDistortions++; /* // Orange Lens on DK2 distortions[numDistortions].EyeRelief = 0.010f; distortions[numDistortions].Config.MetersPerTanAngleAtCenter = 0.031f; distortions[numDistortions].Config.Eqn = Distortion_CatmullRom10; distortions[numDistortions].Config.K[0] = 1.00f; distortions[numDistortions].Config.K[1] = 1.0169f; distortions[numDistortions].Config.K[2] = 1.0378f; distortions[numDistortions].Config.K[3] = 1.0648f; distortions[numDistortions].Config.K[4] = 1.0990f; distortions[numDistortions].Config.K[5] = 1.141f; distortions[numDistortions].Config.K[6] = 1.192f; distortions[numDistortions].Config.K[7] = 1.255f; distortions[numDistortions].Config.K[8] = 1.335f; distortions[numDistortions].Config.K[9] = 1.435f; distortions[numDistortions].Config.K[10] = 1.56f; distortions[numDistortions].MaxRadius = 1.0f; */ } else { // Unknown lens. 
// Use DK1 black lens settings, just so we can continue to run with something. distortions[0].EyeRelief = 0.005f; distortions[0].Config.MetersPerTanAngleAtCenter = 0.043875f; distortions[0].Config.Eqn = Distortion_RecipPoly4; distortions[0].Config.K[0] = 1.0f; distortions[0].Config.K[1] = -0.3999f; distortions[0].Config.K[2] = 0.2408f; distortions[0].Config.K[3] = -0.4589f; distortions[0].SampleRadius[0] = 0.2f; distortions[0].SampleRadius[1] = 0.4f; distortions[0].SampleRadius[2] = 0.6f; distortions[1] = distortions[0]; distortions[1].EyeRelief = 0.010f; numDistortions = 2; // Chromatic aberration doesn't seem to change with eye relief. for ( int i = 0; i < numDistortions; i++ ) { // These are placeholder, they have not been tuned! distortions[i].Config.ChromaticAberration[0] = 0.0f; distortions[i].Config.ChromaticAberration[1] = 0.0f; distortions[i].Config.ChromaticAberration[2] = 0.0f; distortions[i].Config.ChromaticAberration[3] = 0.0f; } } OVR_ASSERT(numDistortions < MaxDistortions); DistortionDescriptor *pUpper = NULL; DistortionDescriptor *pLower = NULL; float lerpVal = 0.0f; if (eyeReliefInMeters == 0) { // Use a constant default distortion if an invalid eye-relief is supplied pLower = &(distortions[defaultDistortion]); pUpper = &(distortions[defaultDistortion]); lerpVal = 0.0f; } else { for ( int i = 0; i < numDistortions-1; i++ ) { OVR_ASSERT ( distortions[i].EyeRelief < distortions[i+1].EyeRelief ); if ( ( distortions[i].EyeRelief <= eyeReliefInMeters ) && ( distortions[i+1].EyeRelief > eyeReliefInMeters ) ) { pLower = &(distortions[i]); pUpper = &(distortions[i+1]); lerpVal = ( eyeReliefInMeters - pLower->EyeRelief ) / ( pUpper->EyeRelief - pLower->EyeRelief ); // No break here - I want the ASSERT to check everything every time! } } } if ( pUpper == NULL ) { #if 0 // Outside the range, so extrapolate rather than interpolate. if ( distortions[0].EyeRelief > eyeReliefInMeters ) { pLower = &(distortions[0]); pUpper = &(distortions[1]); } else { OVR_ASSERT ( distortions[numDistortions-1].EyeRelief <= eyeReliefInMeters ); pLower = &(distortions[numDistortions-2]); pUpper = &(distortions[numDistortions-1]); } lerpVal = ( eyeReliefInMeters - pLower->EyeRelief ) / ( pUpper->EyeRelief - pLower->EyeRelief ); #else // Do not extrapolate, just clamp - slightly worried about people putting in bogus settings. if ( distortions[0].EyeRelief > eyeReliefInMeters ) { pLower = &(distortions[0]); pUpper = &(distortions[0]); } else { OVR_ASSERT ( distortions[numDistortions-1].EyeRelief <= eyeReliefInMeters ); pLower = &(distortions[numDistortions-1]); pUpper = &(distortions[numDistortions-1]); } lerpVal = 0.0f; #endif } float invLerpVal = 1.0f - lerpVal; pLower->Config.MaxR = pLower->MaxRadius; pUpper->Config.MaxR = pUpper->MaxRadius; LensConfig result; // Where is the edge of the lens - no point modelling further than this. float maxValidRadius = invLerpVal * pLower->MaxRadius + lerpVal * pUpper->MaxRadius; result.MaxR = maxValidRadius; switch ( distortionType ) { case Distortion_Poly4: // Deprecated OVR_ASSERT ( false ); break; case Distortion_RecipPoly4:{ // Lerp control points and fit an equation to them. float fitX[4]; float fitY[4]; fitX[0] = 0.0f; fitY[0] = 1.0f; for ( int ctrlPt = 1; ctrlPt < 4; ctrlPt ++ ) { // SampleRadius is not valid for Distortion_RecipPoly4 types. 
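// (The three control points below land at 1/4, 1/2 and 3/4 of the lerped max radius; the r=0 point was already seeded as (0,1) above.)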
float radiusLerp = ( invLerpVal * pLower->MaxRadius + lerpVal * pUpper->MaxRadius ) * ( (float)ctrlPt / 4.0f ); float radiusLerpSq = radiusLerp * radiusLerp; float fitYLower = pLower->Config.DistortionFnScaleRadiusSquared ( radiusLerpSq ); float fitYUpper = pUpper->Config.DistortionFnScaleRadiusSquared ( radiusLerpSq ); fitX[ctrlPt] = radiusLerpSq; fitY[ctrlPt] = 1.0f / ( invLerpVal * fitYLower + lerpVal * fitYUpper ); } result.Eqn = Distortion_RecipPoly4; bool bSuccess = FitCubicPolynomial ( result.K, fitX, fitY ); OVR_ASSERT ( bSuccess ); OVR_UNUSED ( bSuccess ); // Set up the fast inverse. float maxRDist = result.DistortionFn ( maxValidRadius ); result.MaxInvR = maxRDist; result.SetUpInverseApprox(); }break; case Distortion_CatmullRom10:{ // Evenly sample & lerp points on the curve. const int NumSegments = LensConfig::NumCoefficients; result.MaxR = maxValidRadius; // Directly interpolate the K0 values result.K[0] = invLerpVal * pLower->Config.K[0] + lerpVal * pUpper->Config.K[0]; // Sample and interpolate the distortion curves to derive K[1] ... K[n] for ( int ctrlPt = 1; ctrlPt < NumSegments; ctrlPt++ ) { float radiusSq = ( (float)ctrlPt / (float)(NumSegments-1) ) * maxValidRadius * maxValidRadius; float fitYLower = pLower->Config.DistortionFnScaleRadiusSquared ( radiusSq ); float fitYUpper = pUpper->Config.DistortionFnScaleRadiusSquared ( radiusSq ); float fitLerp = invLerpVal * fitYLower + lerpVal * fitYUpper; result.K[ctrlPt] = fitLerp; } result.Eqn = Distortion_CatmullRom10; for ( int ctrlPt = 1; ctrlPt < NumSegments; ctrlPt++ ) { float radiusSq = ( (float)ctrlPt / (float)(NumSegments-1) ) * maxValidRadius * maxValidRadius; float val = result.DistortionFnScaleRadiusSquared ( radiusSq ); OVR_ASSERT ( Alg::Abs ( val - result.K[ctrlPt] ) < 0.0001f ); OVR_UNUSED1(val); // For release build. } // Set up the fast inverse. float maxRDist = result.DistortionFn ( maxValidRadius ); result.MaxInvR = maxRDist; result.SetUpInverseApprox(); }break; default: OVR_ASSERT ( false ); break; } // Chromatic aberration. result.ChromaticAberration[0] = invLerpVal * pLower->Config.ChromaticAberration[0] + lerpVal * pUpper->Config.ChromaticAberration[0]; result.ChromaticAberration[1] = invLerpVal * pLower->Config.ChromaticAberration[1] + lerpVal * pUpper->Config.ChromaticAberration[1]; result.ChromaticAberration[2] = invLerpVal * pLower->Config.ChromaticAberration[2] + lerpVal * pUpper->Config.ChromaticAberration[2]; result.ChromaticAberration[3] = invLerpVal * pLower->Config.ChromaticAberration[3] + lerpVal * pUpper->Config.ChromaticAberration[3]; // Scale. result.MetersPerTanAngleAtCenter = pLower->Config.MetersPerTanAngleAtCenter * invLerpVal + pUpper->Config.MetersPerTanAngleAtCenter * lerpVal; /* // Commented out - Causes ASSERT with no HMD plugged in #ifdef OVR_BUILD_DEBUG if ( distortionType == Distortion_CatmullRom10 ) { TestSaveLoadLensConfig ( result ); } #endif */ return result; } DistortionRenderDesc CalculateDistortionRenderDesc ( StereoEye eyeType, HmdRenderInfo const &hmd, const LensConfig *pLensOverride /*= NULL */ ) { // From eye relief, IPD and device characteristics, we get the distortion mapping. // This distortion does the following things: // 1. It undoes the distortion that happens at the edges of the lens. // 2. It maps the undistorted field into "retina" space. // So the input is a pixel coordinate - the physical pixel on the display itself. // The output is the real-world direction of the ray from this pixel as it comes out of the lens and hits the eye. 
// However we typically think of rays "coming from" the eye, so the direction (TanAngleX,TanAngleY,1) is the direction // that the pixel appears to be in real-world space, where AngleX and AngleY are relative to the straight-ahead vector. // If your renderer is a raytracer, you can use this vector directly (normalize as appropriate). // However in standard rasterisers, we have rendered a 2D image and are putting it in front of the eye, // so we then need a mapping from this space to the [-1,1] UV coordinate space, which depends on exactly // where "in space" the app wants to put that rendertarget. // Where in space, and how large this rendertarget is, is completely up to the app and/or user, // though of course we can provide some useful hints. // TODO: Use IPD and eye relief to modify distortion (i.e. non-radial component) // TODO: cope with lenses that don't produce collimated light. // This means that IPD relative to the lens separation changes the light vergence, // and so we actually need to change where the image is displayed. const HmdRenderInfo::EyeConfig &hmdEyeConfig = ( eyeType == StereoEye_Left ) ? hmd.EyeLeft : hmd.EyeRight; DistortionRenderDesc localDistortion; localDistortion.Lens = hmdEyeConfig.Distortion; if ( pLensOverride != NULL ) { localDistortion.Lens = *pLensOverride; } Sizef pixelsPerMeter(hmd.ResolutionInPixels.w / ( hmd.ScreenSizeInMeters.w - hmd.ScreenGapSizeInMeters ), hmd.ResolutionInPixels.h / hmd.ScreenSizeInMeters.h); localDistortion.PixelsPerTanAngleAtCenter = (pixelsPerMeter * localDistortion.Lens.MetersPerTanAngleAtCenter).ToVector(); // Same thing, scaled to [-1,1] for each eye, rather than pixels. localDistortion.TanEyeAngleScale = Vector2f(0.25f, 0.5f).EntrywiseMultiply( (hmd.ScreenSizeInMeters / localDistortion.Lens.MetersPerTanAngleAtCenter).ToVector()); // <--------------left eye------------------><-ScreenGapSizeInMeters-><--------------right eye-----------------> // <------------------------------------------ScreenSizeInMeters.Width-----------------------------------------> // <----------------LensSeparationInMeters---------------> // <--centerFromLeftInMeters-> // ^ // Center of lens // Find the lens centers in scale of [-1,+1] (NDC) in left eye. 
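// Worked example with the DK1 numbers from CreateDebugHMDInfo above (illustration only): // ScreenSizeInMeters.w = 0.1498, LensSeparationInMeters = 0.0635, gap = 0 // visibleWidthOfOneEye = 0.5 * 0.1498 = 0.0749 // centerFromLeftInMeters = (0.1498 - 0.0635) * 0.5 = 0.04315 // LensCenter.x = (0.04315 / 0.0749) * 2 - 1 ~= 0.152 for the left eye (negated for the right eye below).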
float visibleWidthOfOneEye = 0.5f * ( hmd.ScreenSizeInMeters.w - hmd.ScreenGapSizeInMeters ); float centerFromLeftInMeters = ( hmd.ScreenSizeInMeters.w - hmd.LensSeparationInMeters ) * 0.5f; localDistortion.LensCenter.x = ( centerFromLeftInMeters / visibleWidthOfOneEye ) * 2.0f - 1.0f; localDistortion.LensCenter.y = ( hmd.CenterFromTopInMeters / hmd.ScreenSizeInMeters.h ) * 2.0f - 1.0f; if ( eyeType == StereoEye_Right ) { localDistortion.LensCenter.x = -localDistortion.LensCenter.x; } return localDistortion; } FovPort CalculateFovFromEyePosition ( float eyeReliefInMeters, float offsetToRightInMeters, float offsetDownwardsInMeters, float lensDiameterInMeters, float extraEyeRotationInRadians /*= 0.0f*/ ) { // 2D view of things: // |-| <--- offsetToRightInMeters (in this case, it is negative) // |=======C=======| <--- lens surface (C=center) // \ | _/ // \ R _/ // \ | _/ // \ | _/ // \|/ // O <--- center of pupil // (technically the lens is round rather than square, so it's not correct to // separate vertical and horizontal like this, but it's close enough) float halfLensDiameter = lensDiameterInMeters * 0.5f; FovPort fovPort; fovPort.UpTan = ( halfLensDiameter + offsetDownwardsInMeters ) / eyeReliefInMeters; fovPort.DownTan = ( halfLensDiameter - offsetDownwardsInMeters ) / eyeReliefInMeters; fovPort.LeftTan = ( halfLensDiameter + offsetToRightInMeters ) / eyeReliefInMeters; fovPort.RightTan = ( halfLensDiameter - offsetToRightInMeters ) / eyeReliefInMeters; if ( extraEyeRotationInRadians > 0.0f ) { // That's the basic looking-straight-ahead eye position relative to the lens. // But if you look left, the pupil moves left as the eyeball rotates, which // means you can see more to the right than this geometry suggests. // So add in the bounds for the extra movement of the pupil. // Beyond 30 degrees does not increase FOV because the pupil starts moving backwards more than sideways. extraEyeRotationInRadians = Alg::Min ( DegreeToRad ( 30.0f ), Alg::Max ( 0.0f, extraEyeRotationInRadians ) ); // The rotation of the eye is a bit more complex than a simple circle. The center of rotation // at 13.5mm from cornea is slightly further back than the actual center of the eye. 
// Additionally the rotation contains a small lateral component as the muscles pull the eye const float eyeballCenterToPupil = 0.0135f; // center of eye rotation const float eyeballLateralPull = 0.001f * (extraEyeRotationInRadians / DegreeToRad ( 30.0f)); // lateral motion as linear function float extraTranslation = eyeballCenterToPupil * sinf ( extraEyeRotationInRadians ) + eyeballLateralPull; float extraRelief = eyeballCenterToPupil * ( 1.0f - cosf ( extraEyeRotationInRadians ) ); fovPort.UpTan = Alg::Max ( fovPort.UpTan , ( halfLensDiameter + offsetDownwardsInMeters + extraTranslation ) / ( eyeReliefInMeters + extraRelief ) ); fovPort.DownTan = Alg::Max ( fovPort.DownTan , ( halfLensDiameter - offsetDownwardsInMeters + extraTranslation ) / ( eyeReliefInMeters + extraRelief ) ); fovPort.LeftTan = Alg::Max ( fovPort.LeftTan , ( halfLensDiameter + offsetToRightInMeters + extraTranslation ) / ( eyeReliefInMeters + extraRelief ) ); fovPort.RightTan = Alg::Max ( fovPort.RightTan, ( halfLensDiameter - offsetToRightInMeters + extraTranslation ) / ( eyeReliefInMeters + extraRelief ) ); } return fovPort; } FovPort CalculateFovFromHmdInfo ( StereoEye eyeType, DistortionRenderDesc const &distortion, HmdRenderInfo const &hmd, float extraEyeRotationInRadians /*= 0.0f*/ ) { FovPort fovPort; float eyeReliefInMeters; float offsetToRightInMeters; if ( eyeType == StereoEye_Right ) { eyeReliefInMeters = hmd.EyeRight.ReliefInMeters; offsetToRightInMeters = hmd.EyeRight.NoseToPupilInMeters - 0.5f * hmd.LensSeparationInMeters; } else { eyeReliefInMeters = hmd.EyeLeft.ReliefInMeters; offsetToRightInMeters = -(hmd.EyeLeft.NoseToPupilInMeters - 0.5f * hmd.LensSeparationInMeters); } // Limit the eye-relief to 6 mm for FOV calculations since this just tends to spread off-screen // and get clamped anyways on DK1 (but in Unity it continues to spread and causes // unnecessarily large render targets) eyeReliefInMeters = Alg::Max(eyeReliefInMeters, 0.006f); // Central view. fovPort = CalculateFovFromEyePosition ( eyeReliefInMeters, offsetToRightInMeters, 0.0f, hmd.LensDiameterInMeters, extraEyeRotationInRadians ); // clamp to the screen fovPort = ClampToPhysicalScreenFov ( eyeType, distortion, fovPort ); return fovPort; } FovPort GetPhysicalScreenFov ( StereoEye eyeType, DistortionRenderDesc const &distortion ) { OVR_UNUSED1 ( eyeType ); FovPort resultFovPort; // Figure out the boundaries of the screen. We take the middle pixel of the screen, // move to each of the four screen edges, and transform those back into TanAngle space. Vector2f dmiddle = distortion.LensCenter; // The gotcha is that for some distortion functions, the map will "wrap around" // for screen pixels that are not actually visible to the user (especially on DK1, // which has a lot of invisible pixels), and map to pixels that are close to the middle. // This means the edges of the screen will actually be // "closer" than the visible bounds, so we'll clip too aggressively. // Solution - step gradually towards the boundary, noting the maximum distance.
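// (FindRange below implements that: it samples numSteps points from the lens centre out to one screen edge and keeps the largest tan-angle seen in each direction.)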
struct FunctionHider { static FovPort FindRange ( Vector2f from, Vector2f to, int numSteps, DistortionRenderDesc const &distortionL ) { FovPort result; result.UpTan = 0.0f; result.DownTan = 0.0f; result.LeftTan = 0.0f; result.RightTan = 0.0f; float stepScale = 1.0f / ( numSteps - 1 ); for ( int step = 0; step < numSteps; step++ ) { float lerpFactor = stepScale * (float)step; Vector2f sample = from + (to - from) * lerpFactor; Vector2f tanEyeAngle = TransformScreenNDCToTanFovSpace ( distortionL, sample ); result.LeftTan = Alg::Max ( result.LeftTan, -tanEyeAngle.x ); result.RightTan = Alg::Max ( result.RightTan, tanEyeAngle.x ); result.UpTan = Alg::Max ( result.UpTan, -tanEyeAngle.y ); result.DownTan = Alg::Max ( result.DownTan, tanEyeAngle.y ); } return result; } }; FovPort leftFovPort = FunctionHider::FindRange( dmiddle, Vector2f( -1.0f, dmiddle.y ), 10, distortion ); FovPort rightFovPort = FunctionHider::FindRange( dmiddle, Vector2f( 1.0f, dmiddle.y ), 10, distortion ); FovPort upFovPort = FunctionHider::FindRange( dmiddle, Vector2f( dmiddle.x, -1.0f ), 10, distortion ); FovPort downFovPort = FunctionHider::FindRange( dmiddle, Vector2f( dmiddle.x, 1.0f ), 10, distortion ); resultFovPort.LeftTan = leftFovPort.LeftTan; resultFovPort.RightTan = rightFovPort.RightTan; resultFovPort.UpTan = upFovPort.UpTan; resultFovPort.DownTan = downFovPort.DownTan; return resultFovPort; } FovPort ClampToPhysicalScreenFov( StereoEye eyeType, DistortionRenderDesc const &distortion, FovPort inputFovPort ) { FovPort resultFovPort; FovPort physicalFovPort = GetPhysicalScreenFov ( eyeType, distortion ); resultFovPort.LeftTan = Alg::Min ( inputFovPort.LeftTan, physicalFovPort.LeftTan ); resultFovPort.RightTan = Alg::Min ( inputFovPort.RightTan, physicalFovPort.RightTan ); resultFovPort.UpTan = Alg::Min ( inputFovPort.UpTan, physicalFovPort.UpTan ); resultFovPort.DownTan = Alg::Min ( inputFovPort.DownTan, physicalFovPort.DownTan ); return resultFovPort; } Sizei CalculateIdealPixelSize ( StereoEye eyeType, DistortionRenderDesc const &distortion, FovPort tanHalfFov, float pixelsPerDisplayPixel ) { OVR_UNUSED(eyeType); // might be useful in the future if we do overlapping fovs Sizei result; // TODO: if the app passes in a FOV that doesn't cover the centre, use the distortion values for the nearest edge/corner to match pixel size. result.w = (int)(0.5f + pixelsPerDisplayPixel * distortion.PixelsPerTanAngleAtCenter.x * ( tanHalfFov.LeftTan + tanHalfFov.RightTan ) ); result.h = (int)(0.5f + pixelsPerDisplayPixel * distortion.PixelsPerTanAngleAtCenter.y * ( tanHalfFov.UpTan + tanHalfFov.DownTan ) ); return result; } Recti GetFramebufferViewport ( StereoEye eyeType, HmdRenderInfo const &hmd ) { Recti result; result.w = hmd.ResolutionInPixels.w/2; result.h = hmd.ResolutionInPixels.h; result.x = 0; result.y = 0; if ( eyeType == StereoEye_Right ) { result.x = (hmd.ResolutionInPixels.w+1)/2; // Round up, not down. } return result; } ScaleAndOffset2D CreateNDCScaleAndOffsetFromFov ( FovPort tanHalfFov ) { float projXScale = 2.0f / ( tanHalfFov.LeftTan + tanHalfFov.RightTan ); float projXOffset = ( tanHalfFov.LeftTan - tanHalfFov.RightTan ) * projXScale * 0.5f; float projYScale = 2.0f / ( tanHalfFov.UpTan + tanHalfFov.DownTan ); float projYOffset = ( tanHalfFov.UpTan - tanHalfFov.DownTan ) * projYScale * 0.5f; ScaleAndOffset2D result; result.Scale = Vector2f(projXScale, projYScale); result.Offset = Vector2f(projXOffset, projYOffset); // Hey - why is that Y.Offset negated?
// It's because a projection matrix transforms from world coords with Y=up, // whereas this is from NDC which is Y=down. return result; } ScaleAndOffset2D CreateUVScaleAndOffsetfromNDCScaleandOffset ( ScaleAndOffset2D scaleAndOffsetNDC, Recti renderedViewport, Sizei renderTargetSize ) { // scaleAndOffsetNDC takes you to NDC space [-1,+1] within the given viewport on the rendertarget. // We want a scale to instead go to actual UV coordinates you can sample with, // which need [0,1] and ignore the viewport. ScaleAndOffset2D result; // Scale [-1,1] to [0,1] result.Scale = scaleAndOffsetNDC.Scale * 0.5f; result.Offset = scaleAndOffsetNDC.Offset * 0.5f + Vector2f(0.5f); // ...but we will have rendered to a subsection of the RT, so scale for that. Vector2f scale( (float)renderedViewport.w / (float)renderTargetSize.w, (float)renderedViewport.h / (float)renderTargetSize.h ); Vector2f offset( (float)renderedViewport.x / (float)renderTargetSize.w, (float)renderedViewport.y / (float)renderTargetSize.h ); result.Scale = result.Scale.EntrywiseMultiply(scale); result.Offset = result.Offset.EntrywiseMultiply(scale) + offset; return result; } Matrix4f CreateProjection( bool rightHanded, FovPort tanHalfFov, float zNear /*= 0.01f*/, float zFar /*= 10000.0f*/ ) { // A projection matrix is very like a scaling from NDC, so we can start with that. ScaleAndOffset2D scaleAndOffset = CreateNDCScaleAndOffsetFromFov ( tanHalfFov ); float handednessScale = 1.0f; if ( rightHanded ) { handednessScale = -1.0f; } Matrix4f projection; // Produces X result, mapping clip edges to [-w,+w] projection.M[0][0] = scaleAndOffset.Scale.x; projection.M[0][1] = 0.0f; projection.M[0][2] = handednessScale * scaleAndOffset.Offset.x; projection.M[0][3] = 0.0f; // Produces Y result, mapping clip edges to [-w,+w] // Hey - why is that YOffset negated? // It's because a projection matrix transforms from world coords with Y=up, // whereas this is derived from an NDC scaling, which is Y=down. projection.M[1][0] = 0.0f; projection.M[1][1] = scaleAndOffset.Scale.y; projection.M[1][2] = handednessScale * -scaleAndOffset.Offset.y; projection.M[1][3] = 0.0f; // Produces Z-buffer result - app needs to fill this in with whatever Z range it wants. // We'll just use some defaults for now. projection.M[2][0] = 0.0f; projection.M[2][1] = 0.0f; projection.M[2][2] = -handednessScale * zFar / (zNear - zFar); projection.M[2][3] = (zFar * zNear) / (zNear - zFar); // Produces W result (= Z in) projection.M[3][0] = 0.0f; projection.M[3][1] = 0.0f; projection.M[3][2] = handednessScale; projection.M[3][3] = 0.0f; return projection; } Matrix4f CreateOrthoSubProjection ( bool rightHanded, StereoEye eyeType, float tanHalfFovX, float tanHalfFovY, float unitsX, float unitsY, float distanceFromCamera, float interpupillaryDistance, Matrix4f const &projection, float zNear /*= 0.0f*/, float zFar /*= 0.0f*/ ) { OVR_UNUSED1 ( rightHanded ); float orthoHorizontalOffset = interpupillaryDistance * 0.5f / distanceFromCamera; switch ( eyeType ) { case StereoEye_Center: orthoHorizontalOffset = 0.0f; break; case StereoEye_Left: break; case StereoEye_Right: orthoHorizontalOffset = -orthoHorizontalOffset; break; default: OVR_ASSERT ( false ); break; } // Current projection maps real-world vector (x,y,1) to the RT. // We want to find the projection that maps the range [-FovPixels/2,FovPixels/2] to // the physical [-orthoHalfFov,orthoHalfFov] // Note moving the offset from M[0][2]+M[1][2] to M[0][3]+M[1][3] - this means // we don't have to feed in Z=1 all the time. 
// The horizontal offset math is a little hinky because the destination is // actually [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset] // So we need to first map [-FovPixels/2,FovPixels/2] to // [-orthoHalfFov+orthoHorizontalOffset,orthoHalfFov+orthoHorizontalOffset]: // x1 = x0 * orthoHalfFov/(FovPixels/2) + orthoHorizontalOffset; // = x0 * 2*orthoHalfFov/FovPixels + orthoHorizontalOffset; // But then we need the same mapping as the existing projection matrix, i.e. // x2 = x1 * Projection.M[0][0] + Projection.M[0][2]; // = x0 * (2*orthoHalfFov/FovPixels + orthoHorizontalOffset) * Projection.M[0][0] + Projection.M[0][2]; // = x0 * Projection.M[0][0]*2*orthoHalfFov/FovPixels + // orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2]; // So in the new projection matrix we need to scale by Projection.M[0][0]*2*orthoHalfFov/FovPixels and // offset by orthoHorizontalOffset*Projection.M[0][0] + Projection.M[0][2]. float orthoScaleX = 2.0f * tanHalfFovX / unitsX; float orthoScaleY = 2.0f * tanHalfFovY / unitsY; Matrix4f ortho; ortho.M[0][0] = projection.M[0][0] * orthoScaleX; ortho.M[0][1] = 0.0f; ortho.M[0][2] = 0.0f; ortho.M[0][3] = -projection.M[0][2] + ( orthoHorizontalOffset * projection.M[0][0] ); ortho.M[1][0] = 0.0f; ortho.M[1][1] = -projection.M[1][1] * orthoScaleY; // Note sign flip (text rendering uses Y=down). ortho.M[1][2] = 0.0f; ortho.M[1][3] = -projection.M[1][2]; if ( fabsf ( zNear - zFar ) < 0.001f ) { ortho.M[2][0] = 0.0f; ortho.M[2][1] = 0.0f; ortho.M[2][2] = 0.0f; ortho.M[2][3] = zFar; } else { ortho.M[2][0] = 0.0f; ortho.M[2][1] = 0.0f; ortho.M[2][2] = zFar / (zNear - zFar); ortho.M[2][3] = (zFar * zNear) / (zNear - zFar); } // No perspective correction for ortho. ortho.M[3][0] = 0.0f; ortho.M[3][1] = 0.0f; ortho.M[3][2] = 0.0f; ortho.M[3][3] = 1.0f; return ortho; } //----------------------------------------------------------------------------------- // A set of "forward-mapping" functions, mapping from framebuffer space to real-world and/or texture space. // This mimics the first half of the distortion shader's function. Vector2f TransformScreenNDCToTanFovSpace( DistortionRenderDesc const &distortion, const Vector2f &framebufferNDC ) { // Scale to TanHalfFov space, but still distorted. Vector2f tanEyeAngleDistorted; tanEyeAngleDistorted.x = ( framebufferNDC.x - distortion.LensCenter.x ) * distortion.TanEyeAngleScale.x; tanEyeAngleDistorted.y = ( framebufferNDC.y - distortion.LensCenter.y ) * distortion.TanEyeAngleScale.y; // Distort.
float radiusSquared = ( tanEyeAngleDistorted.x * tanEyeAngleDistorted.x ) + ( tanEyeAngleDistorted.y * tanEyeAngleDistorted.y ); Vector3f distortionScales = distortion.Lens.DistortionFnScaleRadiusSquaredChroma ( radiusSquared ); *resultR = tanEyeAngleDistorted * distortionScales.x; *resultG = tanEyeAngleDistorted * distortionScales.y; *resultB = tanEyeAngleDistorted * distortionScales.z; } // This mimics the second half of the distortion shader's function. Vector2f TransformTanFovSpaceToRendertargetTexUV( ScaleAndOffset2D const &eyeToSourceUV, Vector2f const &tanEyeAngle ) { Vector2f textureUV; textureUV.x = tanEyeAngle.x * eyeToSourceUV.Scale.x + eyeToSourceUV.Offset.x; textureUV.y = tanEyeAngle.y * eyeToSourceUV.Scale.y + eyeToSourceUV.Offset.y; return textureUV; } Vector2f TransformTanFovSpaceToRendertargetNDC( ScaleAndOffset2D const &eyeToSourceNDC, Vector2f const &tanEyeAngle ) { Vector2f textureNDC; textureNDC.x = tanEyeAngle.x * eyeToSourceNDC.Scale.x + eyeToSourceNDC.Offset.x; textureNDC.y = tanEyeAngle.y * eyeToSourceNDC.Scale.y + eyeToSourceNDC.Offset.y; return textureNDC; } Vector2f TransformScreenPixelToScreenNDC( Recti const &distortionViewport, Vector2f const &pixel ) { // Move to [-1,1] NDC coords. Vector2f framebufferNDC; framebufferNDC.x = -1.0f + 2.0f * ( ( pixel.x - (float)distortionViewport.x ) / (float)distortionViewport.w ); framebufferNDC.y = -1.0f + 2.0f * ( ( pixel.y - (float)distortionViewport.y ) / (float)distortionViewport.h ); return framebufferNDC; } Vector2f TransformScreenPixelToTanFovSpace( Recti const &distortionViewport, DistortionRenderDesc const &distortion, Vector2f const &pixel ) { return TransformScreenNDCToTanFovSpace( distortion, TransformScreenPixelToScreenNDC( distortionViewport, pixel ) ); } Vector2f TransformScreenNDCToRendertargetTexUV( DistortionRenderDesc const &distortion, StereoEyeParams const &eyeParams, Vector2f const &pixel ) { return TransformTanFovSpaceToRendertargetTexUV ( eyeParams, TransformScreenNDCToTanFovSpace ( distortion, pixel ) ); } Vector2f TransformScreenPixelToRendertargetTexUV( Recti const &distortionViewport, DistortionRenderDesc const &distortion, StereoEyeParams const &eyeParams, Vector2f const &pixel ) { return TransformTanFovSpaceToRendertargetTexUV ( eyeParams, TransformScreenPixelToTanFovSpace ( distortionViewport, distortion, pixel ) ); } //----------------------------------------------------------------------------------- // A set of "reverse-mapping" functions, mapping from real-world and/or texture space back to the framebuffer. 
Vector2f TransformTanFovSpaceToScreenNDC( DistortionRenderDesc const &distortion, const Vector2f &tanEyeAngle, bool usePolyApprox /*= false*/ ) { float tanEyeAngleRadius = tanEyeAngle.Length(); float tanEyeAngleDistortedRadius = distortion.Lens.DistortionFnInverseApprox ( tanEyeAngleRadius ); if ( !usePolyApprox ) { tanEyeAngleDistortedRadius = distortion.Lens.DistortionFnInverse ( tanEyeAngleRadius ); } Vector2f tanEyeAngleDistorted = tanEyeAngle; if ( tanEyeAngleRadius > 0.0f ) { tanEyeAngleDistorted = tanEyeAngle * ( tanEyeAngleDistortedRadius / tanEyeAngleRadius ); } Vector2f framebufferNDC; framebufferNDC.x = ( tanEyeAngleDistorted.x / distortion.TanEyeAngleScale.x ) + distortion.LensCenter.x; framebufferNDC.y = ( tanEyeAngleDistorted.y / distortion.TanEyeAngleScale.y ) + distortion.LensCenter.y; return framebufferNDC; } Vector2f TransformRendertargetNDCToTanFovSpace( const ScaleAndOffset2D &eyeToSourceNDC, const Vector2f &textureNDC ) { Vector2f tanEyeAngle = (textureNDC - eyeToSourceNDC.Offset) / eyeToSourceNDC.Scale; return tanEyeAngle; } } //namespace OVR //Just want to make a copy disentangled from all these namespaces! float ExtEvalCatmullRom10Spline ( float const *K, float scaledVal ) { return(OVR::EvalCatmullRom10Spline ( K, scaledVal )); }
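// A minimal round-trip sketch for the fixed-point helpers defined above (illustrative only;
// the sample value and tolerance are assumptions, not calibration data):
#if 0
static void ExampleFixedPointRoundTrip ()
{
    // 14 fractional bits give a resolution of 1/16384 ~= 0.000061.
    float    original = 1.0625f;
    uint16_t encoded  = OVR::EncodeFixedPointUInt16 ( original, 0, 14 );
    float    decoded  = OVR::DecodeFixedPointUInt16 ( encoded, 0, 14 );
    OVR_ASSERT ( fabsf ( decoded - original ) <= 1.0f / 16384.0f );
}
#endif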
{ "pile_set_name": "Github" }
import React from "react" import { EditorInterface } from "./EditorInterface" import MonacoEditor from "./MonacoEditor" type Props = EditorInterface type State = { value: string } const monacoOptions = { minimap: { enabled: false } } export class ProgramEditor extends React.Component<Props, State> { constructor(props: Props) { super(props) this.state = { value: props.initialValue } } render() { const { fontScale, onChange, theme } = this.props const { language, ...options } = this.props.options || { language: "javascript" } const { value } = this.state return ( <MonacoEditor width="100%" height="100%" language={language || "javascript"} value={value} options={{ ...monacoOptions, fontSize: fontScale * 12, theme: theme === "dark" ? "vs-dark" : null, tabSize: 2, insertSpaces: true, ...options }} onChange={(newValue: string, _e: Event) => { this.setState({ value: newValue }) /* setState is asynchronous, so hand the fresh value to the parent rather than reading the (possibly stale) this.state */ onChange(newValue) }} editorDidMount={(editor: any, _monaco: any) => { editor.focus() }} /> ) } }
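// A minimal usage sketch (the prop names below are exactly the ones consumed above;
// any surrounding app wiring is assumed):
//
// <ProgramEditor
//   initialValue={"console.log('hello')"}
//   fontScale={1}
//   theme="dark"
//   options={{ language: "typescript" }}
//   onChange={value => console.log(value)}
// />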
{ "pile_set_name": "Github" }
namespace SmartHunter.Game.Config { public class DebugConfig { public bool UseInternalSkin = false; public bool UseSampleData = false; public bool TraceUniquePointers = false; public bool ShowWeirdRemovableParts = false; } }
{ "pile_set_name": "Github" }
/* * Copyright (c) 2013, Regents of the University of California * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * 1. Redistributions of source code must retain the above copyright notice, this * list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package edu.uci.python.test.datatype; import static edu.uci.python.test.PythonTests.*; import org.junit.*; public class SubscriptTest { @Test public void simple() { String source = "a = [0, 1, 2, 3, 4]\n" + // "b = 1\n" + // "print(a[b])"; assertPrints("1\n", source); } @Test public void simple2() { String source = "a = [0, 1, 2, 3, 4]\n" + // "print (a[1:3:1])"; assertPrints("[1, 2]\n", source); } }
{ "pile_set_name": "Github" }
/* Definitions for getting information about a filesystem. Copyright (C) 1998, 1999, 2000, 2004 Free Software Foundation, Inc. This file is part of the GNU C Library. The GNU C Library is free software; you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation; either version 2.1 of the License, or (at your option) any later version. The GNU C Library is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with the GNU C Library; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA. */ #ifndef _SYS_STATVFS_H #define _SYS_STATVFS_H 1 #include <features.h> /* Get the system-specific definition of `struct statfs'. */ #include <bits/statvfs.h> #ifndef __USE_FILE_OFFSET64 # ifndef __fsblkcnt_t_defined typedef __fsblkcnt_t fsblkcnt_t; /* Type to count file system blocks. */ # define __fsblkcnt_t_defined # endif # ifndef __fsfilcnt_t_defined typedef __fsfilcnt_t fsfilcnt_t; /* Type to count file system inodes. */ # define __fsfilcnt_t_defined # endif #else # ifndef __fsblkcnt_t_defined typedef __fsblkcnt64_t fsblkcnt_t; /* Type to count file system blocks. */ # define __fsblkcnt_t_defined # endif # ifndef __fsfilcnt_t_defined typedef __fsfilcnt64_t fsfilcnt_t; /* Type to count file system inodes. */ # define __fsfilcnt_t_defined # endif #endif __BEGIN_DECLS /* Return information about the filesystem on which FILE resides. */ #ifndef __USE_FILE_OFFSET64 extern int statvfs (__const char *__restrict __file, struct statvfs *__restrict __buf) __THROW __nonnull ((1, 2)); #else # ifdef __REDIRECT_NTH extern int __REDIRECT_NTH (statvfs, (__const char *__restrict __file, struct statvfs *__restrict __buf), statvfs64) __nonnull ((1, 2)); # else # define statvfs statvfs64 # endif #endif #ifdef __USE_LARGEFILE64 extern int statvfs64 (__const char *__restrict __file, struct statvfs64 *__restrict __buf) __THROW __nonnull ((1, 2)); #endif /* Return information about the filesystem containing the file FILDES refers to. */ #ifndef __USE_FILE_OFFSET64 extern int fstatvfs (int __fildes, struct statvfs *__buf) __THROW __nonnull ((2)); #else # ifdef __REDIRECT_NTH extern int __REDIRECT_NTH (fstatvfs, (int __fildes, struct statvfs *__buf), fstatvfs64) __nonnull ((2)); # else # define fstatvfs fstatvfs64 # endif #endif #ifdef __USE_LARGEFILE64 extern int fstatvfs64 (int __fildes, struct statvfs64 *__buf) __THROW __nonnull ((2)); #endif __END_DECLS #endif /* sys/statvfs.h */
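/* A minimal usage sketch of the statvfs interface declared above (illustrative
   only, not part of this header):

   #include <sys/statvfs.h>
   #include <stdio.h>

   int main (void)
   {
     struct statvfs sv;
     if (statvfs ("/", &sv) == 0)
       printf ("fragment size: %lu, free blocks: %lu\n",
               (unsigned long) sv.f_frsize, (unsigned long) sv.f_bfree);
     return 0;
   }
*/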
{ "pile_set_name": "Github" }
<?php abstract class Auth extends Kohana_Auth {}
{ "pile_set_name": "Github" }
'use strict'; describe('code folding', function () { var codeMirror; var getFoldRange; beforeEach(module('codeMirror')); beforeEach(module('codeFolding')); beforeEach(inject(function ($injector) { codeMirror = $injector.get('codeMirror'); getFoldRange = $injector.get('getFoldRange'); })); it('should detect fold ranges of only one line', function () { var indentUnit = 2; var editor = getEditor(codeMirror, [ 'title: Test', 'baseUri: http://www.api.com/{version}/{company}', 'version: v1.1', '/tags:', ' name: Tags', ' description: This is a description of tags', ' get:', ' summary: Get a list of recently tagged media', ' post:', ' summary: Create a new tagged media', ' description: This is a description of creating tags', ], { line: 6, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 6 }); foldRange.should.deep.equal({ from: { line: 6, ch: 6 }, to: { line: 7, ch: 48} }); }); it('should detect fold range for root nodes', function () { var indentUnit = 2; var editor = getEditor(codeMirror, [ 'title: Test', 'baseUri: http://www.api.com/{version}/{company}', 'version: v1.1', '/tags:', ' name: Tags', ' description: This is a description of tags', ' get:', ' summary: Get a list of recently tagged media', ' description: This is a description of getting tags', ' post:', ' summary: Create a new tagged media', ' description: This is a description of creating tags', ], { line: 6, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 3 }); foldRange.should.deep.equal({ from: { line: 3, ch: 6 }, to: { line: 11, ch: 55} }); }); it('should detect fold range for first level nodes', function () { var indentUnit = 2; var editor = getEditor(codeMirror, [ 'title: Test', 'baseUri: http://www.api.com/{version}/{company}', 'version: v1.1', '/tags:', ' name: Tags', ' description: This is a description of tags', ' get:', ' summary: Get a list of recently tagged media', ' description: This is a description of getting tags', ' post:', ' summary: Create a new tagged media', ' description: This is a description of creating tags', ], { line: 6, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 6 }); foldRange.should.deep.equal({ from: { line: 6, ch: 6 }, to: { line: 8, ch: 54} }); foldRange = getFoldRange(editor, { line: 9 }); foldRange.should.deep.equal({ from: { line: 9, ch: 7 }, to: { line: 11, ch: 55} }); }); it('should detect fold ranges for elements with children with more than one indentation level (traits, resourceTypes, etc)', function () { var indentUnit = 2; var editor = getEditor(codeMirror, [ '#%RAML 0.2', '---', 'title: Example API', 'baseUri: http://localhost:3000/api/{company}/', 'version: 1.0', 'traits:', ' - secured:', ' displayName: Secured', ' securitySchemes:', ], { line: 7, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 6 }); foldRange.should.deep.equal({ from: { line: 6, ch: 12 }, to: { line: 8, ch: 22} }); }); it('should not detect ranges for empty lines', function (){ var indentUnit = 2; var editor = getEditor(codeMirror, [ 'title: Test', 'baseUri: http://www.api.com/{version}/{company}', 'version: v1.1', 'documentation:', ' - title: this is the title', ' content: |', ' this is some content', ' with multiple lines', ' with the same indentations', ' that should be ignored', '', ' empty lines to', '/tags:', ' name: Tags', ' description: This is a description of tags', ' get:', ' summary: Get a list of recently tagged media', ' description: This is a description of 
getting tags', ' post:', ' summary: Create a new tagged media', ' description: This is a description of creating tags', ], { line: 0, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 10 }); should.equal(foldRange, undefined); }); it('should not detect ranges inside content blocks', function () { var indentUnit = 2; var editor = getEditor(codeMirror, [ 'title: Test', 'baseUri: http://www.api.com/{version}/{company}', 'version: v1.1', 'documentation:', ' - title: this is the title', ' content: |', ' this is some content', ' with multiple lines', ' with the same indentation', ' that should be ignored', ' ', ' empty lines to', '/tags:', ' name: Tags', ' description: This is a description of tags', ' get:', ' summary: Get a list of recently tagged media', ' description: This is a description of getting tags', ' post:', ' summary: Create a new tagged media', ' description: This is a description of creating tags', ], { line: 0, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 7 }); should.equal(foldRange, undefined); }); it('should not detect ranges for items with more than one indentation level and no children (traits, resourceTypes, etc)', function (){ var indentUnit = 2; var editor = getEditor(codeMirror, [ '#%RAML 0.2', '---', 'title: Example API', 'baseUri: http://localhost:3000/api/{company}/', 'version: 1.0', 'traits:', ' - secured:', ' displayName: Secured', ' securitySchemes:', ], { line: 7, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 7 }); should.equal(foldRange, undefined); }); it('should ignore empty lines inside content blocks', function () { var indentUnit = 2; var editor = getEditor(codeMirror, [ 'title: Test', 'baseUri: http://www.api.com/{version}/{company}', 'version: v1.1', 'documentation:', ' - title: this is the title', ' content: |', ' this is some content', ' with multiple lines', ' with the same indentations', ' that should be ignored', '', ' empty lines to', ' as long as they are inside a content element', '/tags:', ' name: Tags', ' description: This is a description of tags', ' get:', ' summary: Get a list of recently tagged media', ' description: This is a description of getting tags', ' post:', ' summary: Create a new tagged media', ' description: This is a description of creating tags', ], { line: 0, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 5 }); foldRange.should.deep.equal({ from: { line: 5, ch: 13 }, to: { line: 12, ch: 49} }); }); it('should not allow folding of scalars even when having spaces after it (RT-325)', function (){ var indentUnit = 2; var editor = getEditor(codeMirror, [ '#%RAML 0.8', '---', 'title: Test', 'resourceTypes: ', ' - base:', ' is: [hola]', ' get:', 'traits:', ' - hola:', ' displayName: HOL', '/tags:', ' type:', ' base:', ' displayName: Tags', ' get:' ], { line: 7, ch: 0 }, { indentUnit: indentUnit } ); var foldRange = getFoldRange(editor, { line: 2 }); should.not.exist(foldRange); }); it('should fold spaces inside the last fold level (RT-113)', function (){ var foldRange; var indentUnit = 2; var editor = getEditor(codeMirror, [ '#%RAML 0.8', '---', 'title: Test', 'resourceTypes: ', ' - base:', ' is: [hola]', ' get:', 'traits:', ' - hola:', ' displayName: HOL', '/tags:', ' /abc:', ' /cde:', '', ' ', ' ', ' ', ' ', ' ' ], { line: 7, ch: 0 }, { indentUnit: indentUnit } ); foldRange = getFoldRange(editor, { line: 11 }); foldRange.should.deep.equal({ from: { line: 11, ch: 7 }, to: { line: 18, ch: 
2} }); foldRange = getFoldRange(editor, { line: 13 }); should.not.exist(foldRange); }); it('should ignore spaces and include them on the parent fold', function (){ var foldRange; var indentUnit = 2; var editor = getEditor(codeMirror, [ '#%RAML 0.8', '---', 'title: Test', 'resourceTypes: ', ' - base:', ' is: [hola]', ' get:', 'traits:', ' - hola:', ' displayName: HOL', '/tags:', ' /abc:', ' /cde:', '', ' ', ' ', ' ', ' ', ' ', '/foo:', ' /bar:', ' /ggg:' ], { line: 7, ch: 0 }, { indentUnit: indentUnit } ); foldRange = getFoldRange(editor, { line: 10 }); foldRange.should.deep.equal({ from: { line: 10, ch: 6 }, to: { line: 18, ch: 2} }); foldRange = getFoldRange(editor, { line: 19 }); foldRange.should.deep.equal({ from: { line: 19, ch: 5 }, to: { line: 21, ch: 9} }); foldRange = getFoldRange(editor, { line: 13 }); should.not.exist(foldRange); }); });
{ "pile_set_name": "Github" }
// <auto-generated> // Copyright (c) Microsoft Corporation. All rights reserved. // Licensed under the MIT License. See License.txt in the project root for // license information. // // Code generated by Microsoft (R) AutoRest Code Generator. // </auto-generated> namespace Microsoft.Azure.Management.ContainerInstance.Fluent.Models { using Microsoft.Rest; using Newtonsoft.Json; using System.Linq; /// <summary> /// Represents a volume that is populated with the contents of a git /// repository /// </summary> public partial class GitRepoVolume { /// <summary> /// Initializes a new instance of the GitRepoVolume class. /// </summary> public GitRepoVolume() { CustomInit(); } /// <summary> /// Initializes a new instance of the GitRepoVolume class. /// </summary> /// <param name="repository">Repository URL</param> /// <param name="directory">Target directory name. Must not contain or /// start with '..'. If '.' is supplied, the volume directory will be /// the git repository. Otherwise, if specified, the volume will /// contain the git repository in the subdirectory with the given /// name.</param> /// <param name="revision">Commit hash for the specified /// revision.</param> public GitRepoVolume(string repository, string directory = default(string), string revision = default(string)) { Directory = directory; Repository = repository; Revision = revision; CustomInit(); } /// <summary> /// An initialization method that performs custom operations like setting defaults /// </summary> partial void CustomInit(); /// <summary> /// Gets or sets target directory name. Must not contain or start with /// '..'. If '.' is supplied, the volume directory will be the git /// repository. Otherwise, if specified, the volume will contain the /// git repository in the subdirectory with the given name. /// </summary> [JsonProperty(PropertyName = "directory")] public string Directory { get; set; } /// <summary> /// Gets or sets repository URL /// </summary> [JsonProperty(PropertyName = "repository")] public string Repository { get; set; } /// <summary> /// Gets or sets commit hash for the specified revision. /// </summary> [JsonProperty(PropertyName = "revision")] public string Revision { get; set; } /// <summary> /// Validate the object. /// </summary> /// <exception cref="ValidationException"> /// Thrown if validation fails /// </exception> public virtual void Validate() { if (Repository == null) { throw new ValidationException(ValidationRules.CannotBeNull, "Repository"); } } } }
{ "pile_set_name": "Github" }
[ { "PublicDescription": "This event counts the unhalted core cycles during which the thread is in the ring 0 privileged mode.", "EventCode": "0x5C", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "CPL_CYCLES.RING0", "SampleAfterValue": "2000003", "BriefDescription": "Unhalted core cycles when the thread is in ring 0", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "PublicDescription": "This event counts when there is a transition from ring 1,2 or 3 to ring0.", "EventCode": "0x5C", "Counter": "0,1,2,3", "UMask": "0x1", "EdgeDetect": "1", "EventName": "CPL_CYCLES.RING0_TRANS", "SampleAfterValue": "100007", "BriefDescription": "Number of intervals between processor halts while thread is in ring 0", "CounterMask": "1", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "PublicDescription": "This event counts unhalted core cycles during which the thread is in rings 1, 2, or 3.", "EventCode": "0x5C", "Counter": "0,1,2,3", "UMask": "0x2", "EventName": "CPL_CYCLES.RING123", "SampleAfterValue": "2000003", "BriefDescription": "Unhalted core cycles when thread is in rings 1, 2, or 3", "CounterHTOff": "0,1,2,3,4,5,6,7" }, { "PublicDescription": "This event counts cycles in which the L1 and L2 are locked due to a UC lock or split lock. A lock is asserted in case of locked memory access, due to noncacheable memory, locked operation that spans two cache lines, or a page walk from the noncacheable page table. L1D and L2 locks have a very high performance penalty and it is highly recommended to avoid such access.", "EventCode": "0x63", "Counter": "0,1,2,3", "UMask": "0x1", "EventName": "LOCK_CYCLES.SPLIT_LOCK_UC_LOCK_DURATION", "SampleAfterValue": "2000003", "BriefDescription": "Cycles when L1 and L2 are locked due to UC or split lock", "CounterHTOff": "0,1,2,3,4,5,6,7" } ]
{ "pile_set_name": "Github" }
// NOTE: This file was generated by the ServiceGenerator. // ---------------------------------------------------------------------------- // API: // Cloud OS Login API (oslogin/v1) // Description: // You can use OS Login to manage access to your VM instances using IAM roles. // Documentation: // https://cloud.google.com/compute/docs/oslogin/ #if SWIFT_PACKAGE || GTLR_USE_MODULAR_IMPORT @import GoogleAPIClientForRESTCore; #elif GTLR_BUILT_AS_FRAMEWORK #import "GTLR/GTLRService.h" #else #import "GTLRService.h" #endif #if GTLR_RUNTIME_VERSION != 3000 #error This file was generated by a different version of ServiceGenerator which is incompatible with this GTLR library source. #endif // Generated comments include content from the discovery document; avoid them // causing warnings since clang's checks are some what arbitrary. #pragma clang diagnostic push #pragma clang diagnostic ignored "-Wdocumentation" NS_ASSUME_NONNULL_BEGIN // ---------------------------------------------------------------------------- // Authorization scopes /** * Authorization scope: View and manage your data across Google Cloud Platform * services * * Value "https://www.googleapis.com/auth/cloud-platform" */ FOUNDATION_EXTERN NSString * const kGTLRAuthScopeCloudOSLoginCloudPlatform; /** * Authorization scope: View and manage your Google Compute Engine resources * * Value "https://www.googleapis.com/auth/compute" */ FOUNDATION_EXTERN NSString * const kGTLRAuthScopeCloudOSLoginCompute; // ---------------------------------------------------------------------------- // GTLRCloudOSLoginService // /** * Service for executing Cloud OS Login API queries. * * You can use OS Login to manage access to your VM instances using IAM roles. */ @interface GTLRCloudOSLoginService : GTLRService // No new methods // Clients should create a standard query with any of the class methods in // GTLRCloudOSLoginQuery.h. The query can the be sent with GTLRService's execute // methods, // // - (GTLRServiceTicket *)executeQuery:(GTLRQuery *)query // completionHandler:(void (^)(GTLRServiceTicket *ticket, // id object, NSError *error))handler; // or // - (GTLRServiceTicket *)executeQuery:(GTLRQuery *)query // delegate:(id)delegate // didFinishSelector:(SEL)finishedSelector; // // where finishedSelector has a signature of: // // - (void)serviceTicket:(GTLRServiceTicket *)ticket // finishedWithObject:(id)object // error:(NSError *)error; // // The object passed to the completion handler or delegate method // is a subclass of GTLRObject, determined by the query method executed. @end NS_ASSUME_NONNULL_END #pragma clang diagnostic pop
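Following the execute pattern documented in the comments above, a sketch of a typical call. The query class and its `queryWithName:` constructor are assumptions based on the `GTLRCloudOSLoginQuery.h` naming convention, and `myAuthorizer` and the user name are placeholders:

```objc
GTLRCloudOSLoginService *service = [[GTLRCloudOSLoginService alloc] init];
service.authorizer = myAuthorizer;  // placeholder OAuth 2 authorizer

// Assumed generated query class; see GTLRCloudOSLoginQuery.h.
GTLRCloudOSLoginQuery_UsersGetLoginProfile *query =
    [GTLRCloudOSLoginQuery_UsersGetLoginProfile
        queryWithName:@"users/user@example.com"];

[service executeQuery:query
    completionHandler:^(GTLRServiceTicket *ticket, id object, NSError *error) {
      if (error) {
        NSLog(@"OS Login request failed: %@", error);
      } else {
        NSLog(@"Login profile: %@", object);
      }
    }];
```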
{ "pile_set_name": "Github" }
require 'test_helper' class CreationTest < MiniTest::Test class CallbackTest < TestResource include JsonApiClient::Helpers::Callbacks before_save do self.foo = 10 end after_create :after_create_method def after_create_method self.bar = 100 end end class Author < TestResource end class User < TestResource has_one :skill_level properties :first_name, type: :string end class SkillLevel < TestResource property :title, type: :string end def stub_simple_creation stub_request(:post, "http://example.com/articles") .with(headers: {content_type: "application/vnd.api+json", accept: "application/vnd.api+json"}, body: { data: { type: "articles", attributes: { title: "Rails is Omakase" } } }.to_json) .to_return(headers: {content_type: "application/vnd.api+json"}, body: { data: { type: "articles", id: "1", attributes: { title: "Rails is Omakase" }, links: { self: "http://example.com/articles/1", other: { href: "http://example.com/other" } } } }.to_json) end def test_can_create_with_class_method stub_simple_creation article = Article.create({ title: "Rails is Omakase" }) assert article.persisted?, article.inspect assert_equal "1", article.id assert_equal "Rails is Omakase", article.title end def test_changed_attributes_empty_after_create_with_class_method stub_simple_creation article = Article.create({ title: "Rails is Omakase" }) assert_empty article.changed_attributes end def test_can_create_with_new_record_and_save stub_simple_creation article = Article.new({ title: "Rails is Omakase" }) assert article.new_record? assert article.save assert article.persisted? assert_equal(false, article.new_record?) assert_equal "1", article.id end def test_can_create_with_includes_and_fields stub_request(:post, "http://example.com/articles") .with( headers: { content_type: "application/vnd.api+json", accept: "application/vnd.api+json" }, query: { include: 'comments,author.comments', fields: { articles: 'title', authors: 'name' } }, body: { data: { type: "articles", attributes: { title: "Rails is Omakase" } } }.to_json ).to_return( headers: { content_type: "application/vnd.api+json" }, body: { data: { type: "articles", id: "1", attributes: { title: "Rails is Omakase" }, relationships: { comments: { data: [ { id: "2", type: "comments" } ] }, author: { data: nil } } }, included: [ { id: "2", type: "comments", attributes: { body: "it is isn't it ?" } } ] }.to_json ) article = Article.new({ title: "Rails is Omakase" }) article.request_includes(:comments, author: :comments). request_select(:title, authors: [:name]) assert article.save assert article.persisted? assert_equal "1", article.id assert_nil article.author assert_equal 1, article.comments.size assert_equal "2", article.comments.first.id assert_equal "it is isn't it ?", article.comments.first.body end def test_can_create_with_links stub_simple_creation article = Article.new({ title: "Rails is Omakase" }) assert article.save assert article.persisted? 
assert_equal "http://example.com/articles/1", article.links.self end def test_can_create_with_new_record_with_relationships_and_save stub_request(:post, "http://example.com/articles") .with(headers: {content_type: "application/vnd.api+json", accept: "application/vnd.api+json"}, body: { data: { type: "articles", attributes: { title: "Rails is Omakase" } } }.to_json) .to_return(headers: {content_type: "application/vnd.api+json"}, body: { data: { type: "articles", id: "1", attributes: { title: "Rails is Omakase" }, relationships: { comments: { data: [ { id: "1", type: "comments" } ] } } }, included: [ { id: "1", type: "comments", attributes: { comments: "it is isn't it ?" } } ] }.to_json) article = Article.new({title: "Rails is Omakase"}) assert article.save assert article.persisted? assert_equal article.comments.length, 1 assert_equal "1", article.id end def test_correct_create_with_nil_attribute_value stub_request(:post, "http://example.com/authors") .with(headers: { content_type: "application/vnd.api+json", accept: "application/vnd.api+json" }, body: { data: { type: "authors", attributes: { name: "John Doe", description: nil } } }.to_json) .to_return(headers: { content_type: "application/vnd.api+json" }, body: { data: { type: "authors", id: "1", attributes: { name: "John Doe", description: nil } } }.to_json) author = Author.new({ name: "John Doe", description: nil }) assert author.save end def test_changed_attributes_empty_after_create_with_new_record_and_save stub_simple_creation article = Article.new({title: "Rails is Omakase"}) article.save assert_empty article.changed_attributes end def test_callbacks_on_update stub_request(:post, "http://example.com/callback_tests") .with(headers: { content_type: "application/vnd.api+json", accept: "application/vnd.api+json" }, body: { data: { type: "callback_tests", attributes: { foo: 10, bar: 1 } } }.to_json) .to_return(headers: { content_type: "application/vnd.api+json" }, body: { data: { type: "callback_tests", id: "1", attributes: { foo: 10, bar: 1 } } }.to_json) callback_test = CallbackTest.create({foo: 1, bar: 1}) assert_equal 100, callback_test.bar end def test_create_with_relationships_in_payload stub_request(:post, 'http://example.com/articles') .with(headers: {content_type: 'application/vnd.api+json', accept: 'application/vnd.api+json'}, body: { data: { type: 'articles', relationships: { comments: { data: [ { type: 'comments', id: '2' } ] } }, attributes: { title: 'Rails is Omakase' } } }.to_json) .to_return(headers: {content_type: 'application/vnd.api+json'}, body: { data: { type: 'articles', id: '1', attributes: { title: 'Rails is Omakase' } } }.to_json) article = Article.new(title: 'Rails is Omakase', relationships: {comments: [Comment.new(id: '2')]}) assert article.save assert article.persisted? assert_equal "1", article.id end def test_create_with_custom_type stub_request(:post, 'http://example.com/document--files') .with(headers: {content_type: 'application/vnd.api+json', accept: 'application/vnd.api+json'}, body: { data: { type: 'document--files', attributes: { url: 'http://example.com/downloads/1.pdf' } } }.to_json) .to_return(headers: {content_type: 'application/vnd.api+json'}, body: { data: { type: 'document--files', id: '1', attributes: { url: 'http://example.com/downloads/1.pdf' } } }.to_json) file = DocumentFile.new(url: 'http://example.com/downloads/1.pdf') assert file.save assert file.persisted? 
assert_equal '1', file.id end def test_access_loaded_relationship_instance stub_request(:get, 'http://example.com/skill_levels/1') .to_return(headers: {content_type: 'application/vnd.api+json'}, body: { data: { type: 'skill_levels', id: '1', attributes: { title: 'newbie' } } }.to_json) stub_request(:get, 'http://example.com/skill_levels/2') .to_return(headers: {content_type: 'application/vnd.api+json'}, body: { data: { type: 'skill_levels', id: '2', attributes: { title: 'pro' } } }.to_json) skill_level = SkillLevel.find(1).first user = User.new(first_name: 'Joe', relationships: { skill_level: skill_level }) assert_equal ({'data'=>{'type'=>'skill_levels', 'id'=>'1'}}), user.relationships.skill_level assert_kind_of SkillLevel, user.skill_level assert_equal '1', user.skill_level.id # test that object is cached and not recreated each time assert_equal user.skill_level.object_id, user.skill_level.object_id user.skill_level = SkillLevel.find(2).first assert_equal '2', user.skill_level.id end end
{ "pile_set_name": "Github" }
{ "type": "bundle", "id": "bundle--beef2ea8-43bb-4ae0-b371-7717839755b9", "spec_version": "2.0", "objects": [ { "type": "relationship", "id": "relationship--f5e3b137-a200-4caa-9f02-88cba0ca4e80", "created_by_ref": "identity--e50ab59c-5c4f-4d40-bf6a-d58418d89bcd", "created": "2014-06-23T00:00:00.000Z", "modified": "2019-04-04T00:00:00.000Z", "relationship_type": "mitigates", "source_ref": "course-of-action--a99fa1c3-7798-453a-8c18-1387446a4827", "target_ref": "attack-pattern--7f0f7de2-bf09-4f60-86bb-6933192b7128", "object_marking_refs": [ "marking-definition--17d82bb2-eeeb-4898-bda5-3ddbcd2b799d" ], "x_capec_version": "3.3" } ] }
{ "pile_set_name": "Github" }
;(function() {
  var utils = window.utils = {}

  // Parse the query string once at load time into a name -> value map,
  // URI-decoding both names and values.
  var params = {}
  var str = location.search
  if (str && str.length > 1) {
    str.substring(1).split('&').forEach(function(item) {
      var parts = item.split('=')
      var name = decodeURIComponent(parts[0])
      var value = decodeURIComponent(parts[1])
      params[name] = value
    })
  }

  // Look up a single query-string parameter by name.
  utils.url_params = function(name) {
    return params[name]
  }
})();
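Usage sketch, assuming the page was loaded with a query string:

```js
// For a page at index.html?debug=1&name=J%C3%B3n
utils.url_params('debug')    // => "1"
utils.url_params('name')     // => "Jón" (names and values are URI-decoded)
utils.url_params('missing')  // => undefined
```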
{ "pile_set_name": "Github" }
package com.hkm.slider.Tricks;

import android.os.Build;
import android.os.Handler;
import android.support.annotation.Nullable;
import android.view.View;

import com.hkm.slider.SliderAdapter;

/**
 * Created by hesk on 17/3/16.
 *
 * Fades a companion view (e.g. a pager indicator) out when the slider has at
 * most one slide, and fades it back in once more slides are available.
 */
public class AnimationHelper {
    public static long mTransitionAnimation = 1000;

    public static void notify_component(final @Nullable View mObject, final SliderAdapter mSliderAdapter, Handler postHandler, final int delayPost) {
        mTransitionAnimation = delayPost;
        notify_component(mObject, mSliderAdapter, postHandler);
    }

    public static void notify_component(final @Nullable View mObject, final SliderAdapter mSliderAdapter, Handler postHandler) {
        if (mObject == null) return;
        int count = mSliderAdapter.getCount();
        if (count <= 1) {
            // One slide or none: fade the component out after the delay and hide it.
            postHandler.postDelayed(new Runnable() {
                @Override
                public void run() {
                    if (mObject != null) {
                        if (Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
                            // withEndAction is only available from API 16 (Jelly Bean).
                            mObject.animate().alpha(0).withEndAction(new Runnable() {
                                @Override
                                public void run() {
                                    mObject.setVisibility(View.GONE);
                                }
                            });
                        } else {
                            mObject.animate().alpha(0);
                        }
                    }
                }
            }, mTransitionAnimation);
        } else {
            // Several slides: only act if the component is currently hidden.
            if (mObject.getVisibility() != View.GONE) return;
            postHandler.postDelayed(new Runnable() {
                @Override
                public void run() {
                    if (mObject != null) {
                        // Re-check after the delay; something else may have shown it.
                        if (mObject.getVisibility() != View.GONE) return;
                        mObject.setVisibility(View.VISIBLE);
                        mObject.setAlpha(0);
                        mObject.animate().alpha(1);
                    }
                }
            }, mTransitionAnimation);
        }
    }
}
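A usage sketch from a slider's setup code; the view and adapter names are illustrative:

```java
// Hide the pager indicator when the adapter holds a single slide, or reveal
// it again once more slides are added, using a 500 ms transition delay.
AnimationHelper.notify_component(pagerIndicatorView, sliderAdapter,
        new Handler(), 500);
```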
{ "pile_set_name": "Github" }
package com.hubspot.singularity.config; import javax.validation.constraints.NotNull; public class ZooKeeperConfiguration { @NotNull private String quorum; @NotNull private int sessionTimeoutMillis = 600_000; @NotNull private int connectTimeoutMillis = 60_000; @NotNull private int retryBaseSleepTimeMilliseconds = 1_000; @NotNull private int retryMaxTries = 3; @NotNull private String zkNamespace; private long abortAfterConnectionLostForMillis = 30000; public String getQuorum() { return quorum; } public void setQuorum(String quorum) { this.quorum = quorum; } public int getSessionTimeoutMillis() { return sessionTimeoutMillis; } public void setSessionTimeoutMillis(int sessionTimeoutMillis) { this.sessionTimeoutMillis = sessionTimeoutMillis; } public int getConnectTimeoutMillis() { return connectTimeoutMillis; } public void setConnectTimeoutMillis(int connectTimeoutMillis) { this.connectTimeoutMillis = connectTimeoutMillis; } public int getRetryBaseSleepTimeMilliseconds() { return retryBaseSleepTimeMilliseconds; } public void setRetryBaseSleepTimeMilliseconds(int retryBaseSleepTimeMilliseconds) { this.retryBaseSleepTimeMilliseconds = retryBaseSleepTimeMilliseconds; } public int getRetryMaxTries() { return retryMaxTries; } public void setRetryMaxTries(int retryMaxTries) { this.retryMaxTries = retryMaxTries; } public String getZkNamespace() { return zkNamespace; } public void setZkNamespace(String zkNamespace) { this.zkNamespace = zkNamespace; } public long getAbortAfterConnectionLostForMillis() { return abortAfterConnectionLostForMillis; } public void setAbortAfterConnectionLostForMillis( long abortAfterConnectionLostForMillis ) { this.abortAfterConnectionLostForMillis = abortAfterConnectionLostForMillis; } }
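The bean above maps onto a Dropwizard-style YAML section. The key names below mirror the bean properties; the `zookeeper:` key and the concrete values are assumptions for illustration:

```yaml
zookeeper:
  quorum: zk1:2181,zk2:2181,zk3:2181   # required
  zkNamespace: singularity             # required
  sessionTimeoutMillis: 600000
  connectTimeoutMillis: 60000
  retryBaseSleepTimeMilliseconds: 1000
  retryMaxTries: 3
  abortAfterConnectionLostForMillis: 30000
```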
{ "pile_set_name": "Github" }
-----BEGIN CERTIFICATE----- MIIDFzCCAf+gAwIBAgIBATANBgkqhkiG9w0BAQsFADAmMSQwIgYDVQQDExtUZXN0 IGludGVybWVkaWF0ZSBtYXhwYXRoIDEwHhcNMDEwMTAxMDAwMDAwWhcNMzAxMjMx MjM1OTU5WjAmMSQwIgYDVQQDExtUZXN0IGludGVybWVkaWF0ZSBtYXhwYXRoIDEw ggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQDaRpQTF3aPHDvaKlMP2+jz MIjDVfCwnusAfVShz2ujhkNKPF6pLYMJ4da6I4KTIWwgKRO1F3jK+mRqvzbapjtY TuWnVeSXoVmcr4O4+BAaRoPIlqNIzaSjCdGMdbgZJJYxHWS0x2uGyv88tjSqgzUt slrPfzOfscOVxNnnAIOxU3F4X96udFfjOk9iGkPQcZ7U8gk/CCBdnkTP7fWPeOLP UX85vykFSkWD7nV+2IU7fYqgiQeCaKmIbNxxDtMD5CcWOCgU1AjfeLPu41BXUa2M XvvGGurSGFqg9/IuanRoWMa1XstS2rbAyUNhIDWKGzPy46AiytVlLLBKn9DlNm4t AgMBAAGjUDBOMAwGA1UdEwQFMAMBAQEwHQYDVR0OBBYEFDgZfHnVZvx+cXUthBhH i837Wfw1MB8GA1UdIwQYMBaAFDgZfHnVZvx+cXUthBhHi837Wfw1MA0GCSqGSIb3 DQEBCwUAA4IBAQDPQC9vYJegBgVZHu0StoRT7L6ShWcZc5Z/TeyrqJBdoiguSRq5 kMiFXZpksxeFlIUYry21MigYqxOXGZ2GZYNqhLpYVh7hzAY8uYvf4U70q88zj7mw gIcgEaMd71GHqbb2O5x3fCN7vLeU5DFYBWfqLlkL57Uqr2aRDHlucryyRNordicN WbCxPozmqtbNMABEUbjLMCCuzJeNRSZbS0OOod6Xd3N00EK7PqaRhbihbq3L6gUG MjUI2keSxW4vXcDfI5Hqem6SHpCc3retx2VUgwIDAoTrw7E4dwmyC4Tp7TDJL/+d GU8qhRmoQer7mLUzpb3s8mq/4rZx+alTQ3gu -----END CERTIFICATE-----
{ "pile_set_name": "Github" }
import matplotlib.pyplot as plt import random import numpy as np import cv2 def visualize(img, det_boxes=None, gt_boxes=None, keypoints=None, is_show_label=True, show_cls_label = True, show_skeleton_labels=False, classes=None, thresh=0.5, name='detection', return_img=False): if is_show_label: if classes == 'voc': classes = [ '__background__', 'aeroplane', 'bicycle', 'bird', 'boat', 'bottle', 'bus', 'car', 'cat', 'chair', 'cow', 'diningtable', 'dog', 'horse', 'motorbike', 'person', 'pottedplant', 'sheep', 'sofa', 'train', 'tvmonitor' ] elif classes == 'coco': classes = [ "__background__", "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light", "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow", "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee", "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard", "tennis racket","bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple", "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch", "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone", "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear", "hair drier", "toothbrush" ] color_map = [(0, 0, 0), (0, 255, 0), (255, 128, 0), (255, 255, 0), (255, 0, 255), (255, 128, 255), (128, 255, 128), (128, 255, 255), (255, 255, 128), (0, 128, 255), (0, 255, 128), (255, 0, 128), (0, 215, 255), (255, 0, 255), (255, 128, 0), (128, 128, 255), (0, 255, 255), (0, 69, 255), (0, 69, 255), (255, 204, 204), (204, 255, 255)] im = np.array(img).copy().astype(np.uint8) colors = dict() font = cv2.FONT_HERSHEY_SIMPLEX if det_boxes is not None: det_boxes = np.array(det_boxes) for det in det_boxes: bb = det[:4].astype(int) if is_show_label: if show_cls_label: cls_id = int(det[4]) if cls_id == 0: continue if len(det) > 4: score = det[-1] else: score = 1. 
if thresh < score: if show_cls_label: if cls_id not in colors: colors[cls_id] = (random.random() * 128 + 128, random.random() * 128 + 128, random.random() * 128 + 128) cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), colors[cls_id], 1) if classes and len(classes) > cls_id: cls_name = classes[cls_id] else: cls_name = str(cls_id) cv2.putText(im, '{:s} {:.3f}'.format(cls_name, score), (bb[0], bb[1] - 2), font, 0.7, colors[cls_id], 2) else: cv2.putText(im, '{:.3f}'.format(score), (bb[0], bb[1] - 2), font, 0.7, (255, 0, 0), 2) cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), (139, 139, 139), 1) else: cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), (random.random() * 128 + 128, random.random() * 128 + 128, random.random() * 128 + 128), 1) if gt_boxes is not None: gt_boxes = np.array(gt_boxes) for gt in gt_boxes: bb = gt[:4].astype(int) if is_show_label: cls_id = int(gt[4]) cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 3) if classes and len(classes) > cls_id: cls_name = classes[cls_id] else: cls_name = str(cls_id) cv2.putText(im, '{:s}'.format(cls_name), (bb[0], bb[1] - 2), \ font, 0.5, (0, 0, 255), 1) else: cv2.rectangle(im, (bb[0], bb[1]), (bb[2], bb[3]), (0, 0, 255), 3) if keypoints is not None: keypoints = np.array(keypoints).astype(int) keypoints = keypoints.reshape(-1, 17, 3) if False: idx = np.where(det_boxes[:, -1] > thresh) keypoints = keypoints[idx] for i in range(len(keypoints)): draw_skeleton(im, keypoints[i], show_skeleton_labels) else: for i in range(len(keypoints)): draw_skeleton(im, keypoints[i], show_skeleton_labels) if return_img: return im.copy() cv2.imshow(name, im) cv2.waitKey(0) # while True: # c = cv2.waitKey(0) # if c == ord('d'): # return # elif c == ord('n'): # break def draw_skeleton(aa, kp, show_skeleton_labels=False): skeleton = [[16, 14], [14, 12], [17, 15], [15, 13], [12, 13], [6, 12], [7, 13], [6, 7], [6, 8], [7, 9], [8, 10], [9, 11], [2, 3], [1, 2], [1, 3], [2, 4], [3, 5], [4, 6], [5, 7]] kp_names = ['nose', 'l_eye', 'r_eye', 'l_ear', 'r_ear', 'l_shoulder', 'r_shoulder', 'l_elbow', 'r_elbow', 'l_wrist', 'r_wrist', 'l_hip', 'r_hip', 'l_knee', 'r_knee', 'l_ankle', 'r_ankle'] for i, j in skeleton: if kp[i-1][0] >= 0 and kp[i-1][1] >= 0 and kp[j-1][0] >= 0 and kp[j-1][1] >= 0 and \ (len(kp[i-1]) <= 2 or (len(kp[i-1]) > 2 and kp[i-1][2] > 0.1 and kp[j-1][2] > 0.1)): cv2.line(aa, tuple(kp[i-1][:2]), tuple(kp[j-1][:2]), (0,255,255), 2) for j in range(len(kp)): if kp[j][0] >= 0 and kp[j][1] >= 0: if len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 1.1): cv2.circle(aa, tuple(kp[j][:2]), 2, tuple((0,0,255)), 2) elif len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 0.1): cv2.circle(aa, tuple(kp[j][:2]), 2, tuple((255,0,0)), 2) if show_skeleton_labels and (len(kp[j]) <= 2 or (len(kp[j]) > 2 and kp[j][2] > 0.1)): cv2.putText(aa, kp_names[j], tuple(kp[j][:2]), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (255, 255, 0))
{ "pile_set_name": "Github" }
// Copyright 2014 The Chromium Authors. All rights reserved. // Use of this source code is governed by a BSD-style license that can be // found in the LICENSE file. // This file has been auto-generated by code_generator_v8.py. DO NOT MODIFY! #ifndef V8FederatedCredential_h #define V8FederatedCredential_h #include "bindings/core/v8/ScriptWrappable.h" #include "bindings/core/v8/ToV8.h" #include "bindings/core/v8/V8Binding.h" #include "bindings/core/v8/V8DOMWrapper.h" #include "bindings/core/v8/WrapperTypeInfo.h" #include "bindings/modules/v8/V8Credential.h" #include "modules/ModulesExport.h" #include "modules/credentialmanager/FederatedCredential.h" #include "platform/heap/Handle.h" namespace blink { class V8FederatedCredential { public: MODULES_EXPORT static bool hasInstance(v8::Local<v8::Value>, v8::Isolate*); static v8::Local<v8::Object> findInstanceInPrototypeChain(v8::Local<v8::Value>, v8::Isolate*); MODULES_EXPORT static v8::Local<v8::FunctionTemplate> domTemplate(v8::Isolate*); static FederatedCredential* toImpl(v8::Local<v8::Object> object) { return toScriptWrappable(object)->toImpl<FederatedCredential>(); } MODULES_EXPORT static FederatedCredential* toImplWithTypeCheck(v8::Isolate*, v8::Local<v8::Value>); MODULES_EXPORT static const WrapperTypeInfo wrapperTypeInfo; static void refObject(ScriptWrappable*); static void derefObject(ScriptWrappable*); template<typename VisitorDispatcher> static void trace(VisitorDispatcher visitor, ScriptWrappable* scriptWrappable) { visitor->trace(scriptWrappable->toImpl<FederatedCredential>()); } static void constructorCallback(const v8::FunctionCallbackInfo<v8::Value>&); static const int internalFieldCount = v8DefaultWrapperInternalFieldCount + 0; static void installConditionallyEnabledProperties(v8::Local<v8::Object>, v8::Isolate*) { } static void preparePrototypeObject(v8::Isolate*, v8::Local<v8::Object> prototypeObject, v8::Local<v8::FunctionTemplate> interfaceTemplate) { } }; template <> struct V8TypeOf<FederatedCredential> { typedef V8FederatedCredential Type; }; } // namespace blink #endif // V8FederatedCredential_h
{ "pile_set_name": "Github" }
# frozen_string_literal: true require 'openssl' require 'spec_helper' RSpec.describe Authentication::AuthnK8s::K8sObjectLookup do let(:webservice) { Authentication::Webservice.new( account: 'MockAccount', authenticator_name: 'authn-k8s', service_id: 'MockService' )} context "inside of kubernetes" do include_context "running in kubernetes" context "instantiation" do it "does not require a webservice" do expect { Authentication::AuthnK8s::K8sObjectLookup.new }.not_to raise_error end end subject { Authentication::AuthnK8s::K8sObjectLookup.new(webservice) } it "gets the correct api url" do expect(subject.api_url).to eq("https://#{kubernetes_api_url}:#{kubernetes_api_port}") end it "has the correct ssl options" do expect(subject.options[:ssl_options]).to include(:cert_store, :verify_ssl => OpenSSL::SSL::VERIFY_PEER) end it "has the correct auth options" do expect(subject.options[:auth_options]).to include(:bearer_token => kubernetes_service_token) end end context "outside of kubernetes" do include_context "running outside kubernetes" context "instantiation" do it "requires a webservice" do allow(Authentication::AuthnK8s::K8sContextValue).to receive(:get) .with(nil, Authentication::AuthnK8s::SERVICEACCOUNT_CA_PATH, Authentication::AuthnK8s::VARIABLE_CA_CERT) .and_return(nil) expect { Authentication::AuthnK8s::K8sObjectLookup.new }.to raise_error(Errors::Authentication::AuthnK8s::MissingCertificate) end end subject { Authentication::AuthnK8s::K8sObjectLookup.new(webservice) } it "gets the correct api url" do expect(subject.api_url).to eq(kubernetes_api_url) end it "has the correct ssl options" do expect(subject.options[:ssl_options]).to include(:cert_store, :verify_ssl => OpenSSL::SSL::VERIFY_PEER) end it "has the correct auth options" do expect(subject.options[:auth_options]).to include(:bearer_token => kubernetes_service_token) end end end
{ "pile_set_name": "Github" }
//+build !faker package ping import ( "github.com/monitoror/monitoror/api/config/versions" pkgMonitorable "github.com/monitoror/monitoror/internal/pkg/monitorable" coreModels "github.com/monitoror/monitoror/models" "github.com/monitoror/monitoror/monitorables/ping/api" pingDelivery "github.com/monitoror/monitoror/monitorables/ping/api/delivery/http" pingModels "github.com/monitoror/monitoror/monitorables/ping/api/models" pingRepository "github.com/monitoror/monitoror/monitorables/ping/api/repository" pingUsecase "github.com/monitoror/monitoror/monitorables/ping/api/usecase" pingConfig "github.com/monitoror/monitoror/monitorables/ping/config" "github.com/monitoror/monitoror/pkg/system" "github.com/monitoror/monitoror/registry" "github.com/monitoror/monitoror/store" ) type Monitorable struct { store *store.Store config map[coreModels.VariantName]*pingConfig.Ping // Config tile settings pingTileEnabler registry.TileEnabler } func NewMonitorable(store *store.Store) *Monitorable { m := &Monitorable{} m.store = store m.config = make(map[coreModels.VariantName]*pingConfig.Ping) // Load core config from env pkgMonitorable.LoadConfig(&m.config, pingConfig.Default) // Register Monitorable Tile in config manager m.pingTileEnabler = store.Registry.RegisterTile(api.PingTileType, versions.MinimalVersion, m.GetVariantsNames()) return m } func (m *Monitorable) GetDisplayName() string { return "Ping" } func (m *Monitorable) GetVariantsNames() []coreModels.VariantName { return pkgMonitorable.GetVariantsNames(m.config) } func (m *Monitorable) Validate(variantName coreModels.VariantName) (bool, []error) { conf := m.config[variantName] // Validate Config if errors := pkgMonitorable.ValidateConfig(conf, variantName); errors != nil { return false, errors } return system.IsRawSocketAvailable(), nil } func (m *Monitorable) Enable(variantName coreModels.VariantName) { conf := m.config[variantName] repository := pingRepository.NewPingRepository(conf) usecase := pingUsecase.NewPingUsecase(repository) delivery := pingDelivery.NewPingDelivery(usecase) // EnableTile route to echo routeGroup := m.store.MonitorableRouter.Group("/ping", variantName) route := routeGroup.GET("/ping", delivery.GetPing) // EnableTile data for config hydration m.pingTileEnabler.Enable(variantName, &pingModels.PingParams{}, route.Path) }
{ "pile_set_name": "Github" }
/* This file is part of the iText (R) project. Copyright (c) 1998-2020 iText Group NV Authors: Bruno Lowagie, Paulo Soares, et al. This program is free software; you can redistribute it and/or modify it under the terms of the GNU Affero General Public License version 3 as published by the Free Software Foundation with the addition of the following permission added to Section 15 as permitted in Section 7(a): FOR ANY PART OF THE COVERED WORK IN WHICH THE COPYRIGHT IS OWNED BY ITEXT GROUP. ITEXT GROUP DISCLAIMS THE WARRANTY OF NON INFRINGEMENT OF THIRD PARTY RIGHTS This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Affero General Public License for more details. You should have received a copy of the GNU Affero General Public License along with this program; if not, see http://www.gnu.org/licenses or write to the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA, 02110-1301 USA, or download the license from the following URL: http://itextpdf.com/terms-of-use/ The interactive user interfaces in modified source and object code versions of this program must display Appropriate Legal Notices, as required under Section 5 of the GNU Affero General Public License. In accordance with Section 7(b) of the GNU Affero General Public License, a covered work must retain the producer line in every PDF that is created or manipulated using iText. You can be released from the requirements of the license by purchasing a commercial license. Buying such a license is mandatory as soon as you develop commercial activities involving the iText software without disclosing the source code of your own applications. These activities include: offering paid services to customers as an ASP, serving PDFs on the fly in a web application, shipping iText with a closed source product. For more information, please contact iText Software Corp. at this address: [email protected] */ using iText.Kernel.Pdf.Tagging; using iText.Kernel.Pdf.Tagutils; using iText.Layout.Properties; using iText.Layout.Renderer; namespace iText.Layout.Element { /// <summary> /// A /// <see cref="Div"/> /// is a container object that defines a section in a document, /// which will have some shared layout properties. /// </summary> /// <remarks> /// A /// <see cref="Div"/> /// is a container object that defines a section in a document, /// which will have some shared layout properties. Like all /// <see cref="BlockElement{T}"/> /// types, it will try to take up as much horizontal space as possible. /// <para /> /// The concept is very similar to that of the div tag in HTML. 
/// </remarks> public class Div : BlockElement<Div> { protected internal DefaultAccessibilityProperties tagProperties; /// <summary>Adds any block element to the div's contents.</summary> /// <param name="element"> /// a /// <see cref="BlockElement{T}"/> /// </param> /// <returns>this Element</returns> public virtual Div Add(IBlockElement element) { childElements.Add(element); return this; } /// <summary>Adds an image to the div's contents.</summary> /// <param name="element"> /// an /// <see cref="Image"/> /// </param> /// <returns>this Element</returns> public virtual Div Add(Image element) { childElements.Add(element); return this; } /// <summary>Adds an area break to the div's contents.</summary> /// <param name="areaBreak"> /// an /// <see cref="AreaBreak"/> /// </param> /// <returns>this Element</returns> public virtual Div Add(AreaBreak areaBreak) { childElements.Add(areaBreak); return this; } public override AccessibilityProperties GetAccessibilityProperties() { if (tagProperties == null) { tagProperties = new DefaultAccessibilityProperties(StandardRoles.DIV); } return tagProperties; } public virtual Div SetFillAvailableArea(bool fillArea) { SetProperty(Property.FILL_AVAILABLE_AREA, fillArea); return this; } public virtual Div SetFillAvailableAreaOnSplit(bool fillAreaOnSplit) { SetProperty(Property.FILL_AVAILABLE_AREA_ON_SPLIT, fillAreaOnSplit); return this; } protected internal override IRenderer MakeNewRenderer() { return new DivRenderer(this); } } }
{ "pile_set_name": "Github" }
<!DOCTYPE html> <!-- this file is auto-generated. DO NOT EDIT. /* ** Copyright (c) 2012 The Khronos Group Inc. ** ** Permission is hereby granted, free of charge, to any person obtaining a ** copy of this software and/or associated documentation files (the ** "Materials"), to deal in the Materials without restriction, including ** without limitation the rights to use, copy, modify, merge, publish, ** distribute, sublicense, and/or sell copies of the Materials, and to ** permit persons to whom the Materials are furnished to do so, subject to ** the following conditions: ** ** The above copyright notice and this permission notice shall be included ** in all copies or substantial portions of the Materials. ** ** THE MATERIALS ARE PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, ** EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF ** MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. ** IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY ** CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, ** TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE ** MATERIALS OR THE USE OR OTHER DEALINGS IN THE MATERIALS. */ --> <html> <head> <meta charset="utf-8"> <title>WebGL GLSL conformance test: sin_001_to_006.html</title> <link rel="stylesheet" href="../../../../resources/js-test-style.css" /> <link rel="stylesheet" href="../../../../resources/ogles-tests.css" /> <script src="../../../../js/js-test-pre.js"></script> <script src="../../../../js/webgl-test-utils.js"></script> <script src="../../ogles-utils.js"></script> </head> <body> <canvas id="example" width="500" height="500" style="width: 16px; height: 16px;"></canvas> <div id="description"></div> <div id="console"></div> </body> <script> "use strict"; OpenGLESTestRunner.run({ "tests": [ { "referenceProgram": { "vertexShader": "../default/default.vert", "fragmentShader": "sin_float_frag_xvary_ref.frag" }, "model": null, "testProgram": { "vertexShader": "../default/default.vert", "fragmentShader": "sin_float_frag_xvary.frag" }, "name": "sin_float_frag_xvary.test.html", "pattern": "compare" }, { "referenceProgram": { "vertexShader": "../default/default.vert", "fragmentShader": "sin_vec2_frag_xvary_ref.frag" }, "model": null, "testProgram": { "vertexShader": "../default/default.vert", "fragmentShader": "sin_vec2_frag_xvary.frag" }, "name": "sin_vec2_frag_xvary.test.html", "pattern": "compare" }, { "referenceProgram": { "vertexShader": "../default/default.vert", "fragmentShader": "sin_vec3_frag_xvary_ref.frag" }, "model": null, "testProgram": { "vertexShader": "../default/default.vert", "fragmentShader": "sin_vec3_frag_xvary.frag" }, "name": "sin_vec3_frag_xvary.test.html", "pattern": "compare" }, { "referenceProgram": { "vertexShader": "sin_float_vert_xvary_ref.vert", "fragmentShader": "../default/default.frag" }, "model": "grid", "testProgram": { "vertexShader": "sin_float_vert_xvary.vert", "fragmentShader": "../default/default.frag" }, "name": "sin_float_vert_xvary.test.html", "pattern": "compare" }, { "referenceProgram": { "vertexShader": "sin_vec2_vert_xvary_ref.vert", "fragmentShader": "../default/default.frag" }, "model": "grid", "testProgram": { "vertexShader": "sin_vec2_vert_xvary.vert", "fragmentShader": "../default/default.frag" }, "name": "sin_vec2_vert_xvary.test.html", "pattern": "compare" }, { "referenceProgram": { "vertexShader": "sin_vec3_vert_xvary_ref.vert", "fragmentShader": "../default/default.frag" }, "model": "grid", "testProgram": { "vertexShader": 
"sin_vec3_vert_xvary.vert", "fragmentShader": "../default/default.frag" }, "name": "sin_vec3_vert_xvary.test.html", "pattern": "compare" } ] }); var successfullyParsed = true; </script> </html>
{ "pile_set_name": "Github" }
package org.limewire.util;

/**
 * Thrown upon a version parsing error when the provided version format
 * is malformed.
 */
class VersionFormatException extends Exception {

    private static final long serialVersionUID = 8633755784769968524L;

    VersionFormatException(String s) {
        super(s);
    }
}
{ "pile_set_name": "Github" }
// Copyright 2016 The etcd Authors // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package main import ( "fmt" "path/filepath" "github.com/boltdb/bolt" "github.com/coreos/etcd/mvcc" "github.com/coreos/etcd/mvcc/backend" ) func snapDir(dataDir string) string { return filepath.Join(dataDir, "member", "snap") } func getBuckets(dbPath string) (buckets []string, err error) { db, derr := bolt.Open(dbPath, 0600, &bolt.Options{}) if derr != nil { return nil, derr } defer db.Close() err = db.View(func(tx *bolt.Tx) error { return tx.ForEach(func(b []byte, _ *bolt.Bucket) error { buckets = append(buckets, string(b)) return nil }) }) return } func iterateBucket(dbPath, bucket string, limit uint64) (err error) { db, derr := bolt.Open(dbPath, 0600, &bolt.Options{}) if derr != nil { return derr } defer db.Close() err = db.View(func(tx *bolt.Tx) error { b := tx.Bucket([]byte(bucket)) if b == nil { return fmt.Errorf("got nil bucket for %s", bucket) } c := b.Cursor() // iterate in reverse order (use First() and Next() for ascending order) for k, v := c.Last(); k != nil; k, v = c.Prev() { fmt.Printf("key=%q, value=%q\n", k, v) limit-- if limit == 0 { break } } return nil }) return } func getHash(dbPath string) (hash uint32, err error) { b := backend.NewDefaultBackend(dbPath) return b.Hash(mvcc.DefaultIgnores) } // TODO: revert by revision and find specified hash value // currently, it's hard because lease is in separate bucket // and does not modify revision
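A sketch of how the helpers above compose (same package; the `example` wrapper is illustrative). The `"key"` bucket is etcd's main mvcc keyspace bucket, and `dataDir` is whatever `--data-dir` the member was started with:

```go
func example(dataDir string) error {
	dbPath := filepath.Join(snapDir(dataDir), "db")

	buckets, err := getBuckets(dbPath)
	if err != nil {
		return err
	}
	fmt.Println("buckets:", buckets)

	// Print the 10 most recent keys; iteration is in reverse order, see above.
	return iterateBucket(dbPath, "key", 10)
}
```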
{ "pile_set_name": "Github" }
/* * Copyright 2006-2009, 2017, 2020 United States Government, as represented by the * Administrator of the National Aeronautics and Space Administration. * All rights reserved. * * The NASA World Wind Java (WWJ) platform is licensed under the Apache License, * Version 2.0 (the "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software distributed * under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR * CONDITIONS OF ANY KIND, either express or implied. See the License for the * specific language governing permissions and limitations under the License. * * NASA World Wind Java (WWJ) also contains the following 3rd party Open Source * software: * * Jackson Parser – Licensed under Apache 2.0 * GDAL – Licensed under MIT * JOGL – Licensed under Berkeley Software Distribution (BSD) * Gluegen – Licensed under Berkeley Software Distribution (BSD) * * A complete listing of 3rd Party software notices and licenses included in * NASA World Wind Java (WWJ) can be found in the WorldWindJava-v2.2 3rd-party * notices and licenses PDF found in code directory. */ package gov.nasa.worldwind.util.xml.xal; /** * @author tag * @version $Id: XALAddressLine.java 1171 2013-02-11 21:45:02Z dcollins $ */ public class XALAddressLine extends XALAddress { public XALAddressLine(String namespaceURI) { super(namespaceURI); } }
{ "pile_set_name": "Github" }
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # # MDAnalysis --- https://www.mdanalysis.org # Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # R. J. Gowers, M. Linke, J. Barnoud, T. J. E. Reddy, M. N. Melo, S. L. Seyler, # D. L. Dotson, J. Domanski, S. Buchoux, I. M. Kenney, and O. Beckstein. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # doi: 10.25080/majora-629e541a-00e # # N. Michaud-Agrawal, E. J. Denning, T. B. Woolf, and O. Beckstein. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # # TRZ Reader written by Richard J. Gowers (2013) """TRZ trajectory I/O --- :mod:`MDAnalysis.coordinates.TRZ` ============================================================ Classes to read `IBIsCO`_ / `YASP`_ TRZ binary trajectories, including coordinates, velocities and more (see attributes of the :class:`Timestep`). Data are read and written in binary representation but because this depends on the machine hardware architecture, MDAnalysis *always* reads and writes TRZ trajectories in *little-endian* byte order. .. _IBIsCO: http://www.theo.chemie.tu-darmstadt.de/ibisco/IBISCO.html .. _YASP: http://www.theo.chemie.tu-darmstadt.de/group/services/yaspdoc/yaspdoc.html Classes ------- .. autoclass:: MDAnalysis.coordinates.TRZ.Timestep :members: .. attribute:: frame Index of current frame number (0 based) .. attribute:: time Current system time in ps .. attribute:: n_atoms Number of atoms in the frame (will be constant throughout trajectory) .. attribute:: pressure System pressure in pascals .. attribute:: pressure_tensor Array containing pressure tensors in order: xx, xy, yy, xz, yz, zz .. attribute:: total_energy Hamiltonian for the system in kJ/mol .. attribute:: potential_energy Potential energy of the system in kJ/mol .. attribute:: kinetic_energy Kinetic energy of the system in kJ/mol .. attribute:: temperature Temperature of the system in Kelvin .. autoclass:: TRZReader :members: .. autoclass:: TRZWriter :members: """ import warnings import numpy as np import os import errno from . import base from ..lib import util from ..lib.util import cached from .core import triclinic_box, triclinic_vectors class Timestep(base.Timestep): """ TRZ custom Timestep""" def _init_unitcell(self): return np.zeros(9) @property def dimensions(self): """ Unit cell dimensions ``[A,B,C,alpha,beta,gamma]``. """ x = self._unitcell[0:3] y = self._unitcell[3:6] z = self._unitcell[6:9] return triclinic_box(x, y, z) @dimensions.setter def dimensions(self, box): """Set the Timestep dimensions with MDAnalysis format cell (*A*, *B*, *C*, *alpha*, *beta*, *gamma*) .. versionadded:: 0.9.0 """ self._unitcell[:] = triclinic_vectors(box).reshape(9) class TRZReader(base.ReaderBase): """Reads an IBIsCO or YASP trajectory file Attributes ---------- ts : TRZ.Timestep :class:`~MDAnalysis.coordinates.TRZ.Timestep` object containing coordinates of current frame Note ---- Binary TRZ trajectories are *always* assumed to be written in *little-endian* byte order and are read as such. .. 
versionchanged:: 0.11.0
       Frames now 0-based instead of 1-based.
       Extra data (Temperature, Energies, Pressures, etc) now read
       into ts.data dictionary.
       Now passes a weakref of self to ts (ts._reader).

    .. versionchanged:: 2.0.0
       Now checks for the correct `n_atoms` on reading and can raise
       :exc:`ValueError`.
    """

    format = "TRZ"

    units = {'time': 'ps', 'length': 'nm', 'velocity': 'nm/ps'}

    def __init__(self, trzfilename, n_atoms=None, **kwargs):
        """Creates a TRZ Reader

        Parameters
        ----------
        trzfilename : str
            name of input file
        n_atoms : int
            number of atoms in trajectory, must be taken from topology file!
        convert_units : bool (optional)
            converts units to MDAnalysis defaults

        Raises
        ------
        ValueError
           If `n_atoms` or the number of atoms in the topology file do not
           match the number of atoms in the trajectory.
        """
        super(TRZReader, self).__init__(trzfilename, **kwargs)

        if n_atoms is None:
            raise ValueError('TRZReader requires the n_atoms keyword')

        self.trzfile = util.anyopen(self.filename, 'rb')
        self._cache = dict()
        self._n_atoms = n_atoms

        self._read_trz_header()
        self.ts = Timestep(self.n_atoms,
                           velocities=True,
                           forces=self.has_force,
                           reader=self,
                           **self._ts_kwargs)

        # structured dtype of a single trajectory frame
        readarg = str(n_atoms) + '<f4'
        frame_contents = [
            ('p1', '<i4'),
            ('nframe', '<i4'),
            ('ntrj', '<i4'),
            ('natoms', '<i4'),
            ('treal', '<f8'),
            ('p2', '<2i4'),
            ('box', '<9f8'),
            ('p3', '<2i4'),
            ('pressure', '<f8'),
            ('ptensor', '<6f8'),
            ('p4', '<3i4'),
            ('etot', '<f8'),
            ('ptot', '<f8'),
            ('ek', '<f8'),
            ('T', '<f8'),
            ('p5', '<6i4'),
            ('rx', readarg),
            ('pad2', '<2i4'),
            ('ry', readarg),
            ('pad3', '<2i4'),
            ('rz', readarg),
            ('pad4', '<2i4'),
            ('vx', readarg),
            ('pad5', '<2i4'),
            ('vy', readarg),
            ('pad6', '<2i4'),
            ('vz', readarg)]
        if not self.has_force:
            frame_contents += [('pad7', '<i4')]
        else:
            frame_contents += [
                ('pad7', '<2i4'),
                ('fx', readarg),
                ('pad8', '<2i4'),
                ('fy', readarg),
                ('pad9', '<2i4'),
                ('fz', readarg),
                ('pad10', '<i4')]
        self._dtype = np.dtype(frame_contents)

        self._read_next_timestep()

    def _read_trz_header(self):
        """Reads the header of the trz trajectory"""
        self._headerdtype = np.dtype([
            ('p1', '<i4'),
            ('title', '80c'),
            ('p2', '<2i4'),
            ('force', '<i4'),
            ('p3', '<i4')])
        data = np.fromfile(self.trzfile, dtype=self._headerdtype, count=1)
        self.title = ''.join(c.decode('utf-8') for c in data['title'][0]).strip()
        if data['force'] == 10:
            self.has_force = False
        elif data['force'] == 20:
            self.has_force = True
        else:
            raise IOError("Header not recognised")

    def _read_next_timestep(self, ts=None):
        if ts is None:
            ts = self.ts
        try:
            data = np.fromfile(self.trzfile, dtype=self._dtype, count=1)
            if data['natoms'][0] != self.n_atoms:
                raise ValueError("Supplied n_atoms {} is incompatible "
                                 "with provided trajectory file. "
                                 "Maybe `topology` is wrong?".format(
                                     self.n_atoms))

            ts.frame = data['nframe'][0] - 1  # 0 based for MDA
            ts._frame = data['ntrj'][0]
            ts.time = data['treal'][0]

            ts._unitcell[:] = data['box']
            ts.data['pressure'] = data['pressure']
            ts.data['pressure_tensor'] = data['ptensor']
            ts.data['total_energy'] = data['etot']
            ts.data['potential_energy'] = data['ptot']
            ts.data['kinetic_energy'] = data['ek']
            ts.data['temperature'] = data['T']
            ts._x[:] = data['rx']
            ts._y[:] = data['ry']
            ts._z[:] = data['rz']
            ts._velocities[:, 0] = data['vx']
            ts._velocities[:, 1] = data['vy']
            ts._velocities[:, 2] = data['vz']
            if self.has_force:
                ts._forces[:, 0] = data['fx']
                ts._forces[:, 1] = data['fy']
                ts._forces[:, 2] = data['fz']
        except IndexError:  # raises IndexError if data has no data (EOF)
            raise IOError from None
        else:
            # Convert things read into MDAnalysis' native formats (nm -> angstroms)
            if self.convert_units:
                self.convert_pos_from_native(self.ts._pos)
                self.convert_pos_from_native(self.ts._unitcell)
                self.convert_velocities_from_native(self.ts._velocities)

            return ts

    @property
    def n_atoms(self):
        """Number of atoms in a frame"""
        return self._n_atoms

    @property
    @cached('n_frames')
    def n_frames(self):
        """Total number of frames in a trajectory"""
        try:
            return self._read_trz_n_frames(self.trzfile)
        except IOError:
            return 0

    def _read_trz_n_frames(self, trzfile):
        """Uses size of file and dtype information to determine how many frames exist

        .. versionchanged:: 0.9.0
           Now is based on filesize rather than reading entire file
        """
        fsize = os.fstat(trzfile.fileno()).st_size  # size of file in bytes

        if not (fsize - self._headerdtype.itemsize) % self._dtype.itemsize == 0:
            raise IOError("Trajectory has incomplete frames")  # check that division is sane

        nframes = int((fsize - self._headerdtype.itemsize) / self._dtype.itemsize)

        return nframes

    def _get_dt(self):
        """The amount of time between frames in ps

        Assumes that this step is constant (i.e., two trajectories with
        different steps haven't been stitched together).
        Returns 0 in case of StopIteration.
        """
        curr_frame = self.ts.frame
        try:
            t0 = self.ts.time
            self.next()
            t1 = self.ts.time
            dt = t1 - t0
        except StopIteration:
            return 0
        else:
            return dt
        finally:
            self._read_frame(curr_frame)

    @property
    @cached('delta')
    def delta(self):
        """MD integration timestep"""
        return self.dt / self.skip_timestep

    @property
    @cached('skip_timestep')
    def skip_timestep(self):
        """Timesteps between trajectory frames"""
        curr_frame = self.ts.frame
        try:
            t0 = self.ts._frame
            self.next()
            t1 = self.ts._frame
            skip_timestep = t1 - t0
        except StopIteration:
            return 0
        else:
            return skip_timestep
        finally:
            self._read_frame(curr_frame)

    def _read_frame(self, frame):
        """Move to *frame* and fill timestep with data.

        .. versionchanged:: 0.11.0
           Frames now 0-based instead of 1-based
        """
        move = frame - self.ts.frame

        self._seek(move - 1)
        self._read_next_timestep()
        return self.ts

    def _seek(self, nframes):
        """Move *nframes* in the trajectory

        Note that this doesn't read the trajectory (ts remains unchanged)

        .. versionadded:: 0.9.0
        """
        framesize = self._dtype.itemsize
        seeksize = int(framesize * nframes)
        self.trzfile.seek(seeksize, 1)

    def _reopen(self):
        self.close()
        self.open_trajectory()
        self._read_trz_header()  # Moves to start of first frame

    def open_trajectory(self):
        """Open the trajectory file"""
        if self.trzfile is not None:
            raise IOError(errno.EALREADY, 'TRZ file already opened', self.filename)
        if not os.path.exists(self.filename):
            raise IOError(errno.ENOENT, 'TRZ file not found', self.filename)

        self.trzfile = util.anyopen(self.filename, 'rb')

        # Reset ts
        ts = self.ts
        ts.frame = -1

        return self.trzfile

    def Writer(self, filename, n_atoms=None):
        if n_atoms is None:
            # guess that they want to write the whole timestep unless told otherwise?
            n_atoms = self.ts.n_atoms
        return TRZWriter(filename, n_atoms)

    def close(self):
        """Close trz file if it was open"""
        if self.trzfile is not None:
            self.trzfile.close()
            self.trzfile = None


class TRZWriter(base.WriterBase):
    """Writes a TRZ format trajectory.

    Note
    ----
    Binary TRZ trajectories are *always* written in *little-endian* byte
    order.
    """
    format = 'TRZ'
    multiframe = True

    units = {'time': 'ps', 'length': 'nm', 'velocity': 'nm/ps'}

    def __init__(self, filename, n_atoms, title='TRZ', convert_units=True):
        """Create a TRZWriter

        Parameters
        ----------
        filename : str
            name of output file
        n_atoms : int
            number of atoms in trajectory
        title : str (optional)
            title of the trajectory; the title must be 80 characters or
            shorter, a longer title raises a ValueError exception.
        convert_units : bool (optional)
            units are converted to the MDAnalysis base format; [``True``]
        """
        self.filename = filename

        if n_atoms is None:
            raise ValueError("TRZWriter requires the n_atoms keyword")
        if n_atoms == 0:
            raise ValueError("TRZWriter: no atoms in output trajectory")
        self.n_atoms = n_atoms

        if len(title) > 80:
            raise ValueError("TRZWriter: 'title' must be 80 characters or shorter")

        self.convert_units = convert_units

        self.trzfile = util.anyopen(self.filename, 'wb')
        self._writeheader(title)

        floatsize = str(n_atoms) + '<f4'
        self.frameDtype = np.dtype([
            ('p1a', '<i4'),
            ('nframe', '<i4'),
            ('ntrj', '<i4'),
            ('natoms', '<i4'),
            ('treal', '<f8'),
            ('p1b', '<i4'),
            ('p2a', '<i4'),
            ('box', '<9f8'),
            ('p2b', '<i4'),
            ('p3a', '<i4'),
            ('pressure', '<f8'),
            ('ptensor', '<6f8'),
            ('p3b', '<i4'),
            ('p4a', '<i4'),
            ('six', '<i4'),
            ('etot', '<f8'),
            ('ptot', '<f8'),
            ('ek', '<f8'),
            ('T', '<f8'),
            ('blanks', '<2f8'),
            ('p4b', '<i4'),
            ('p5a', '<i4'),
            ('rx', floatsize),
            ('p5b', '<i4'),
            ('p6a', '<i4'),
            ('ry', floatsize),
            ('p6b', '<i4'),
            ('p7a', '<i4'),
            ('rz', floatsize),
            ('p7b', '<i4'),
            ('p8a', '<i4'),
            ('vx', floatsize),
            ('p8b', '<i4'),
            ('p9a', '<i4'),
            ('vy', floatsize),
            ('p9b', '<i4'),
            ('p10a', '<i4'),
            ('vz', floatsize),
            ('p10b', '<i4')])

    def _writeheader(self, title):
        hdt = np.dtype([
            ('pad1', '<i4'), ('title', '80c'), ('pad2', '<i4'),
            ('pad3', '<i4'), ('nrec', '<i4'), ('pad4', '<i4')])

        out = np.zeros((), dtype=hdt)
        out['pad1'], out['pad2'] = 80, 80
        out['title'] = title + ' ' * (80 - len(title))
        out['pad3'], out['pad4'] = 4, 4
        out['nrec'] = 10
        out.tofile(self.trzfile)

    def _write_next_frame(self, obj):
        """Write information associated with ``obj`` at current frame into
        trajectory

        Parameters
        ----------
        ag : AtomGroup or Universe

        .. versionchanged:: 1.0.0
           Renamed from `write_next_timestep` to `_write_next_frame`.
        .. versionchanged:: 2.0.0
           Deprecated support for Timestep argument has now been removed.
           Use AtomGroup or Universe as an input instead.
        """
        # Check size of ts is same as initial
        try:  # atomgroup?
            ts = obj.ts
        except AttributeError:  # universe?
            try:
                ts = obj.trajectory.ts
            except AttributeError:
                errmsg = "Input obj is neither an AtomGroup nor a Universe"
                raise TypeError(errmsg) from None

        if not obj.atoms.n_atoms == self.n_atoms:
            errmsg = "Number of atoms in ts different from initialisation"
            raise ValueError(errmsg)

        # Gather data, faking it when unavailable
        data = {}
        faked_attrs = []
        for att in ['pressure', 'pressure_tensor', 'total_energy',
                    'potential_energy', 'kinetic_energy', 'temperature']:
            try:
                data[att] = ts.data[att]
            except KeyError:
                if att == 'pressure_tensor':
                    data[att] = np.zeros(6, dtype=np.float64)
                else:
                    data[att] = 0.0
                faked_attrs.append(att)
        try:
            data['step'] = ts._frame
        except AttributeError:
            data['step'] = ts.frame
            faked_attrs.append('step')
        try:
            data['time'] = ts.time
        except AttributeError:
            data['time'] = float(ts.frame)
            faked_attrs.append('time')
        if faked_attrs:
            warnings.warn("Timestep didn't have the following attributes: '{0}', "
                          "these will be set to 0 in the output trajectory"
                          "".format(", ".join(faked_attrs)))

        # Convert other stuff into our format
        unitcell = triclinic_vectors(ts.dimensions).reshape(9)
        try:
            vels = ts._velocities
        except AttributeError:
            vels = np.zeros((self.n_atoms, 3), dtype=np.float32, order='F')
            warnings.warn("Timestep didn't have velocity information, "
                          "this will be set to zero in output trajectory. ")

        out = np.zeros((), dtype=self.frameDtype)
        out['p1a'], out['p1b'] = 20, 20
        out['nframe'] = ts.frame + 1  # TRZ wants 1-based
        out['ntrj'] = data['step']
        out['natoms'] = self.n_atoms
        out['treal'] = data['time']
        out['p2a'], out['p2b'] = 72, 72
        out['box'] = self.convert_pos_to_native(unitcell, inplace=False)
        out['p3a'], out['p3b'] = 56, 56
        out['pressure'] = data['pressure']
        out['ptensor'] = data['pressure_tensor']
        out['p4a'], out['p4b'] = 60, 60
        out['six'] = 6
        out['etot'] = data['total_energy']
        out['ptot'] = data['potential_energy']
        out['ek'] = data['kinetic_energy']
        out['T'] = data['temperature']
        out['blanks'] = 0.0, 0.0
        size = ts.n_atoms * 4  # size of float for vels & coords
        out['p5a'], out['p5b'] = size, size
        out['rx'] = self.convert_pos_to_native(ts._x, inplace=False)
        out['p6a'], out['p6b'] = size, size
        out['ry'] = self.convert_pos_to_native(ts._y, inplace=False)
        out['p7a'], out['p7b'] = size, size
        out['rz'] = self.convert_pos_to_native(ts._z, inplace=False)
        out['p8a'], out['p8b'] = size, size
        out['vx'] = self.convert_velocities_to_native(vels[:, 0], inplace=False)
        out['p9a'], out['p9b'] = size, size
        out['vy'] = self.convert_velocities_to_native(vels[:, 1], inplace=False)
        out['p10a'], out['p10b'] = size, size
        out['vz'] = self.convert_velocities_to_native(vels[:, 2], inplace=False)

        out.tofile(self.trzfile)

    def close(self):
        """Close if it was open"""
        if self.trzfile is None:
            return
        self.trzfile.close()
        self.trzfile = None
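# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the upstream module).
# The filename and atom count are hypothetical -- in real use ``n_atoms``
# must come from the accompanying topology file, and you would normally go
# through ``MDAnalysis.Universe`` rather than instantiating the reader
# directly.
if __name__ == "__main__":
    reader = TRZReader("system.trz", n_atoms=1000)
    print("Found %d frames of %d atoms" % (reader.n_frames, reader.n_atoms))
    for ts in reader:
        # Positions and box are in Angstrom after unit conversion; extra
        # per-frame fields such as temperature live in ``ts.data``.
        print(ts.frame, ts.time, float(ts.data['temperature']))
    reader.close()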
// Protocol Buffers - Google's data interchange format // Copyright 2012 Google Inc. All rights reserved. // https://developers.google.com/protocol-buffers/ // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are // met: // // * Redistributions of source code must retain the above copyright // notice, this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above // copyright notice, this list of conditions and the following disclaimer // in the documentation and/or other materials provided with the // distribution. // * Neither the name of Google Inc. nor the names of its // contributors may be used to endorse or promote products derived from // this software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. // This file is an internal atomic implementation, use atomicops.h instead. #ifndef GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ #define GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_ #define ATOMICOPS_COMPILER_BARRIER() __asm__ __volatile__("" : : : "memory") namespace google { namespace protobuf { namespace internal { // Atomically execute: // result = *ptr; // if (*ptr == old_value) // *ptr = new_value; // return result; // // I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value". // Always return the old value of "*ptr" // // This routine implies no memory barriers. inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { Atomic32 prev, tmp; __asm__ __volatile__(".set push\n" ".set noreorder\n" "1:\n" "ll %0, %5\n" // prev = *ptr "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 "move %2, %4\n" // tmp = new_value "sc %2, %1\n" // *ptr = tmp (with atomic check) "beqz %2, 1b\n" // start again on atomic error "nop\n" // delay slot nop "2:\n" ".set pop\n" : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) : "Ir" (old_value), "r" (new_value), "m" (*ptr) : "memory"); return prev; } // Atomically store new_value into *ptr, returning the previous value held in // *ptr. This routine implies no memory barriers. inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr, Atomic32 new_value) { Atomic32 temp, old; __asm__ __volatile__(".set push\n" ".set noreorder\n" "1:\n" "ll %1, %4\n" // old = *ptr "move %0, %3\n" // temp = new_value "sc %0, %2\n" // *ptr = temp (with atomic check) "beqz %0, 1b\n" // start again on atomic error "nop\n" // delay slot nop ".set pop\n" : "=&r" (temp), "=&r" (old), "=m" (*ptr) : "r" (new_value), "m" (*ptr) : "memory"); return old; } // Atomically increment *ptr by "increment". Returns the new value of // *ptr with the increment applied. This routine implies no memory barriers. 
inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { Atomic32 temp, temp2; __asm__ __volatile__(".set push\n" ".set noreorder\n" "1:\n" "ll %0, %4\n" // temp = *ptr "addu %1, %0, %3\n" // temp2 = temp + increment "sc %1, %2\n" // *ptr = temp2 (with atomic check) "beqz %1, 1b\n" // start again on atomic error "addu %1, %0, %3\n" // temp2 = temp + increment ".set pop\n" : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) : "Ir" (increment), "m" (*ptr) : "memory"); // temp2 now holds the final value. return temp2; } inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr, Atomic32 increment) { ATOMICOPS_COMPILER_BARRIER(); Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment); ATOMICOPS_COMPILER_BARRIER(); return res; } // "Acquire" operations // ensure that no later memory access can be reordered ahead of the operation. // "Release" operations ensure that no previous memory access can be reordered // after the operation. "Barrier" operations have both "Acquire" and "Release" // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory // access. inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { ATOMICOPS_COMPILER_BARRIER(); Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); ATOMICOPS_COMPILER_BARRIER(); return res; } inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr, Atomic32 old_value, Atomic32 new_value) { ATOMICOPS_COMPILER_BARRIER(); Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); ATOMICOPS_COMPILER_BARRIER(); return res; } inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; } inline void MemoryBarrier() { __asm__ __volatile__("sync" : : : "memory"); } inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic32* ptr, Atomic32 value) { MemoryBarrier(); *ptr = value; } inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr) { return *ptr; } inline Atomic32 Acquire_Load(volatile const Atomic32* ptr) { Atomic32 value = *ptr; MemoryBarrier(); return value; } inline Atomic32 Release_Load(volatile const Atomic32* ptr) { MemoryBarrier(); return *ptr; } #if defined(__LP64__) // 64-bit versions of the atomic ops. inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 prev, tmp; __asm__ __volatile__(".set push\n" ".set noreorder\n" "1:\n" "lld %0, %5\n" // prev = *ptr "bne %0, %3, 2f\n" // if (prev != old_value) goto 2 "move %2, %4\n" // tmp = new_value "scd %2, %1\n" // *ptr = tmp (with atomic check) "beqz %2, 1b\n" // start again on atomic error "nop\n" // delay slot nop "2:\n" ".set pop\n" : "=&r" (prev), "=m" (*ptr), "=&r" (tmp) : "Ir" (old_value), "r" (new_value), "m" (*ptr) : "memory"); return prev; } // Atomically store new_value into *ptr, returning the previous value held in // *ptr. This routine implies no memory barriers. inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr, Atomic64 new_value) { Atomic64 temp, old; __asm__ __volatile__(".set push\n" ".set noreorder\n" "1:\n" "lld %1, %4\n" // old = *ptr "move %0, %3\n" // temp = new_value "scd %0, %2\n" // *ptr = temp (with atomic check) "beqz %0, 1b\n" // start again on atomic error "nop\n" // delay slot nop ".set pop\n" : "=&r" (temp), "=&r" (old), "=m" (*ptr) : "r" (new_value), "m" (*ptr) : "memory"); return old; } // Atomically increment *ptr by "increment". 
Returns the new value of // *ptr with the increment applied. This routine implies no memory barriers. inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) { Atomic64 temp, temp2; __asm__ __volatile__(".set push\n" ".set noreorder\n" "1:\n" "lld %0, %4\n" // temp = *ptr "daddu %1, %0, %3\n" // temp2 = temp + increment "scd %1, %2\n" // *ptr = temp2 (with atomic check) "beqz %1, 1b\n" // start again on atomic error "daddu %1, %0, %3\n" // temp2 = temp + increment ".set pop\n" : "=&r" (temp), "=&r" (temp2), "=m" (*ptr) : "Ir" (increment), "m" (*ptr) : "memory"); // temp2 now holds the final value. return temp2; } inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr, Atomic64 increment) { MemoryBarrier(); Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment); MemoryBarrier(); return res; } // "Acquire" operations // ensure that no later memory access can be reordered ahead of the operation. // "Release" operations ensure that no previous memory access can be reordered // after the operation. "Barrier" operations have both "Acquire" and "Release" // semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory // access. inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value); MemoryBarrier(); return res; } inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr, Atomic64 old_value, Atomic64 new_value) { MemoryBarrier(); return NoBarrier_CompareAndSwap(ptr, old_value, new_value); } inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; } inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value) { *ptr = value; MemoryBarrier(); } inline void Release_Store(volatile Atomic64* ptr, Atomic64 value) { MemoryBarrier(); *ptr = value; } inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr) { return *ptr; } inline Atomic64 Acquire_Load(volatile const Atomic64* ptr) { Atomic64 value = *ptr; MemoryBarrier(); return value; } inline Atomic64 Release_Load(volatile const Atomic64* ptr) { MemoryBarrier(); return *ptr; } #endif } // namespace internal } // namespace protobuf } // namespace google #undef ATOMICOPS_COMPILER_BARRIER #endif // GOOGLE_PROTOBUF_ATOMICOPS_INTERNALS_MIPS_GCC_H_
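// ---------------------------------------------------------------------------
// Usage sketch (added for illustration; not part of the original header).
// Callers are expected to include the dispatching atomicops.h header rather
// than this file. As one example of the acquire/release pairing documented
// above, a minimal test-and-set spinlock could look like the following --
// the include path and the assumption that lock_word is only ever touched
// through these primitives are ours, not the library's:
//
//   #include <google/protobuf/stubs/atomicops.h>
//
//   static google::protobuf::internal::Atomic32 lock_word = 0;
//
//   void Lock() {
//     // Spin until the 0 -> 1 swap succeeds; acquire semantics keep the
//     // critical section's loads and stores from moving above the swap.
//     while (google::protobuf::internal::Acquire_CompareAndSwap(
//                &lock_word, 0, 1) != 0) {
//     }
//   }
//
//   void Unlock() {
//     // Release semantics keep the critical section's accesses from moving
//     // below the store that publishes the unlocked state.
//     google::protobuf::internal::Release_Store(&lock_word, 0);
//   }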
return function() local Config = require(script.Parent.Config) it("should accept valid configuration", function() local config = Config.new() local values = config.get() expect(values.elementTracing).to.equal(false) config.set({ elementTracing = true, }) expect(values.elementTracing).to.equal(true) end) it("should reject invalid configuration keys", function() local config = Config.new() local badKey = "garblegoop" local ok, err = pcall(function() config.set({ [badKey] = true, }) end) expect(ok).to.equal(false) -- The error should mention our bad key somewhere. expect(err:find(badKey)).to.be.ok() end) it("should reject invalid configuration values", function() local config = Config.new() local goodKey = "elementTracing" local badValue = "Hello there!" local ok, err = pcall(function() config.set({ [goodKey] = badValue, }) end) expect(ok).to.equal(false) -- The error should mention both our key and value expect(err:find(goodKey)).to.be.ok() expect(err:find(badValue)).to.be.ok() end) end
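-- ---------------------------------------------------------------------------
-- Illustrative sketch (not part of the original spec): a minimal Config
-- implementation that satisfies the expectations exercised above. The real
-- Roact module is more involved; the single 'elementTracing' key and the
-- exact error messages here are assumptions made for this example.
local DEFAULT_CONFIG = {
	elementTracing = false,
}

local Config = {}

function Config.new()
	local currentConfig = {}
	for key, value in pairs(DEFAULT_CONFIG) do
		currentConfig[key] = value
	end

	local self = {}

	-- Returns the live table of configuration values, matching the spec's
	-- expectation that later set() calls are visible through it.
	function self.get()
		return currentConfig
	end

	-- Validates keys and value types, mentioning the offending key (and
	-- value) in the error message, as the spec requires.
	function self.set(configValues)
		for key, value in pairs(configValues) do
			if DEFAULT_CONFIG[key] == nil then
				error(("Invalid configuration key %q"):format(tostring(key)), 2)
			end
			if type(value) ~= type(DEFAULT_CONFIG[key]) then
				error(("Invalid value %q for configuration key %q"):format(
					tostring(value), tostring(key)), 2)
			end
			currentConfig[key] = value
		end
	end

	return self
end

return Config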
// Code generated by gengoos.go using 'go generate'. DO NOT EDIT. // +build openbsd package sys const GOOS = `openbsd` const GoosAndroid = 0 const GoosDarwin = 0 const GoosDragonfly = 0 const GoosFreebsd = 0 const GoosLinux = 0 const GoosNacl = 0 const GoosNetbsd = 0 const GoosOpenbsd = 1 const GoosPlan9 = 0 const GoosSolaris = 0 const GoosWindows = 0 const GoosZos = 0
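// ---------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file): branching on these
// integer constants instead of comparing GOOS strings at run time lets the
// compiler discard the arm that cannot be taken on the current build target.
// The constant below merely mirrors the generated one for the example.
//
//   package main
//
//   import "fmt"
//
//   const GoosOpenbsd = 1 // mirrors runtime/internal/sys on openbsd builds
//
//   func main() {
//       if GoosOpenbsd == 1 {
//           fmt.Println("taking the openbsd-specific path")
//       } else {
//           fmt.Println("generic path (eliminated at compile time on openbsd)")
//       }
//   }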
---
title: Continuous delivery with Flutter
description: How to automate continuous building and releasing of your Flutter app.
tags: CI/CD, continuous delivery, releasing apps
keywords: fastlane
---

Follow continuous delivery best practices with Flutter to make sure your
application is delivered to your beta testers and validated on a frequent
basis without resorting to manual workflows.

## fastlane

This guide shows how to integrate [fastlane][], an open-source tool suite,
with your existing testing and continuous integration (CI) workflows
(for example, Travis or Cirrus).

### Local setup

It's recommended that you test the build and deployment process locally
before migrating to a cloud-based system. You could also choose to perform
continuous delivery from a local machine.

1. Install fastlane with `gem install fastlane` or `brew install fastlane`.
   Visit the [fastlane docs][fastlane] for more info.
1. Create your Flutter project, and when ready, make sure that your project builds via
    * ![Android](/images/cd/android.png) `flutter build appbundle`; and
    * ![iOS](/images/cd/ios.png) `flutter build ios --release --no-codesign`.
1. Initialize the fastlane projects for each platform.
    * ![Android](/images/cd/android.png) In your `[project]/android` directory, run `fastlane init`.
    * ![iOS](/images/cd/ios.png) In your `[project]/ios` directory, run `fastlane init`.
1. Edit the `Appfile`s to ensure they have adequate metadata for your app.
    * ![Android](/images/cd/android.png) Check that `package_name` in
      `[project]/android/fastlane/Appfile` matches your package name in
      AndroidManifest.xml.
    * ![iOS](/images/cd/ios.png) Check that `app_identifier` in
      `[project]/ios/fastlane/Appfile` also matches Info.plist's bundle
      identifier. Fill in `apple_id`, `itc_team_id`, `team_id` with your
      respective account info.
1. Set up your local login credentials for the stores.
    * ![Android](/images/cd/android.png) Follow the [Supply setup steps][]
      and ensure that `fastlane supply init` successfully syncs data from
      your Play Store console. _Treat the .json file like your password and
      do not check it into any public source control repositories._
    * ![iOS](/images/cd/ios.png) Your iTunes Connect username is already in
      your `Appfile`'s `apple_id` field. Set the `FASTLANE_PASSWORD` shell
      environment variable with your iTunes Connect password. Otherwise,
      you'll be prompted when uploading to iTunes/TestFlight.
1. Set up code signing.
    * ![Android](/images/cd/android.png) On Android, there are two signing
      keys: deployment and upload. The end-users download the .apk signed
      with the 'deployment key'. An 'upload key' is used to authenticate
      the .aab / .apk uploaded by developers onto the Play Store and is
      re-signed with the deployment key once in the Play Store.
        * It's highly recommended to use the automatic cloud managed signing
          for the deployment key. For more information, see the
          [official Play Store documentation][].
        * Follow the [key generation steps]({{site.android-dev}}/studio/publish/app-signing#sign-apk)
          to create your upload key.
        * Configure gradle to use your upload key when building your app in
          release mode by editing `android.buildTypes.release` in
          `[project]/android/app/build.gradle`.
    * ![iOS](/images/cd/ios.png) On iOS, create and sign using a
      distribution certificate instead of a development certificate when
      you're ready to test and deploy using TestFlight or App Store.
        * Create and download a distribution certificate in your
          [Apple Developer Account console][].
        * `open [project]/ios/Runner.xcworkspace/` and select the
          distribution certificate in your target's settings pane.
1. Create a `Fastfile` script for each platform.
    * ![Android](/images/cd/android.png) On Android, follow the
      [fastlane Android beta deployment guide][]. Your edit could be as
      simple as adding a `lane` that calls `upload_to_play_store`. Set the
      `aab` argument to `../build/app/outputs/bundle/release/app-release.aab`
      to use the app bundle `flutter build` already built (see the sketch
      after this list).
    * ![iOS](/images/cd/ios.png) On iOS, follow the
      [fastlane iOS beta deployment guide][]. Your edit could be as simple
      as adding a `lane` that calls `build_ios_app` with
      `export_method: 'app-store'` and `upload_to_testflight`. On iOS an
      extra build is required since `flutter build` builds an .app rather
      than archiving .ipas for release.
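To make the Android side concrete, a minimal `Fastfile` along those lines
might look like the following sketch (the lane name `beta` and the
skipped-metadata flags are illustrative choices, not requirements):

```ruby
# android/fastlane/Fastfile -- minimal sketch; names are illustrative
default_platform(:android)

platform :android do
  desc "Upload a new build to the Play Store beta track"
  lane :beta do
    upload_to_play_store(
      track: 'beta',
      # Use the bundle that `flutter build appbundle` already produced.
      aab: '../build/app/outputs/bundle/release/app-release.aab',
      skip_upload_metadata: true,
      skip_upload_images: true,
      skip_upload_screenshots: true
    )
  end
end
```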
You're now ready to perform deployments locally or migrate the deployment
process to a continuous integration (CI) system.

### Running deployment locally

1. Build the release mode app.
    * ![Android](/images/cd/android.png) `flutter build appbundle`.
    * ![iOS](/images/cd/ios.png) `flutter build ios --release --no-codesign`.
      No need to sign now since fastlane will sign when archiving.
1. Run the Fastfile script on each platform.
    * ![Android](/images/cd/android.png) `cd android` then
      `fastlane [name of the lane you created]`.
    * ![iOS](/images/cd/ios.png) `cd ios` then
      `fastlane [name of the lane you created]`.

### Cloud build and deploy setup

First, follow the local setup section described in 'Local setup' to make
sure the process works before migrating onto a cloud system like Travis.

The main thing to consider is that since cloud instances are ephemeral and
untrusted, you won't be leaving your credentials like your Play Store
service account JSON or your iTunes distribution certificate on the server.

Continuous Integration (CI) systems, such as [Cirrus][], generally support
encrypted environment variables to store private data.

**Take precaution not to re-echo those variable values back onto the
console in your test scripts**. Those variables are also not available in
pull requests until they're merged to ensure that malicious actors cannot
create a pull request that prints these secrets out. Be careful with
interactions with these secrets in pull requests that you accept and merge.

1. Make login credentials ephemeral.
    * ![Android](/images/cd/android.png) On Android:
        * Remove the `json_key_file` field from `Appfile` and store the
          string content of the JSON in your CI system's encrypted variable.
          Use the `json_key_data` argument in `upload_to_play_store` to read
          the environment variable directly in your `Fastfile`.
        * Serialize your upload key (for example, using base64) and save it
          as an encrypted environment variable. You can deserialize it on
          your CI system during the install phase with

          ```bash
          echo "$PLAY_STORE_UPLOAD_KEY" | base64 --decode > /home/cirrus/[directory and filename specified in your gradle].keystore
          ```

    * ![iOS](/images/cd/ios.png) On iOS:
        * Move the local environment variable `FASTLANE_PASSWORD` to use
          encrypted environment variables on the CI system.
        * The CI system needs access to your distribution certificate.
          fastlane's [Match][] system is recommended to synchronize your
          certificates across machines.
2. It's recommended to use a Gemfile instead of using an indeterministic
   `gem install fastlane` on the CI system each time to ensure the fastlane
   dependencies are stable and reproducible between local and cloud
   machines. However, this step is optional.
    * In both your `[project]/android` and `[project]/ios` folders, create a
      `Gemfile` containing the following content:

      ```
      source "https://rubygems.org"
      gem "fastlane"
      ```

    * In both directories, run `bundle update` and check both `Gemfile` and
      `Gemfile.lock` into source control.
    * When running locally, use `bundle exec fastlane` instead of `fastlane`.
3. Create the CI test script such as `.travis.yml` or `.cirrus.yml` in your
   repository root (a sketch pulling the phases together follows this list).
    * Shard your script to run on both Linux and macOS platforms.
    * Remember to specify a dependency on Xcode for macOS (for example
      `osx_image: xcode9.2`).
    * See [fastlane CI documentation][] for CI specific setup.
    * During the setup phase, depending on the platform, make sure that:
        * Bundler is available using `gem install bundler`.
        * For Android, make sure the Android SDK is available and the
          `ANDROID_SDK_ROOT` path is set.
        * Run `bundle install` in `[project]/android` or `[project]/ios`.
        * Make sure the Flutter SDK is available and set in `PATH`.
    * In the script phase of the CI task:
        * Run `flutter build appbundle` or
          `flutter build ios --release --no-codesign`, depending on the
          platform.
        * `cd android` or `cd ios`.
        * `bundle exec fastlane [name of the lane]`.
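Putting those phases together, a `.cirrus.yml` for this workflow could be
sketched roughly as follows (the container images, instance type, and the
`beta` lane name are illustrative; check the Cirrus and fastlane docs for
current values):

```yaml
# .cirrus.yml -- minimal sketch; images and paths are illustrative
android_deploy_task:
  container:
    image: cirrusci/flutter:stable
  install_script:
    - gem install bundler
    - cd android && bundle install
  build_script:
    - flutter build appbundle
  deploy_script:
    - cd android && bundle exec fastlane beta

ios_deploy_task:
  osx_instance:
    image: mojave-xcode-10.2
  install_script:
    - gem install bundler
    - cd ios && bundle install
  build_script:
    - flutter build ios --release --no-codesign
  deploy_script:
    - cd ios && bundle exec fastlane beta
```

### Reference

The [Flutter Gallery Project][] uses fastlane for continuous deployment.
See the source for a working example of fastlane in action. Also see the
Flutter framework repository's [Cirrus script][].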
## Other services

The following are some other options available to help automate the
delivery of your application.

* [Codemagic CI/CD for Flutter][]
* [Flutter CI/CD with Bitrise][]
* [Appcircle CI/CD for Flutter][]
* [GitHub Actions- CI/CD on GitHub][] Get an [Example Project][]

[Apple Developer Account console]: https://developer.apple.com/account/ios/certificate/
[Cirrus]: https://cirrus-ci.org/guide/writing-tasks/#encrypted-variables
[Cirrus script]: {{site.github}}/flutter/flutter/blob/master/.cirrus.yml
[Codemagic CI/CD for Flutter]: https://blog.codemagic.io/getting-started-with-codemagic/
[Appcircle CI/CD for Flutter]: https://appcircle.io/blog/guide-to-automated-mobile-ci-cd-for-flutter-projects-with-appcircle/
[Example Project]: {{site.github}}/nabilnalakath/flutter-githubaction
[fastlane]: https://docs.fastlane.tools
[fastlane Android beta deployment guide]: https://docs.fastlane.tools/getting-started/android/beta-deployment/
[fastlane CI documentation]: https://docs.fastlane.tools/best-practices/continuous-integration
[fastlane iOS beta deployment guide]: https://docs.fastlane.tools/getting-started/ios/beta-deployment/
[Flutter CI/CD with Bitrise]: https://devcenter.bitrise.io/getting-started/getting-started-with-flutter-apps/
[Flutter Gallery Project]: {{site.github}}/flutter/gallery
[GitHub Actions- CI/CD on GitHub]: https://github.com/features/actions
[GitLab Continuous Integration (GitLab CI/CD)]: https://docs.gitlab.com/ee/ci/README.html#doc-nav
[Match]: https://docs.fastlane.tools/actions/match/
[official Play Store documentation]: https://support.google.com/googleplay/android-developer/answer/7384423?hl=en
[Supply setup steps]: https://docs.fastlane.tools/getting-started/android/setup/#setting-up-supply
/* * @file * * @date Nov 29, 2012 * @author: Anton Bondarev */ #include <sys/types.h> #include <sys/stat.h> #include <fs/kfsop.h> int kcreat(struct path *node, const char *pathname, mode_t mode) { return -1; } int kmkdir(const char *pathname, mode_t mode) { return -1; } int kremove(const char *pathname) { return -1; } int kunlink(const char *pathname) { return -1; } int krmdir(const char *pathname) { return -1; } int klstat(const char *path, struct stat *buf) { return -1; } int kmount(const char *source, const char *dest, const char *fs_type) { return -1; } int kformat(const char *pathname, const char *fs_type) { return -1; } int kumount(const char *dir) { return -1; } int kutime(const char *path,const struct utimbuf *times) { return -1; }
# Copyright (c) Microsoft Corporation. # Licensed under the MIT license. import math import torch import torch.nn as nn import torch.nn.functional as F from torch.nn.parameter import Parameter class BahdanauAttention(nn.Module): """ Bahdanau Attention (https://arxiv.org/abs/1409.0473) Implementation is very similar to tf.contrib.seq2seq.BahdanauAttention """ def __init__(self, query_size, key_size, num_units, normalize=True, dropout=0, batch_first=False): """ Constructor for the BahdanauAttention. :param query_size: feature dimension for query :param key_size: feature dimension for keys :param num_units: internal feature dimension :param normalize: whether to normalize energy term :param dropout: probability of the dropout (between softmax and bmm) :param batch_first: if True batch size is the 1st dimension, if False the sequence is first and batch size is second """ super(BahdanauAttention, self).__init__() self.normalize = normalize self.batch_first = batch_first self.num_units = num_units self.linear_q = nn.Linear(query_size, num_units, bias=False) self.linear_k = nn.Linear(key_size, num_units, bias=False) self.linear_att = Parameter(torch.Tensor(num_units)) self.dropout = nn.Dropout(dropout) # Register mask as a buffer to facilitate correct stashing when pipelining. self.register_buffer('mask', None) if self.normalize: self.normalize_scalar = Parameter(torch.Tensor(1)) self.normalize_bias = Parameter(torch.Tensor(num_units)) else: self.register_parameter('normalize_scalar', None) self.register_parameter('normalize_bias', None) self.reset_parameters() def reset_parameters(self): """ Sets initial random values for trainable parameters. """ stdv = 1. / math.sqrt(self.num_units) self.linear_att.data.uniform_(-stdv, stdv) if self.normalize: self.normalize_scalar.data.fill_(stdv) self.normalize_bias.data.zero_() def set_mask(self, context_len, context): """ sets self.mask which is applied before softmax ones for inactive context fields, zeros for active context fields :param context_len: b :param context: if batch_first: (b x t_k x n) else: (t_k x b x n) self.mask: (b x t_k) """ if self.batch_first: max_len = context.size(1) else: max_len = context.size(0) indices = torch.arange(0, max_len, dtype=torch.int64, device=context.device) self.mask = indices >= (context_len.unsqueeze(1)) def calc_score(self, att_query, att_keys): """ Calculate Bahdanau score :param att_query: b x t_q x n :param att_keys: b x t_k x n returns: b x t_q x t_k scores """ b, t_k, n = att_keys.size() t_q = att_query.size(1) att_query = att_query.unsqueeze(2).expand(b, t_q, t_k, n) att_keys = att_keys.unsqueeze(1).expand(b, t_q, t_k, n) sum_qk = att_query + att_keys if self.normalize: sum_qk = sum_qk + self.normalize_bias tmp = self.linear_att.to(torch.float32) linear_att = tmp / tmp.norm() linear_att = linear_att.to(self.normalize_scalar) linear_att = linear_att * self.normalize_scalar else: linear_att = self.linear_att out = torch.tanh(sum_qk).matmul(linear_att) return out def forward(self, query, keys): """ :param query: if batch_first: (b x t_q x n) else: (t_q x b x n) :param keys: if batch_first: (b x t_k x n) else (t_k x b x n) :returns: (context, scores_normalized) context: if batch_first: (b x t_q x n) else (t_q x b x n) scores_normalized: if batch_first (b x t_q x t_k) else (t_q x b x t_k) """ # first dim of keys and query has to be 'batch', it's needed for bmm if not self.batch_first: keys = keys.transpose(0, 1) if query.dim() == 3: query = query.transpose(0, 1) if query.dim() == 2: single_query = True query = 
query.unsqueeze(1) else: single_query = False b = query.size(0) t_k = keys.size(1) t_q = query.size(1) # FC layers to transform query and key processed_query = self.linear_q(query) processed_key = self.linear_k(keys) # scores: (b x t_q x t_k) scores = self.calc_score(processed_query, processed_key) if self.mask is not None: mask = self.mask.unsqueeze(1).expand(b, t_q, t_k) # I can't use -INF because of overflow check in pytorch scores.data.masked_fill_(mask, -65504.0) # Normalize the scores, softmax over t_k scores_normalized = F.softmax(scores, dim=-1) # Calculate the weighted average of the attention inputs according to # the scores scores_normalized = self.dropout(scores_normalized) # context: (b x t_q x n) context = torch.bmm(scores_normalized, keys) if single_query: context = context.squeeze(1) scores_normalized = scores_normalized.squeeze(1) elif not self.batch_first: context = context.transpose(0, 1) scores_normalized = scores_normalized.transpose(0, 1) return context, scores_normalized
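# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; not part of the original module).
# Shapes follow the docstrings above; all sizes here are arbitrary examples.
if __name__ == "__main__":
    b, t_q, t_k, n = 4, 5, 7, 16
    attn = BahdanauAttention(query_size=n, key_size=n, num_units=32,
                             batch_first=True)

    query = torch.randn(b, t_q, n)
    keys = torch.randn(b, t_k, n)

    # Mark all t_k key positions as valid; shorter context lengths would
    # mask the tail positions out of the softmax.
    attn.set_mask(torch.full((b,), t_k, dtype=torch.int64), keys)

    context, scores = attn(query, keys)
    print(context.shape)  # torch.Size([4, 5, 16])
    print(scores.shape)   # torch.Size([4, 5, 7])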
config BR2_PACKAGE_TI_UIM bool "ti-uim" help User Mode Initialization Manager for TI wl12xx connectivity chip shared transport drivers. This daemon is needed to get the btwilink driver to work. http://omappedia.org/wiki/Device_Driver_Interface_of_WiLink_Solution
[![Open Source Love](https://badges.frapsoft.com/os/v1/open-source.svg?v=103)](https://github.com/ellerbrock/open-source-badges/)
[<img align="right" width="150" src="../assets/join-slack-team.png">](https://join.slack.com/t/firstcontributors/shared_invite/enQtNjkxNzQwNzA2MTMwLTVhMWJjNjg2ODRlNWZhNjIzYjgwNDIyZWYwZjhjYTQ4OTBjMWM0MmFhZDUxNzBiYzczMGNiYzcxNjkzZDZlMDM)
[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
[![Open Source Helpers](https://www.codetriage.com/roshanjossey/first-contributions/badges/users.svg)](https://www.codetriage.com/roshanjossey/first-contributions)

# First Contributions

It's hard. Doing something for the first time is always hard. Even more so when you're collaborating with other people. That's why we decided to simplify the learning process for newcomers to open-source contribution.

Reading articles and watching video tutorials helps, but is there a better teacher than practice in a safe environment?

The goal of this project is to guide and simplify the first steps of newcomers participating in and contributing to open source. If you want to make your first contribution and become part of the open-source community, follow the steps below.

#### *If you're not comfortable using the command line, [here are tutorials for tools with graphical interfaces]( #tutorials-using-other-tools ).*

#### *You can also read this article in [other languages](translations/Translations.md).*

[🇮🇳](translations/README.hi.md) [🇲🇲](translations/README.mm_unicode.md) [🇮🇩](translations/README.id.md) [🇫🇷](translations/README.fr.md) [🇪🇸](translations/README.es.md) [<img src="../assets/catalan1.png" width="22">](translations/README.ca.md) [🇳🇱](translations/README.nl.md) [🇱🇹](translations/README.lt.md) [🇷🇺](translations/README.ru.md) [:slovakia:](translations/README.slk.md) [🇯🇵](translations/README.ja.md) [🇻🇳](translations/README.vn.md) [🇵🇱](translations/README.pl.md) [🇮🇷](translations/README.fa.md) [🇮🇷](translations/README.fa.en.md) [🇰🇷 🇰🇵](translations/README.ko.md) [🇩🇪](translations/README.de.md) [🇩🇰](translations/README.da.md) [🇨🇳](translations/README.chs.md) [🇹🇼](translations/README.cht.md) [🇬🇷](translations/README.gr.md) [🇪🇬](translations/README.eg.md) [🇸🇦](translations/README.ar.md) [🇺🇦](translations/README.ua.md) [🇧🇷](translations/README.pt_br.md) [🇵🇹](translations/README.pt-pt.md) [🇮🇹](translations/README.it.md) [🇹🇭](translations/README.th.md) [🏴](translations/README.gl.md) [🇵🇰](translations/README.ur.md) [:bangladesh:](translations/README.bn.md) [🇲🇩 🇷🇴](translations/README.ro.md) [🇹🇷](translations/README.tr.md) [🇸🇪](translations/README.se.md) [:slovenia:](translations/README.sl.md) [🇮🇱](translations/README.hb.md) [🇨🇿](translations/README.cs.md) [<img src="../assets/pirate.png" width="22">](translations/README.en-pirate.md) [🇲🇽](translations/README.mx.md)

<img align="right" width="300" src="../assets/fork.png" alt="fork this repository" />

If you don't have git on your machine, [install it](https://help.github.com/articles/set-up-git/).

## Fork this repository

Fork this repository by clicking the "Fork" button at the top of this page. This will create a copy of this repository in your GitHub account.

## Clone this repository

<img align="right" width="300" src="../assets/clone.png" alt="clone this repository" />

Now clone the forked repository to your machine.
Go to your GitHub account, open the forked repository, click on the 'Clone' button and copy the link, or click the 'copy to clipboard' icon.

Open a terminal and run the following command:

```
git clone "the url you just copied"
```

where "the url you just copied" (without the quotation marks) is the url to this repository (your fork of this project). See the previous steps for how to obtain the url.

<img align="right" width="300" src="../assets/copy-to-clipboard.png" alt="copy URL to clipboard" />

For example:

```
git clone https://github.com/this-is-you/first-contributions.git
```

where `this-is-you` is your GitHub username. In this step you made a copy of the contents of the first-contributions GitHub repository on your computer.

## Create a branch

Change to the directory on your computer where you cloned the repository (if you are not already there):

```
cd first-contributions
```

Now create a branch using the `git checkout` command:

```
git checkout -b <add-your-new-branch-name>
```

For example:

```
git checkout -b add-alonzo-church
```

(The branch name doesn't need to contain the word *add*, but it's recommended, because the purpose of this branch is to add your name to a list.)

## Make the necessary changes and commit them

Now open the `Contributors.md` file in a text editor and add your name to it. Don't add it at the beginning or the end of the file. Put it somewhere in the middle. Now save the changes.

<img align="right" width="450" src="../assets/git-status.png" alt="git status" />

If you navigate in the terminal to the project directory and run the command `git status`, you'll see that there are changed files.

Add those changes to the branch you created using the `git add` command:

```
git add Contributors.md
```

Now commit those changes with the `git commit` command:

```
git commit -m "Add <your-name> to Contributors list"
```

replacing `<your-name>` with your name.

## Push your changes to GitHub

Push your changes by running the `git push` command:

```
git push origin <add-your-branch-name>
```

replacing `<add-your-branch-name>` with the name of the branch you created earlier.

## Submit your changes for review

If you go to your repository on GitHub, you'll see a `Compare & pull request` button. Click on it.

<img style="float: right;" src="../assets/compare-and-pull.png" alt="create a pull request" />

Now submit your changes for review.

<img style="float: right;" src="../assets/submit-pull-request.png" alt="submit pull request" />

Soon I'll merge all your changes into the master branch of this project. You'll be notified by email once that happens.

## Where to go from here?

Congratulations! You just completed the standard _fork -> clone -> edit -> PR_ workflow that you'll encounter as a contributor!

Celebrate your contribution and share it with your friends and followers by visiting the [web app](https://roshanjossey.github.io/first-contributions/#social-share).

You can join our slack team in case you have any further questions or need help: [Join the slack team](https://join.slack.com/t/firstcontributors/shared_invite/enQtMzE1MTYwNzI3ODQ0LTZiMDA2OGI2NTYyNjM1MTFiNTc4YTRhZTg4OWZjMzA0ZWZmY2UxYzVkMzI1ZmVmOWI4ODdkZWQwNTM2NDVmNjY).

Time to start contributing to other projects as well. We've compiled a list of projects with easy issues that are good starting points.
Check out the [list of projects in the web app](https://roshanjossey.github.io/first-contributions/#project-list).

### [Additional material](../additional-material/git_workflow_scenarios/additional-material.md)

## Tutorials using other tools

|<a href="../github-desktop-tutorial.md"><img alt="GitHub Desktop" src="https://desktop.github.com/images/desktop-icon.svg" width="100"></a>|<a href="../github-windows-vs2017-tutorial.md"><img alt="Visual Studio 2017" src="https://upload.wikimedia.org/wikipedia/commons/c/cd/Visual_Studio_2017_Logo.svg" width="100"></a>|<a href="../gitkraken-tutorial.md"><img alt="GitKraken" src="../assets/gk-icon.png" width="100"></a>|<a href="../github-windows-vs-code-tutorial.md"><img alt="VS Code" src="https://upload.wikimedia.org/wikipedia/commons/2/2d/Visual_Studio_Code_1.18_icon.svg" width=100></a>|
|---|---|---|---|
|[GitHub Desktop](../github-desktop-tutorial.md)|[Visual Studio 2017](../github-windows-vs2017-tutorial.md)|[GitKraken](../gitkraken-tutorial.md)|[Visual Studio Code](../github-windows-vs-code-tutorial.md)|
// // Generated by class-dump 3.5 (64 bit). // // class-dump is Copyright (C) 1997-1998, 2000-2001, 2004-2015 by Steve Nygard. // #import <StoreServices/SSRequest.h> #import <StoreServices/SSXPCCoding-Protocol.h> @class NSString; @interface SSVClaimApplicationsRequest : SSRequest <SSXPCCoding> { long long _claimStyle; } @property(readonly, nonatomic) long long claimStyle; // @synthesize claimStyle=_claimStyle; - (id)copyXPCEncoding; - (id)initWithXPCEncoding:(id)arg1; - (void)startWithResponseBlock:(CDUnknownBlockType)arg1; - (id)initWithClaimStyle:(long long)arg1; // Remaining properties @property(readonly, copy) NSString *debugDescription; @property(readonly, copy) NSString *description; @property(readonly) unsigned long long hash; @property(readonly) Class superclass; @end
/* Copyright (C) 1995-1998 Eric Young ([email protected]) * All rights reserved. * * This package is an SSL implementation written * by Eric Young ([email protected]). * The implementation was written so as to conform with Netscapes SSL. * * This library is free for commercial and non-commercial use as long as * the following conditions are aheared to. The following conditions * apply to all code found in this distribution, be it the RC4, RSA, * lhash, DES, etc., code; not just the SSL code. The SSL documentation * included with this distribution is covered by the same copyright terms * except that the holder is Tim Hudson ([email protected]). * * Copyright remains Eric Young's, and as such any Copyright notices in * the code are not to be removed. * If this package is used in a product, Eric Young should be given attribution * as the author of the parts of the library used. * This can be in the form of a textual message at program startup or * in documentation (online or textual) provided with the package. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * 3. All advertising materials mentioning features or use of this software * must display the following acknowledgement: * "This product includes cryptographic software written by * Eric Young ([email protected])" * The word 'cryptographic' can be left out if the rouines from the library * being used are not cryptographic related :-). * 4. If you include any Windows specific code (or a derivative thereof) from * the apps directory (application code) you must include an acknowledgement: * "This product includes software written by Tim Hudson ([email protected])" * * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF * SUCH DAMAGE. * * The licence and distribution terms for any publically available version or * derivative of this code cannot be changed. i.e. this code cannot simply be * copied and put under another distribution licence * [including the GNU Public Licence.] */ #ifndef OPENSSL_HEADER_SHA_H #define OPENSSL_HEADER_SHA_H #include <openssl/base.h> #if defined(__cplusplus) extern "C" { #endif /* The SHA family of hash functions (SHA-1 and SHA-2). */ /* SHA_CBLOCK is the block size of SHA-1. */ #define SHA_CBLOCK 64 /* SHA_DIGEST_LENGTH is the length of a SHA-1 digest. */ #define SHA_DIGEST_LENGTH 20 /* TODO(fork): remove */ #define SHA_LBLOCK 16 #define SHA_LONG uint32_t /* SHA1_Init initialises |sha| and returns one. 
*/
OPENSSL_EXPORT int SHA1_Init(SHA_CTX *sha);

/* SHA1_Update adds |len| bytes from |data| to |sha| and returns one. */
OPENSSL_EXPORT int SHA1_Update(SHA_CTX *sha, const void *data, size_t len);

/* SHA1_Final adds the final padding to |sha| and writes the resulting digest
 * to |md|, which must have at least |SHA_DIGEST_LENGTH| bytes of space. It
 * returns one. */
OPENSSL_EXPORT int SHA1_Final(uint8_t *md, SHA_CTX *sha);

/* SHA1 writes the digest of |len| bytes from |data| to |out| and returns
 * |out|. There must be at least |SHA_DIGEST_LENGTH| bytes of space in
 * |out|. */
OPENSSL_EXPORT uint8_t *SHA1(const uint8_t *data, size_t len, uint8_t *out);

/* SHA1_Transform is a low-level function that performs a single, SHA-1 block
 * transformation using the state from |sha| and 64 bytes from |block|. */
OPENSSL_EXPORT void SHA1_Transform(SHA_CTX *sha, const uint8_t *block);

struct sha_state_st {
#if defined(OPENSSL_WINDOWS)
  uint32_t h[5];
#else
  /* wpa_supplicant accesses |h0|..|h4| so we must support those names
   * for compatibility with it until it can be updated. */
  union {
    uint32_t h[5];
    struct {
      uint32_t h0;
      uint32_t h1;
      uint32_t h2;
      uint32_t h3;
      uint32_t h4;
    };
  };
#endif
  uint32_t Nl, Nh;
  uint8_t data[SHA_CBLOCK];
  unsigned num;
};


/* SHA-224. */

/* SHA224_CBLOCK is the block size of SHA-224. */
#define SHA224_CBLOCK 64

/* SHA224_DIGEST_LENGTH is the length of a SHA-224 digest. */
#define SHA224_DIGEST_LENGTH 28

/* SHA224_Init initialises |sha| and returns 1. */
OPENSSL_EXPORT int SHA224_Init(SHA256_CTX *sha);

/* SHA224_Update adds |len| bytes from |data| to |sha| and returns 1. */
OPENSSL_EXPORT int SHA224_Update(SHA256_CTX *sha, const void *data, size_t len);

/* SHA224_Final adds the final padding to |sha| and writes the resulting digest
 * to |md|, which must have at least |SHA224_DIGEST_LENGTH| bytes of space. It
 * returns one on success and zero on programmer error. */
OPENSSL_EXPORT int SHA224_Final(uint8_t *md, SHA256_CTX *sha);

/* SHA224 writes the digest of |len| bytes from |data| to |out| and returns
 * |out|. There must be at least |SHA224_DIGEST_LENGTH| bytes of space in
 * |out|. */
OPENSSL_EXPORT uint8_t *SHA224(const uint8_t *data, size_t len, uint8_t *out);


/* SHA-256. */

/* SHA256_CBLOCK is the block size of SHA-256. */
#define SHA256_CBLOCK 64

/* SHA256_DIGEST_LENGTH is the length of a SHA-256 digest. */
#define SHA256_DIGEST_LENGTH 32

/* SHA256_Init initialises |sha| and returns 1. */
OPENSSL_EXPORT int SHA256_Init(SHA256_CTX *sha);

/* SHA256_Update adds |len| bytes from |data| to |sha| and returns 1. */
OPENSSL_EXPORT int SHA256_Update(SHA256_CTX *sha, const void *data, size_t len);

/* SHA256_Final adds the final padding to |sha| and writes the resulting digest
 * to |md|, which must have at least |SHA256_DIGEST_LENGTH| bytes of space. It
 * returns one on success and zero on programmer error. */
OPENSSL_EXPORT int SHA256_Final(uint8_t *md, SHA256_CTX *sha);

/* SHA256 writes the digest of |len| bytes from |data| to |out| and returns
 * |out|. There must be at least |SHA256_DIGEST_LENGTH| bytes of space in
 * |out|. */
OPENSSL_EXPORT uint8_t *SHA256(const uint8_t *data, size_t len, uint8_t *out);

/* SHA256_Transform is a low-level function that performs a single, SHA-256
 * block transformation using the state from |sha| and 64 bytes from
 * |data|. */
OPENSSL_EXPORT void SHA256_Transform(SHA256_CTX *sha, const uint8_t *data);

struct sha256_state_st {
  uint32_t h[8];
  uint32_t Nl, Nh;
  uint8_t data[SHA256_CBLOCK];
  unsigned num, md_len;
};


/* SHA-384.
 */

/* SHA384_CBLOCK is the block size of SHA-384. */
#define SHA384_CBLOCK 128

/* SHA384_DIGEST_LENGTH is the length of a SHA-384 digest. */
#define SHA384_DIGEST_LENGTH 48

/* SHA384_Init initialises |sha| and returns one. */
OPENSSL_EXPORT int SHA384_Init(SHA512_CTX *sha);

/* SHA384_Update adds |len| bytes from |data| to |sha| and returns one. */
OPENSSL_EXPORT int SHA384_Update(SHA512_CTX *sha, const void *data, size_t len);

/* SHA384_Final adds the final padding to |sha| and writes the resulting digest
 * to |md|, which must have at least |SHA384_DIGEST_LENGTH| bytes of space. It
 * returns one on success and zero on programmer error. */
OPENSSL_EXPORT int SHA384_Final(uint8_t *md, SHA512_CTX *sha);

/* SHA384 writes the digest of |len| bytes from |data| to |out| and returns
 * |out|. There must be at least |SHA384_DIGEST_LENGTH| bytes of space in
 * |out|. */
OPENSSL_EXPORT uint8_t *SHA384(const uint8_t *data, size_t len, uint8_t *out);

/* SHA384_Transform is a low-level function that performs a single SHA-384
 * block transformation using the state from |sha| and 128 bytes from
 * |data|. */
OPENSSL_EXPORT void SHA384_Transform(SHA512_CTX *sha, const uint8_t *data);


/* SHA-512. */

/* SHA512_CBLOCK is the block size of SHA-512. */
#define SHA512_CBLOCK 128

/* SHA512_DIGEST_LENGTH is the length of a SHA-512 digest. */
#define SHA512_DIGEST_LENGTH 64

/* SHA512_Init initialises |sha| and returns one. */
OPENSSL_EXPORT int SHA512_Init(SHA512_CTX *sha);

/* SHA512_Update adds |len| bytes from |data| to |sha| and returns one. */
OPENSSL_EXPORT int SHA512_Update(SHA512_CTX *sha, const void *data, size_t len);

/* SHA512_Final adds the final padding to |sha| and writes the resulting digest
 * to |md|, which must have at least |SHA512_DIGEST_LENGTH| bytes of space. It
 * returns one on success and zero on programmer error. */
OPENSSL_EXPORT int SHA512_Final(uint8_t *md, SHA512_CTX *sha);

/* SHA512 writes the digest of |len| bytes from |data| to |out| and returns
 * |out|. There must be at least |SHA512_DIGEST_LENGTH| bytes of space in
 * |out|. */
OPENSSL_EXPORT uint8_t *SHA512(const uint8_t *data, size_t len, uint8_t *out);

/* SHA512_Transform is a low-level function that performs a single SHA-512
 * block transformation using the state from |sha| and 128 bytes from
 * |data|. */
OPENSSL_EXPORT void SHA512_Transform(SHA512_CTX *sha, const uint8_t *data);

struct sha512_state_st {
  uint64_t h[8];
  uint64_t Nl, Nh;
  union {
    uint64_t d[16];
    uint8_t p[128];
  } u;
  unsigned num, md_len;
};


#if defined(__cplusplus)
}  /* extern C */
#endif

#endif  /* OPENSSL_HEADER_SHA_H */
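/* A minimal usage sketch of the interface above (not part of the original
 * header; it assumes only the declarations in this file plus <stdio.h>).
 * The one-shot SHA256() call and the Init/Update/Final sequence compute the
 * same digest. Wrapped in |#if 0| so the header stays declaration-only. */
#if 0
#include <stdio.h>

static void sha256_example(void) {
  static const uint8_t kMsg[] = {'h', 'e', 'l', 'l', 'o'};
  uint8_t digest[SHA256_DIGEST_LENGTH];

  /* One-shot: writes the digest straight into |digest|. */
  SHA256(kMsg, sizeof(kMsg), digest);

  /* Streaming: the same digest, fed in two chunks. */
  SHA256_CTX ctx;
  SHA256_Init(&ctx);
  SHA256_Update(&ctx, kMsg, 2);
  SHA256_Update(&ctx, kMsg + 2, sizeof(kMsg) - 2);
  SHA256_Final(digest, &ctx);

  for (unsigned i = 0; i < SHA256_DIGEST_LENGTH; i++) {
    printf("%02x", digest[i]);
  }
  printf("\n");
}
#endif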
/* Copyright 2016 The Kubernetes Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package v1alpha1 import ( api "k8s.io/client-go/1.5/pkg/api" v1alpha1 "k8s.io/client-go/1.5/pkg/apis/rbac/v1alpha1" watch "k8s.io/client-go/1.5/pkg/watch" ) // RolesGetter has a method to return a RoleInterface. // A group's client should implement this interface. type RolesGetter interface { Roles(namespace string) RoleInterface } // RoleInterface has methods to work with Role resources. type RoleInterface interface { Create(*v1alpha1.Role) (*v1alpha1.Role, error) Update(*v1alpha1.Role) (*v1alpha1.Role, error) Delete(name string, options *api.DeleteOptions) error DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error Get(name string) (*v1alpha1.Role, error) List(opts api.ListOptions) (*v1alpha1.RoleList, error) Watch(opts api.ListOptions) (watch.Interface, error) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) RoleExpansion } // roles implements RoleInterface type roles struct { client *RbacClient ns string } // newRoles returns a Roles func newRoles(c *RbacClient, namespace string) *roles { return &roles{ client: c, ns: namespace, } } // Create takes the representation of a role and creates it. Returns the server's representation of the role, and an error, if there is any. func (c *roles) Create(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} err = c.client.Post(). Namespace(c.ns). Resource("roles"). Body(role). Do(). Into(result) return } // Update takes the representation of a role and updates it. Returns the server's representation of the role, and an error, if there is any. func (c *roles) Update(role *v1alpha1.Role) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} err = c.client.Put(). Namespace(c.ns). Resource("roles"). Name(role.Name). Body(role). Do(). Into(result) return } // Delete takes name of the role and deletes it. Returns an error if one occurs. func (c *roles) Delete(name string, options *api.DeleteOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("roles"). Name(name). Body(options). Do(). Error() } // DeleteCollection deletes a collection of objects. func (c *roles) DeleteCollection(options *api.DeleteOptions, listOptions api.ListOptions) error { return c.client.Delete(). Namespace(c.ns). Resource("roles"). VersionedParams(&listOptions, api.ParameterCodec). Body(options). Do(). Error() } // Get takes name of the role, and returns the corresponding role object, and an error if there is any. func (c *roles) Get(name string) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). Name(name). Do(). Into(result) return } // List takes label and field selectors, and returns the list of Roles that match those selectors. func (c *roles) List(opts api.ListOptions) (result *v1alpha1.RoleList, err error) { result = &v1alpha1.RoleList{} err = c.client.Get(). Namespace(c.ns). Resource("roles"). 
VersionedParams(&opts, api.ParameterCodec). Do(). Into(result) return } // Watch returns a watch.Interface that watches the requested roles. func (c *roles) Watch(opts api.ListOptions) (watch.Interface, error) { return c.client.Get(). Prefix("watch"). Namespace(c.ns). Resource("roles"). VersionedParams(&opts, api.ParameterCodec). Watch() } // Patch applies the patch and returns the patched role. func (c *roles) Patch(name string, pt api.PatchType, data []byte, subresources ...string) (result *v1alpha1.Role, err error) { result = &v1alpha1.Role{} err = c.client.Patch(pt). Namespace(c.ns). Resource("roles"). SubResource(subresources...). Name(name). Body(data). Do(). Into(result) return }
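// A minimal usage sketch of the client above (hypothetical wiring: how the
// *RbacClient is obtained depends on the rest of the generated client set):
//
//	func printRoles(rbac *RbacClient) error {
//		list, err := rbac.Roles("default").List(api.ListOptions{})
//		if err != nil {
//			return err
//		}
//		for _, role := range list.Items {
//			fmt.Println(role.Name)
//		}
//		return nil
//	}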
<?php namespace GuzzleHttp\Tests\Psr7; use GuzzleHttp\Psr7\BufferStream; class BufferStreamTest extends \PHPUnit_Framework_TestCase { public function testHasMetadata() { $b = new BufferStream(10); $this->assertTrue($b->isReadable()); $this->assertTrue($b->isWritable()); $this->assertFalse($b->isSeekable()); $this->assertEquals(null, $b->getMetadata('foo')); $this->assertEquals(10, $b->getMetadata('hwm')); $this->assertEquals([], $b->getMetadata()); } public function testRemovesReadDataFromBuffer() { $b = new BufferStream(); $this->assertEquals(3, $b->write('foo')); $this->assertEquals(3, $b->getSize()); $this->assertFalse($b->eof()); $this->assertEquals('foo', $b->read(10)); $this->assertTrue($b->eof()); $this->assertEquals('', $b->read(10)); } /** * @expectedException \RuntimeException * @expectedExceptionMessage Cannot determine the position of a BufferStream */ public function testCanCastToStringOrGetContents() { $b = new BufferStream(); $b->write('foo'); $b->write('baz'); $this->assertEquals('foo', $b->read(3)); $b->write('bar'); $this->assertEquals('bazbar', (string) $b); $b->tell(); } public function testDetachClearsBuffer() { $b = new BufferStream(); $b->write('foo'); $b->detach(); $this->assertTrue($b->eof()); $this->assertEquals(3, $b->write('abc')); $this->assertEquals('abc', $b->read(10)); } public function testExceedingHighwaterMarkReturnsFalseButStillBuffers() { $b = new BufferStream(5); $this->assertEquals(3, $b->write('hi ')); $this->assertFalse($b->write('hello')); $this->assertEquals('hi hello', (string) $b); $this->assertEquals(4, $b->write('test')); } }
/* * Copyright (C) 2008 Apple Inc. All Rights Reserved. * Copyright (C) 2009 Google Inc. All Rights Reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions * are met: * 1. Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * 2. Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY APPLE COMPUTER, INC. ``AS IS'' AND ANY * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE COMPUTER, INC. OR * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY * OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. * */ #include "config.h" #if ENABLE(WORKERS) #include "WorkerMessagingProxy.h" #include "CrossThreadTask.h" #include "DedicatedWorkerContext.h" #include "DedicatedWorkerThread.h" #include "DOMWindow.h" #include "Document.h" #include "ErrorEvent.h" #include "ExceptionCode.h" #include "InspectorInstrumentation.h" #include "MessageEvent.h" #include "NotImplemented.h" #include "ScriptCallStack.h" #include "ScriptExecutionContext.h" #include "Worker.h" #include "WorkerDebuggerAgent.h" #include "WorkerInspectorController.h" #include <wtf/MainThread.h> namespace WebCore { class MessageWorkerContextTask : public ScriptExecutionContext::Task { public: static PassOwnPtr<MessageWorkerContextTask> create(PassRefPtr<SerializedScriptValue> message, PassOwnPtr<MessagePortChannelArray> channels) { return adoptPtr(new MessageWorkerContextTask(message, channels)); } private: MessageWorkerContextTask(PassRefPtr<SerializedScriptValue> message, PassOwnPtr<MessagePortChannelArray> channels) : m_message(message) , m_channels(channels) { } virtual void performTask(ScriptExecutionContext* scriptContext) { ASSERT(scriptContext->isWorkerContext()); DedicatedWorkerContext* context = static_cast<DedicatedWorkerContext*>(scriptContext); OwnPtr<MessagePortArray> ports = MessagePort::entanglePorts(*scriptContext, m_channels.release()); context->dispatchEvent(MessageEvent::create(ports.release(), m_message)); context->thread()->workerObjectProxy().confirmMessageFromWorkerObject(context->hasPendingActivity()); } private: RefPtr<SerializedScriptValue> m_message; OwnPtr<MessagePortChannelArray> m_channels; }; class MessageWorkerTask : public ScriptExecutionContext::Task { public: static PassOwnPtr<MessageWorkerTask> create(PassRefPtr<SerializedScriptValue> message, PassOwnPtr<MessagePortChannelArray> channels, WorkerMessagingProxy* messagingProxy) { return adoptPtr(new MessageWorkerTask(message, channels, messagingProxy)); } private: MessageWorkerTask(PassRefPtr<SerializedScriptValue> message, PassOwnPtr<MessagePortChannelArray> channels, WorkerMessagingProxy* messagingProxy) : m_message(message) , m_channels(channels) , m_messagingProxy(messagingProxy) { } virtual void 
performTask(ScriptExecutionContext* scriptContext) { Worker* workerObject = m_messagingProxy->workerObject(); if (!workerObject || m_messagingProxy->askedToTerminate()) return; OwnPtr<MessagePortArray> ports = MessagePort::entanglePorts(*scriptContext, m_channels.release()); workerObject->dispatchEvent(MessageEvent::create(ports.release(), m_message)); } private: RefPtr<SerializedScriptValue> m_message; OwnPtr<MessagePortChannelArray> m_channels; WorkerMessagingProxy* m_messagingProxy; }; class WorkerExceptionTask : public ScriptExecutionContext::Task { public: static PassOwnPtr<WorkerExceptionTask> create(const String& errorMessage, int lineNumber, const String& sourceURL, WorkerMessagingProxy* messagingProxy) { return adoptPtr(new WorkerExceptionTask(errorMessage, lineNumber, sourceURL, messagingProxy)); } private: WorkerExceptionTask(const String& errorMessage, int lineNumber, const String& sourceURL, WorkerMessagingProxy* messagingProxy) : m_errorMessage(errorMessage.crossThreadString()) , m_lineNumber(lineNumber) , m_sourceURL(sourceURL.crossThreadString()) , m_messagingProxy(messagingProxy) { } virtual void performTask(ScriptExecutionContext* context) { Worker* workerObject = m_messagingProxy->workerObject(); if (!workerObject) return; // We don't bother checking the askedToTerminate() flag here, because exceptions should *always* be reported even if the thread is terminated. // This is intentionally different than the behavior in MessageWorkerTask, because terminated workers no longer deliver messages (section 4.6 of the WebWorker spec), but they do report exceptions. bool errorHandled = !workerObject->dispatchEvent(ErrorEvent::create(m_errorMessage, m_sourceURL, m_lineNumber)); if (!errorHandled) context->reportException(m_errorMessage, m_lineNumber, m_sourceURL, 0); } String m_errorMessage; int m_lineNumber; String m_sourceURL; WorkerMessagingProxy* m_messagingProxy; }; class WorkerContextDestroyedTask : public ScriptExecutionContext::Task { public: static PassOwnPtr<WorkerContextDestroyedTask> create(WorkerMessagingProxy* messagingProxy) { return adoptPtr(new WorkerContextDestroyedTask(messagingProxy)); } private: WorkerContextDestroyedTask(WorkerMessagingProxy* messagingProxy) : m_messagingProxy(messagingProxy) { } virtual void performTask(ScriptExecutionContext*) { m_messagingProxy->workerContextDestroyedInternal(); } WorkerMessagingProxy* m_messagingProxy; }; class WorkerTerminateTask : public ScriptExecutionContext::Task { public: static PassOwnPtr<WorkerTerminateTask> create(WorkerMessagingProxy* messagingProxy) { return adoptPtr(new WorkerTerminateTask(messagingProxy)); } private: WorkerTerminateTask(WorkerMessagingProxy* messagingProxy) : m_messagingProxy(messagingProxy) { } virtual void performTask(ScriptExecutionContext*) { m_messagingProxy->terminateWorkerContext(); } WorkerMessagingProxy* m_messagingProxy; }; class WorkerThreadActivityReportTask : public ScriptExecutionContext::Task { public: static PassOwnPtr<WorkerThreadActivityReportTask> create(WorkerMessagingProxy* messagingProxy, bool confirmingMessage, bool hasPendingActivity) { return adoptPtr(new WorkerThreadActivityReportTask(messagingProxy, confirmingMessage, hasPendingActivity)); } private: WorkerThreadActivityReportTask(WorkerMessagingProxy* messagingProxy, bool confirmingMessage, bool hasPendingActivity) : m_messagingProxy(messagingProxy) , m_confirmingMessage(confirmingMessage) , m_hasPendingActivity(hasPendingActivity) { } virtual void performTask(ScriptExecutionContext*) { 
m_messagingProxy->reportPendingActivityInternal(m_confirmingMessage, m_hasPendingActivity); } WorkerMessagingProxy* m_messagingProxy; bool m_confirmingMessage; bool m_hasPendingActivity; }; class PostMessageToPageInspectorTask : public ScriptExecutionContext::Task { public: static PassOwnPtr<PostMessageToPageInspectorTask> create(WorkerMessagingProxy* messagingProxy, const String& message) { return adoptPtr(new PostMessageToPageInspectorTask(messagingProxy, message)); } private: PostMessageToPageInspectorTask(WorkerMessagingProxy* messagingProxy, const String& message) : m_messagingProxy(messagingProxy) , m_message(message.crossThreadString()) { } virtual void performTask(ScriptExecutionContext*) { if (WorkerContextProxy::PageInspector* pageInspector = m_messagingProxy->m_pageInspector) pageInspector->dispatchMessageFromWorker(m_message); } WorkerMessagingProxy* m_messagingProxy; String m_message; }; #if !PLATFORM(CHROMIUM) WorkerContextProxy* WorkerContextProxy::create(Worker* worker) { return new WorkerMessagingProxy(worker); } #endif WorkerMessagingProxy::WorkerMessagingProxy(Worker* workerObject) : m_scriptExecutionContext(workerObject->scriptExecutionContext()) , m_workerObject(workerObject) , m_unconfirmedMessageCount(0) , m_workerThreadHadPendingActivity(false) , m_askedToTerminate(false) #if ENABLE(INSPECTOR) , m_pageInspector(0) #endif { ASSERT(m_workerObject); ASSERT((m_scriptExecutionContext->isDocument() && isMainThread()) || (m_scriptExecutionContext->isWorkerContext() && currentThread() == static_cast<WorkerContext*>(m_scriptExecutionContext.get())->thread()->threadID())); } WorkerMessagingProxy::~WorkerMessagingProxy() { ASSERT(!m_workerObject); ASSERT((m_scriptExecutionContext->isDocument() && isMainThread()) || (m_scriptExecutionContext->isWorkerContext() && currentThread() == static_cast<WorkerContext*>(m_scriptExecutionContext.get())->thread()->threadID())); } void WorkerMessagingProxy::startWorkerContext(const KURL& scriptURL, const String& userAgent, const String& sourceCode) { RefPtr<DedicatedWorkerThread> thread = DedicatedWorkerThread::create(scriptURL, userAgent, sourceCode, *this, *this, DontPauseWorkerContextOnStart); workerThreadCreated(thread); thread->start(); } void WorkerMessagingProxy::postMessageToWorkerObject(PassRefPtr<SerializedScriptValue> message, PassOwnPtr<MessagePortChannelArray> channels) { m_scriptExecutionContext->postTask(MessageWorkerTask::create(message, channels, this)); } void WorkerMessagingProxy::postMessageToWorkerContext(PassRefPtr<SerializedScriptValue> message, PassOwnPtr<MessagePortChannelArray> channels) { if (m_askedToTerminate) return; if (m_workerThread) { ++m_unconfirmedMessageCount; m_workerThread->runLoop().postTask(MessageWorkerContextTask::create(message, channels)); } else m_queuedEarlyTasks.append(MessageWorkerContextTask::create(message, channels)); } void WorkerMessagingProxy::postTaskForModeToWorkerContext(PassOwnPtr<ScriptExecutionContext::Task> task, const String& mode) { if (m_askedToTerminate) return; ASSERT(m_workerThread); m_workerThread->runLoop().postTaskForMode(task, mode); } void WorkerMessagingProxy::postTaskToLoader(PassOwnPtr<ScriptExecutionContext::Task> task) { // FIXME: In case of nested workers, this should go directly to the root Document context. 
ASSERT(m_scriptExecutionContext->isDocument()); m_scriptExecutionContext->postTask(task); } void WorkerMessagingProxy::postExceptionToWorkerObject(const String& errorMessage, int lineNumber, const String& sourceURL) { m_scriptExecutionContext->postTask(WorkerExceptionTask::create(errorMessage, lineNumber, sourceURL, this)); } static void postConsoleMessageTask(ScriptExecutionContext* context, WorkerMessagingProxy* messagingProxy, MessageSource source, MessageType type, MessageLevel level, const String& message, unsigned lineNumber, const String& sourceURL) { if (messagingProxy->askedToTerminate()) return; context->addMessage(source, type, level, message, lineNumber, sourceURL, 0); } void WorkerMessagingProxy::postConsoleMessageToWorkerObject(MessageSource source, MessageType type, MessageLevel level, const String& message, int lineNumber, const String& sourceURL) { m_scriptExecutionContext->postTask( createCallbackTask(&postConsoleMessageTask, AllowCrossThreadAccess(this), source, type, level, message, lineNumber, sourceURL)); } void WorkerMessagingProxy::workerThreadCreated(PassRefPtr<DedicatedWorkerThread> workerThread) { m_workerThread = workerThread; if (m_askedToTerminate) { // Worker.terminate() could be called from JS before the thread was created. m_workerThread->stop(); } else { unsigned taskCount = m_queuedEarlyTasks.size(); ASSERT(!m_unconfirmedMessageCount); m_unconfirmedMessageCount = taskCount; m_workerThreadHadPendingActivity = true; // Worker initialization means a pending activity. for (unsigned i = 0; i < taskCount; ++i) m_workerThread->runLoop().postTask(m_queuedEarlyTasks[i].release()); m_queuedEarlyTasks.clear(); } } void WorkerMessagingProxy::workerObjectDestroyed() { m_workerObject = 0; if (m_workerThread) terminateWorkerContext(); else workerContextDestroyedInternal(); } #if ENABLE(INSPECTOR) static void connectToWorkerContextInspectorTask(ScriptExecutionContext* context, bool) { ASSERT(context->isWorkerContext()); static_cast<WorkerContext*>(context)->workerInspectorController()->connectFrontend(); } void WorkerMessagingProxy::connectToInspector(WorkerContextProxy::PageInspector* pageInspector) { if (m_askedToTerminate) return; ASSERT(!m_pageInspector); m_pageInspector = pageInspector; m_workerThread->runLoop().postTask(createCallbackTask(connectToWorkerContextInspectorTask, true)); } static void disconnectFromWorkerContextInspectorTask(ScriptExecutionContext* context, bool) { ASSERT(context->isWorkerContext()); static_cast<WorkerContext*>(context)->workerInspectorController()->disconnectFrontend(); } void WorkerMessagingProxy::disconnectFromInspector() { m_pageInspector = 0; if (m_askedToTerminate) return; #if ENABLE(JAVASCRIPT_DEBUGGER) m_workerThread->runLoop().postTaskForMode(createCallbackTask(disconnectFromWorkerContextInspectorTask, true), WorkerDebuggerAgent::debuggerTaskMode); #endif } static void dispatchOnInspectorBackendTask(ScriptExecutionContext* context, const String& message) { ASSERT(context->isWorkerContext()); static_cast<WorkerContext*>(context)->workerInspectorController()->dispatchMessageFromFrontend(message); } void WorkerMessagingProxy::sendMessageToInspector(const String& message) { if (m_askedToTerminate) return; #if ENABLE(JAVASCRIPT_DEBUGGER) m_workerThread->runLoop().postTaskForMode(createCallbackTask(dispatchOnInspectorBackendTask, String(message)), WorkerDebuggerAgent::debuggerTaskMode); #endif } #endif void WorkerMessagingProxy::workerContextDestroyed() { m_scriptExecutionContext->postTask(WorkerContextDestroyedTask::create(this)); 
// Will execute workerContextDestroyedInternal() on context's thread. } void WorkerMessagingProxy::workerContextClosed() { // Executes terminateWorkerContext() on parent context's thread. m_scriptExecutionContext->postTask(WorkerTerminateTask::create(this)); } void WorkerMessagingProxy::workerContextDestroyedInternal() { // WorkerContextDestroyedTask is always the last to be performed, so the proxy is not needed for communication // in either side any more. However, the Worker object may still exist, and it assumes that the proxy exists, too. m_askedToTerminate = true; m_workerThread = 0; InspectorInstrumentation::workerContextTerminated(m_scriptExecutionContext.get(), this); if (!m_workerObject) delete this; } void WorkerMessagingProxy::terminateWorkerContext() { if (m_askedToTerminate) return; m_askedToTerminate = true; if (m_workerThread) m_workerThread->stop(); InspectorInstrumentation::workerContextTerminated(m_scriptExecutionContext.get(), this); } #if ENABLE(INSPECTOR) void WorkerMessagingProxy::postMessageToPageInspector(const String& message) { m_scriptExecutionContext->postTask(PostMessageToPageInspectorTask::create(this, message)); } void WorkerMessagingProxy::updateInspectorStateCookie(const String&) { notImplemented(); } #endif void WorkerMessagingProxy::confirmMessageFromWorkerObject(bool hasPendingActivity) { m_scriptExecutionContext->postTask(WorkerThreadActivityReportTask::create(this, true, hasPendingActivity)); // Will execute reportPendingActivityInternal() on context's thread. } void WorkerMessagingProxy::reportPendingActivity(bool hasPendingActivity) { m_scriptExecutionContext->postTask(WorkerThreadActivityReportTask::create(this, false, hasPendingActivity)); // Will execute reportPendingActivityInternal() on context's thread. } void WorkerMessagingProxy::reportPendingActivityInternal(bool confirmingMessage, bool hasPendingActivity) { if (confirmingMessage && !m_askedToTerminate) { ASSERT(m_unconfirmedMessageCount); --m_unconfirmedMessageCount; } m_workerThreadHadPendingActivity = hasPendingActivity; } bool WorkerMessagingProxy::hasPendingActivity() const { return (m_unconfirmedMessageCount || m_workerThreadHadPendingActivity) && !m_askedToTerminate; } } // namespace WebCore #endif // ENABLE(WORKERS)
.. Copyright 2011-2012 OpenStack Foundation
   All Rights Reserved.

   Licensed under the Apache License, Version 2.0 (the "License"); you may
   not use this file except in compliance with the License. You may obtain
   a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
   WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
   License for the specific language governing permissions and limitations
   under the License.

.. _auth_plugins:

Authentication Plugins
======================

.. NOTE::

    This feature is only supported by keystone for the Identity API v3
    clients.

Keystone supports authentication plugins and they are specified in the
``[auth]`` section of the configuration file. However, an authentication
plugin may also have its own section in the configuration file. It is up to
the plugin to register its own configuration options.

* ``methods`` - comma-delimited list of authentication plugin names
* ``<plugin name>`` - specify the class which handles the authentication
  method, in the same manner as one would specify a backend driver.

Keystone provides three authentication methods by default. ``password``
handles password authentication and ``token`` handles token authentication.
``external`` is used in conjunction with authentication performed by a
container web server that sets the ``REMOTE_USER`` environment variable. For
more details, refer to :doc:`External Authentication
<../admin/external-authentication>`.

How to Implement an Authentication Plugin
-----------------------------------------

All authentication plugins must extend the
:class:`keystone.auth.plugins.base.AuthMethodHandler` class and implement the
``authenticate()`` method. The ``authenticate()`` method expects the
following parameters.

* ``context`` - keystone's request context
* ``auth_payload`` - the content of the authentication for a given method
* ``auth_context`` - user authentication context, a dictionary shared by all
  plugins. It contains ``method_names`` and ``bind`` by default.
  ``method_names`` is a list and ``bind`` is a dictionary.

If successful, the ``authenticate()`` method must provide a valid ``user_id``
in ``auth_context`` and return ``None``. ``method_names`` is used to convey
any additional authentication methods in case authentication is for
re-scoping. For example, if the authentication is for re-scoping, a plugin
must append the previous method names into ``method_names``. If
authentication requires multiple steps, the ``authenticate()`` method must
return the payload in the form of a dictionary for the next authentication
step.

If authentication is unsuccessful, the ``authenticate()`` method must raise a
:class:`keystone.exception.Unauthorized` exception.

Simply add the new plugin name to the ``methods`` list along with your plugin
class configuration in the ``[auth]`` sections of the configuration file to
deploy it. If the plugin requires additional configurations, it may register
its own section in the configuration file.

Plugins are invoked in the order in which they are specified in the
``methods`` attribute of the ``authentication`` request body. If multiple
plugins are invoked, all plugins must succeed in order for the entire
authentication to be successful. Furthermore, all the plugins invoked must
agree on the ``user_id`` in the ``auth_context``.

The ``REMOTE_USER`` environment variable is only set by a container web
server.
However, to ensure that a user must go through other authentication mechanisms, even if this variable is set, remove ``external`` from the list of plugins specified in ``methods``. This effectively disables external authentication. For more details, refer to :doc:`External Authentication <../admin/external-authentication>`.
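To make the contract above concrete, the following is a minimal sketch of a
custom plugin. The base class and the ``authenticate()`` contract come from
this document; the plugin name, payload key, and ``_verify()`` helper are
purely illustrative, and the exact method signature may vary between keystone
releases.

.. code-block:: python

    from keystone.auth.plugins import base
    from keystone import exception


    class MacGuffin(base.AuthMethodHandler):
        """Hypothetical plugin: authenticates a caller by a bearer token."""

        def authenticate(self, context, auth_payload, auth_context):
            token = auth_payload.get('macguffin_token')
            user_id = self._verify(token)  # illustrative helper, not shown
            if user_id is None:
                # Unsuccessful authentication must raise Unauthorized.
                raise exception.Unauthorized('macguffin authentication failed')
            # Successful authentication provides a valid user_id and
            # returns None; a dict return value would mean "next step".
            auth_context['user_id'] = user_id
            return None

The plugin would then be deployed by listing it in the ``[auth]`` section,
for example::

    [auth]
    methods = external,password,token,macguffin
    macguffin = my_package.MacGuffin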
## PragmataPro

PragmataPro™ is a condensed monospaced font optimized for screen, designed by Fabrizio Schiavi to be the ideal font for coding, math and engineering.
The PragmataPro Regular version now has more than 10,000 glyphs! The Bold, Italic and Bold Italic versions have more than 7,000 characters.

Discover more about PragmataPro™ by visiting its official [web page](https://www.fsd.it/shop/fonts/pragmatapro/)

This repository collects issues and suggestions, with the goal of improving this typeface family as much as possible.

Many thanks!
Fabrizio Schiavi

![All chars of PragmataPro Regular](useful_files/All_chars.png)
![PragmataPro Handbook](useful_files/Handbook.png)
![PragmataPro Haskell ligatures list](https://www.fsd.it/pragmatapro/PragmataPro_Haskell_liga.png)
{ "registry_name": "astralsorcery:travel_28", "perk_class": "astralsorcery:modifier_perk", "x": 37.0, "y": 46.0, "name": "perk.astralsorcery.generic.inc.perk_effect", "data": { "modifiers": [ { "type": "astralsorcery:perkeffect", "mode": "ADDED_MULTIPLY", "value": 0.02 } ] }, "connection": [ "astralsorcery:core_m_gem", "astralsorcery:travel_9" ] }
/* ------------------------------------------------------------------ */ /* Decimal Number Library Demonstration program */ /* ------------------------------------------------------------------ */ /* Copyright (c) IBM Corporation, 2001, 2007. All rights reserved. */ /* ----------------------------------------------------------------+- */ /* right margin -->| */ // example8.c -- using decQuad with the decNumber module // compile: example8.c decContext.c decQuad.c // and: decNumber.c decimal128.c decimal64.c #include "decQuad.h" // decQuad library #include "decimal128.h" // interface to decNumber #include <stdio.h> // for printf int main(int argc, char *argv[]) { decQuad a; // working decQuad decNumber numa, numb; // working decNumbers decContext set; // working context char string[DECQUAD_String]; // number->string buffer if (argc<3) { // not enough words printf("Please supply two numbers for power(2*a, b).\n"); return 1; } decContextDefault(&set, DEC_INIT_DECQUAD); // initialize decQuadFromString(&a, argv[1], &set); // get a decQuadAdd(&a, &a, &a, &set); // double a decQuadToNumber(&a, &numa); // convert to decNumber decNumberFromString(&numb, argv[2], &set); decNumberPower(&numa, &numa, &numb, &set); // numa=numa**numb decQuadFromNumber(&a, &numa, &set); // back via a Quad decQuadToString(&a, string); // .. printf("power(2*%s, %s) => %s\n", argv[1], argv[2], string); return 0; } // main
// Issue #5886: a complex instance of issue #2687. trait Iterator<A> { fn next(&mut self) -> Option<A>; } trait IteratorUtil<A>: Sized { fn zip<B, U: Iterator<U>>(self, other: U) -> ZipIterator<Self, U>; } impl<A, T: Iterator<A>> IteratorUtil<A> for T { fn zip<B, U: Iterator<B>>(self, other: U) -> ZipIterator<T, U> { //~^ ERROR E0276 ZipIterator{a: self, b: other} } } struct ZipIterator<T, U> { a: T, b: U } fn main() {}
#pragma once #include <string> std::string base64_encode(unsigned char const* , unsigned int len); std::string base64_decode(std::string const& s); std::string url_encode(const std::string &s); std::string url_decode(const std::string &s);
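// A minimal round-trip sketch of the declarations above (assuming the
// matching definitions are linked in; the exact escaping performed by
// url_encode depends on the implementation):
//
//   const std::string text = "hello world";
//   const std::string b64 = base64_encode(
//       reinterpret_cast<unsigned char const*>(text.data()),
//       static_cast<unsigned int>(text.size()));
//   const std::string back = base64_decode(b64);        // back == text
//
//   const std::string escaped = url_encode(text);
//   const std::string original = url_decode(escaped);   // original == text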
# # Copyright (C) 2011 OpenWrt.org # # This is free software, licensed under the GNU General Public License v2. # See /LICENSE for more information. # define Profile/ESR-9753 NAME:=EnGenius ESR-9753 PACKAGES:= endef define Profile/ESR-9753/Description EnGenius ESR-9753 profile. endef $(eval $(call Profile,ESR-9753))
/* * Ref manager */ import { isFunction, isObject } from '../types'; import { INSTANCE } from '../constant'; import { warning, throwMinifiedWarn } from '../error'; export function updateRef(prevElement, nextElement, component) { let prevRef = prevElement ? prevElement.ref : null; let nextRef = nextElement ? nextElement.ref : null; // Update refs in owner component if (prevRef !== nextRef) { // Detach prev RenderedElement's ref prevRef && detachRef(prevElement._owner, prevRef, component); // Attach next RenderedElement's ref nextRef && attachRef(nextElement._owner, nextRef, component); } } export function attachRef(ownerComponent, ref, component) { if (!ownerComponent) { if (process.env.NODE_ENV !== 'production') { warning('Ref can not attach because multiple copies of Rax are used.'); } else { throwMinifiedWarn(3); } return; } let instance = component.__getPublicInstance(); if (process.env.NODE_ENV !== 'production') { if (instance == null) { warning('Do not attach ref to function component because they don’t have instances.'); } } if (isFunction(ref)) { ref(instance); } else if (isObject(ref)) { ref.current = instance; } else { ownerComponent[INSTANCE].refs[ref] = instance; } } export function detachRef(ownerComponent, ref, component) { if (isFunction(ref)) { // When the referenced component is unmounted and whenever the ref changes, the old ref will be called with null as an argument. ref(null); } else { // Must match component and ref could detach the ref on owner when A's before ref is B's current ref let instance = component.__getPublicInstance(); if (isObject(ref) && ref.current === instance) { ref.current = null; } else if (ownerComponent[INSTANCE].refs[ref] === instance) { delete ownerComponent[INSTANCE].refs[ref]; } } }
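// A hedged usage sketch of the three ref shapes the functions above handle
// (component and field names here are illustrative):
//
// 1. Function ref: called with the public instance on attach, and with null
//    on detach or whenever the ref changes.
//      <Input ref={(node) => { this.inputNode = node; }} />
//
// 2. Object ref: the public instance is stored on `ref.current`, and reset
//    to null on detach while it still points at this instance.
//      <Input ref={this.inputRef} />
//
// 3. String ref: the instance is registered under `refs[name]` on the owner
//    component, i.e. accessible later as `this.refs.input`.
//      <Input ref="input" />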
/* * Copyright (c) 1995, 2015, Oracle and/or its affiliates. All rights reserved. * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER. * * This code is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License version 2 only, as * published by the Free Software Foundation. Oracle designates this * particular file as subject to the "Classpath" exception as provided * by Oracle in the LICENSE file that accompanied this code. * * This code is distributed in the hope that it will be useful, but WITHOUT * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License * version 2 for more details (a copy is included in the LICENSE file that * accompanied this code). * * You should have received a copy of the GNU General Public License version * 2 along with this work; if not, write to the Free Software Foundation, * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA. * * Please contact Oracle, 500 Oracle Parkway, Redwood Shores, CA 94065 USA * or visit www.oracle.com if you need additional information or have any * questions. */ package java.awt; import javax.accessibility.*; /** * {@code Panel} is the simplest container class. A panel * provides space in which an application can attach any other * component, including other panels. * <p> * The default layout manager for a panel is the * {@code FlowLayout} layout manager. * * @author Sami Shaio * @see java.awt.FlowLayout * @since 1.0 */ public class Panel extends Container implements Accessible { private static final String base = "panel"; private static int nameCounter = 0; /* * JDK 1.1 serialVersionUID */ private static final long serialVersionUID = -2728009084054400034L; /** * Creates a new panel using the default layout manager. * The default layout manager for all panels is the * {@code FlowLayout} class. */ public Panel() { this(new FlowLayout()); } /** * Creates a new panel with the specified layout manager. * @param layout the layout manager for this panel. * @since 1.1 */ public Panel(LayoutManager layout) { setLayout(layout); } /** * Construct a name for this component. Called by getName() when the * name is null. */ String constructComponentName() { synchronized (Panel.class) { return base + nameCounter++; } } /** * Creates the Panel's peer. The peer allows you to modify the * appearance of the panel without changing its functionality. */ public void addNotify() { synchronized (getTreeLock()) { if (peer == null) peer = getComponentFactory().createPanel(this); super.addNotify(); } } ///////////////// // Accessibility support //////////////// /** * Gets the AccessibleContext associated with this Panel. * For panels, the AccessibleContext takes the form of an * AccessibleAWTPanel. * A new AccessibleAWTPanel instance is created if necessary. * * @return an AccessibleAWTPanel that serves as the * AccessibleContext of this Panel * @since 1.3 */ public AccessibleContext getAccessibleContext() { if (accessibleContext == null) { accessibleContext = new AccessibleAWTPanel(); } return accessibleContext; } /** * This class implements accessibility support for the * {@code Panel} class. It provides an implementation of the * Java Accessibility API appropriate to panel user-interface elements. 
* @since 1.3 */ protected class AccessibleAWTPanel extends AccessibleAWTContainer { private static final long serialVersionUID = -6409552226660031050L; /** * Constructs an {@code AccessibleAWTPanel} */ protected AccessibleAWTPanel() {} /** * Get the role of this object. * * @return an instance of AccessibleRole describing the role of the * object */ public AccessibleRole getAccessibleRole() { return AccessibleRole.PANEL; } } }
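// A minimal usage sketch of the class above: a Panel groups components under
// its default FlowLayout and is itself added to another container.
//
//   Panel buttons = new Panel();            // default FlowLayout
//   buttons.add(new Button("OK"));
//   buttons.add(new Button("Cancel"));
//
//   Frame frame = new Frame("Demo");
//   frame.add(buttons, BorderLayout.SOUTH);
//   frame.pack();
//   frame.setVisible(true);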
{ "name": "pair Networks, Inc.", "displayName": "pair Networks", "properties": [ "ghanaweb.com", "pair.com", "pairserver.com" ], "prevalence": { "tracking": 0, "nonTracking": 0.000101, "total": 0.000101 } }
; RUN: clspv-opt %s -o %t -ReplaceOpenCLBuiltin ; RUN: FileCheck %s < %t target datalayout = "e-p:32:32-i64:64-v16:16-v24:32-v32:32-v48:64-v96:128-v192:256-v256:256-v512:512-v1024:1024" target triple = "spir-unknown-unknown" ; CHECK-NOT: call spir_func define void @foo(<4 x float> addrspace(1)* %a, <4 x float> addrspace(1)* %b) { entry: %ld_b = load <4 x float>, <4 x float> addrspace(1)* %b %cast = bitcast <4 x float> addrspace(1)* %a to half addrspace(1)* ; CHECK: [[shuffle0:%[a-zA-Z0-9_.]+]] = shufflevector <4 x float> %ld_b, <4 x float> undef, <2 x i32> <i32 0, i32 1> ; CHECK: [[shuffle1:%[a-zA-Z0-9_.]+]] = shufflevector <4 x float> %ld_b, <4 x float> undef, <2 x i32> <i32 2, i32 3> ; CHECK: [[pack0:%[a-zA-Z0-9_.]+]] = call i32 @_Z16spirv.pack.v2f16(<2 x float> [[shuffle0]]) ; CHECK: [[pack1:%[a-zA-Z0-9_.]+]] = call i32 @_Z16spirv.pack.v2f16(<2 x float> [[shuffle1]]) ; CHECK: [[in0:%[a-zA-Z0-9_.]+]] = insertelement <2 x i32> undef, i32 [[pack0]], i32 0 ; CHECK: [[in1:%[a-zA-Z0-9_.]+]] = insertelement <2 x i32> [[in0]], i32 [[pack1]], i32 1 ; CHECK: [[cast:%[a-zA-Z0-9_.]+]] = bitcast half addrspace(1)* %cast to <2 x i32> addrspace(1)* ; CHECK: [[gep:%[a-zA-Z0-9_.]+]] = getelementptr <2 x i32>, <2 x i32> addrspace(1)* [[cast]], i32 0 ; CHECK: store <2 x i32> [[in1]], <2 x i32> addrspace(1)* [[gep]] call spir_func void @_Z12vstore_half4Dv4_fjPU3AS1Dh(<4 x float> %ld_b, i32 0, half addrspace(1)* %cast) ret void } declare spir_func void @_Z12vstore_half4Dv4_fjPU3AS1Dh(<4 x float>, i32, half addrspace(1)*)
--- - code: DC type: state - code: DE type: state - code: DN type: state - code: DS type: state - code: DW type: state - code: GD type: state - code: GZ type: state - code: KA type: state - code: KH type: state - code: KN type: state - code: KS type: state - code: NB type: state - code: 'NO' type: state - code: NR type: state - code: NW type: state - code: RS type: state - code: SI type: state
// (C) Copyright John Maddock 2006. // Use, modification and distribution are subject to the // Boost Software License, Version 1.0. (See accompanying file // LICENSE_1_0.txt or copy at http://www.boost.org/LICENSE_1_0.txt) // See http://www.boost.org for most recent version. // GCC-XML C++ compiler setup: # if !defined(__GCCXML_GNUC__) || ((__GCCXML_GNUC__ <= 3) && (__GCCXML_GNUC_MINOR__ <= 3)) # define BOOST_NO_IS_ABSTRACT # endif // // Threading support: Turn this on unconditionally here (except for // those platforms where we can know for sure). It will get turned off again // later if no threading API is detected. // #if !defined(__MINGW32__) && !defined(_MSC_VER) && !defined(linux) && !defined(__linux) && !defined(__linux__) # define BOOST_HAS_THREADS #endif // // gcc has "long long" // #define BOOST_HAS_LONG_LONG // C++0x features: // # define BOOST_NO_CXX11_CONSTEXPR # define BOOST_NO_CXX11_NULLPTR # define BOOST_NO_CXX11_TEMPLATE_ALIASES # define BOOST_NO_CXX11_DECLTYPE # define BOOST_NO_CXX11_DECLTYPE_N3276 # define BOOST_NO_CXX11_FUNCTION_TEMPLATE_DEFAULT_ARGS # define BOOST_NO_CXX11_RVALUE_REFERENCES # define BOOST_NO_CXX11_STATIC_ASSERT # define BOOST_NO_CXX11_VARIADIC_TEMPLATES # define BOOST_NO_CXX11_VARIADIC_MACROS # define BOOST_NO_CXX11_AUTO_DECLARATIONS # define BOOST_NO_CXX11_AUTO_MULTIDECLARATIONS # define BOOST_NO_CXX11_CHAR16_T # define BOOST_NO_CXX11_CHAR32_T # define BOOST_NO_CXX11_DEFAULTED_FUNCTIONS # define BOOST_NO_CXX11_DELETED_FUNCTIONS # define BOOST_NO_CXX11_HDR_INITIALIZER_LIST # define BOOST_NO_CXX11_SCOPED_ENUMS # define BOOST_NO_SFINAE_EXPR # define BOOST_NO_CXX11_SFINAE_EXPR # define BOOST_NO_CXX11_EXPLICIT_CONVERSION_OPERATORS # define BOOST_NO_CXX11_LAMBDAS # define BOOST_NO_CXX11_LOCAL_CLASS_TEMPLATE_PARAMETERS # define BOOST_NO_CXX11_RANGE_BASED_FOR # define BOOST_NO_CXX11_RAW_LITERALS # define BOOST_NO_CXX11_UNICODE_LITERALS # define BOOST_NO_CXX11_NOEXCEPT # define BOOST_NO_CXX11_UNIFIED_INITIALIZATION_SYNTAX # define BOOST_NO_CXX11_USER_DEFINED_LITERALS # define BOOST_NO_CXX11_ALIGNAS # define BOOST_NO_CXX11_TRAILING_RESULT_TYPES # define BOOST_NO_CXX11_INLINE_NAMESPACES # define BOOST_NO_CXX11_REF_QUALIFIERS # define BOOST_NO_CXX11_FINAL # define BOOST_NO_CXX11_THREAD_LOCAL // C++ 14: #if !defined(__cpp_aggregate_nsdmi) || (__cpp_aggregate_nsdmi < 201304) # define BOOST_NO_CXX14_AGGREGATE_NSDMI #endif #if !defined(__cpp_binary_literals) || (__cpp_binary_literals < 201304) # define BOOST_NO_CXX14_BINARY_LITERALS #endif #if !defined(__cpp_constexpr) || (__cpp_constexpr < 201304) # define BOOST_NO_CXX14_CONSTEXPR #endif #if !defined(__cpp_decltype_auto) || (__cpp_decltype_auto < 201304) # define BOOST_NO_CXX14_DECLTYPE_AUTO #endif #if (__cplusplus < 201304) // There's no SD6 check for this.... 
# define BOOST_NO_CXX14_DIGIT_SEPARATORS #endif #if !defined(__cpp_generic_lambdas) || (__cpp_generic_lambdas < 201304) # define BOOST_NO_CXX14_GENERIC_LAMBDAS #endif #if !defined(__cpp_init_captures) || (__cpp_init_captures < 201304) # define BOOST_NO_CXX14_INITIALIZED_LAMBDA_CAPTURES #endif #if !defined(__cpp_return_type_deduction) || (__cpp_return_type_deduction < 201304) # define BOOST_NO_CXX14_RETURN_TYPE_DEDUCTION #endif #if !defined(__cpp_variable_templates) || (__cpp_variable_templates < 201304) # define BOOST_NO_CXX14_VARIABLE_TEMPLATES #endif // C++17 #if !defined(__cpp_structured_bindings) || (__cpp_structured_bindings < 201606) # define BOOST_NO_CXX17_STRUCTURED_BINDINGS #endif #if !defined(__cpp_inline_variables) || (__cpp_inline_variables < 201606) # define BOOST_NO_CXX17_INLINE_VARIABLES #endif #if !defined(__cpp_fold_expressions) || (__cpp_fold_expressions < 201603) # define BOOST_NO_CXX17_FOLD_EXPRESSIONS #endif #if !defined(__cpp_if_constexpr) || (__cpp_if_constexpr < 201606) # define BOOST_NO_CXX17_IF_CONSTEXPR #endif #define BOOST_COMPILER "GCC-XML C++ version " __GCCXML__
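// A sketch of how downstream code typically consumes the feature macros
// defined above (the guarded function is illustrative):
//
//   #include <boost/config.hpp>
//
//   #ifndef BOOST_NO_CXX11_CONSTEXPR
//   constexpr int answer() { return 42; }   // compiler supports constexpr
//   #else
//   inline int answer() { return 42; }      // portable fallback
//   #endif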
// Copyright 2009 The Go Authors. All rights reserved. // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. // +build 386,freebsd package unix import ( "syscall" "unsafe" ) func setTimespec(sec, nsec int64) Timespec { return Timespec{Sec: int32(sec), Nsec: int32(nsec)} } func setTimeval(sec, usec int64) Timeval { return Timeval{Sec: int32(sec), Usec: int32(usec)} } func SetKevent(k *Kevent_t, fd, mode, flags int) { k.Ident = uint32(fd) k.Filter = int16(mode) k.Flags = uint16(flags) } func (iov *Iovec) SetLen(length int) { iov.Len = uint32(length) } func (msghdr *Msghdr) SetControllen(length int) { msghdr.Controllen = uint32(length) } func (msghdr *Msghdr) SetIovlen(length int) { msghdr.Iovlen = int32(length) } func (cmsg *Cmsghdr) SetLen(length int) { cmsg.Len = uint32(length) } func sendfile(outfd int, infd int, offset *int64, count int) (written int, err error) { var writtenOut uint64 = 0 _, _, e1 := Syscall9(SYS_SENDFILE, uintptr(infd), uintptr(outfd), uintptr(*offset), uintptr((*offset)>>32), uintptr(count), 0, uintptr(unsafe.Pointer(&writtenOut)), 0, 0) written = int(writtenOut) if e1 != 0 { err = e1 } return } func Syscall9(num, a1, a2, a3, a4, a5, a6, a7, a8, a9 uintptr) (r1, r2 uintptr, err syscall.Errno) func PtraceGetFsBase(pid int, fsbase *int64) (err error) { return ptrace(PTRACE_GETFSBASE, pid, uintptr(unsafe.Pointer(fsbase)), 0) } func PtraceIO(req int, pid int, addr uintptr, out []byte, countin int) (count int, err error) { ioDesc := PtraceIoDesc{Op: int32(req), Offs: (*byte)(unsafe.Pointer(addr)), Addr: (*byte)(unsafe.Pointer(&out[0])), Len: uint32(countin)} err = ptrace(PTRACE_IO, pid, uintptr(unsafe.Pointer(&ioDesc)), 0) return int(ioDesc.Len), err }
window._ = require('lodash'); /** * We'll load jQuery and the Bootstrap jQuery plugin which provides support * for JavaScript based Bootstrap features such as modals and tabs. This * code may be modified to fit the specific needs of your application. */ try { window.$ = window.jQuery = require('jquery'); require('bootstrap-sass'); } catch (e) {}
using MongoDB.Bson; namespace MongoDB.Protocol { /// <summary> /// The OP_GETMORE message is used to query the database for documents in a collection. /// </summary> /// <remarks> /// struct { /// MsgHeader header; // standard message header /// int32 ZERO; // 0 - reserved for future use /// cstring fullCollectionName; // "dbname.collectionname" /// int32 numberToReturn; // number of documents to return /// int64 cursorID; // cursorID from the OP_REPLY /// } /// </remarks> internal class GetMoreMessage : RequestMessageBase { /// <summary> /// Initializes a new instance of the <see cref="GetMoreMessage"/> class. /// </summary> /// <param name="fullCollectionName">Full name of the collection.</param> /// <param name="cursorId">The cursor id.</param> public GetMoreMessage(string fullCollectionName, long cursorId) : this(fullCollectionName, cursorId, 0) { } /// <summary> /// Initializes a new instance of the <see cref="GetMoreMessage"/> class. /// </summary> /// <param name="fullCollectionName">Full name of the collection.</param> /// <param name="cursorId">The cursor id.</param> /// <param name="numberToReturn">The number to return.</param> public GetMoreMessage(string fullCollectionName, long cursorId, int numberToReturn) : base(new BsonWriterSettings()) { Header = new MessageHeader(OpCode.GetMore); FullCollectionName = fullCollectionName; CursorId = cursorId; NumberToReturn = numberToReturn; } /// <summary> /// cursorID from the OP_REPLY. /// </summary> /// <value>The cursor id.</value> public long CursorId { get; set; } /// <summary> /// Gets or sets the full name of the collection. /// </summary> /// <value>The full name of the collection.</value> public string FullCollectionName { get; set; } /// <summary> /// Gets or sets the number to return. /// </summary> /// <value>The number to return.</value> public int NumberToReturn { get; set; } /// <summary> /// Writes the body. /// </summary> /// <param name="writer">The writer.</param> protected override void WriteBody(BsonWriter writer){ writer.WriteValue(BsonType.Integer, 0); writer.Write(FullCollectionName, false); writer.WriteValue(BsonType.Integer, NumberToReturn); writer.WriteValue(BsonType.Long, CursorId); } /// <summary> /// Calculates the size of the body. /// </summary> /// <param name="writer">The writer.</param> /// <returns></returns> protected override int CalculateBodySize(BsonWriter writer){ var size = 4; //first int32 size += writer.CalculateSize(FullCollectionName, false); size += 12; //number to return + cursorid return size; } } }
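// A minimal usage sketch of the message above (internal protocol plumbing;
// the transport call is hypothetical and depends on the surrounding
// connection code):
//
//   // Ask the server for up to 100 more documents on an open cursor,
//   // using the cursorID previously returned in an OP_REPLY.
//   var getMore = new GetMoreMessage("blog.posts", cursorId, 100);
//   connection.SendMessage(getMore);   // hypothetical transport call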
gcr.io/google_containers/kube-controller-manager:v1.12.0-rc.2
<?xml version="1.0" encoding="UTF-8"?>
<config xmlns="http://www.hasor.net/sechma/rsf-registry">
    <!-- Registry framework environment variables -->
    <hasor.environmentVar>
        <!-- app_key -->
        <RSF_APP_KEY></RSF_APP_KEY>
        <!-- app KeySecret -->
        <RSF_APP_KEY_SECRET></RSF_APP_KEY_SECRET>
        <!-- Registry center (e.g. rsf://127.0.0.1:2180) -->
        <!-- When the Center works in cluster mode, the list of machines in the cluster.
             Note: the clusterServers configuration of every machine in the cluster should be identical; the center checks this at startup. -->
        <RSF_CENTER_SERVERS></RSF_CENTER_SERVERS>
        <!-- Registry center work mode, default: none -->
        <RSF_CENTER_WORK_MODE>none</RSF_CENTER_WORK_MODE>
        <!-- Name of the authorization key file. Search order: WORK_HOME has first priority, classpath second; only one configuration file is loaded.
             Effective when "rsfCenter.adapterConfig.authQuery" is configured to use net.hasor.rsf.center.server.adapter.FileAuthQuery -->
        <RSF_CENTER_AUTH_FILE_NAME>auth_keys.xml</RSF_CENTER_AUTH_FILE_NAME>
        <!-- Whether to accept connections from anonymous applications: enabled by default -->
        <RSF_CENTER_AUTH_ANONYMOUS>true</RSF_CENTER_AUTH_ANONYMOUS>
        <!-- Storage location of the built-in simple file-based data store -->
        <RSF_CENTER_FILE_STORAGE>%WORK_HOME%/rsfcenter_storage</RSF_CENTER_FILE_STORAGE>
    </hasor.environmentVar>
    <hasor>
        <!-- Framework configuration: package scan scope -->
        <loadPackages>net.hasor.registry.*</loadPackages>
        <!-- Startup entry point -->
        <modules.module>net.hasor.registry.boot.RegistryBootModule</modules.module>
    </hasor>
    <!-- Hasor-RSF (registry center of the distributed service framework)
         workAt : registry center work mode; the available options are
             none    - disabled mode, not started
             client  - client mode, registers local services to the center
             server  - server mode, acts as the center server
             cluster - cluster mode, the center runs as a cluster and the current node is one member of it
    -->
    <hasor.registry workAt="${RSF_CENTER_WORK_MODE}">
        <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
        <!-- Registry center: the configuration server used to update the service list.
             - timeout       : timeout for remote calls between RSF and the Center (6 seconds)
             - heartbeatTime : service heartbeat interval (30 seconds)
             When the Center works in cluster mode, the list of machines in the cluster.
             Note: the clusterServers configuration of every machine in the cluster should be identical; the center checks this at startup. -->
        <servers timeout="6000" heartbeatTime="30000">
            ${RSF_CENTER_SERVERS}
        </servers>
        <!-- Security -->
        <security>
            <!-- app_key -->
            <appKeyID>${RSF_APP_KEY}</appKeyID>
            <!-- app KeySecret -->
            <appKeySecret>${RSF_APP_KEY_SECRET}</appKeySecret>
        </security>
        <!-- Authorization configuration: allowAnonymous permits anonymous applications -->
        <auth allowAnonymous="${RSF_CENTER_AUTH_ANONYMOUS}"/>
        <!-- ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -->
        <!-- Push configuration
             - threadSize   : number of threads that execute push tasks.
             - queueMaxSize : maximum length of the push queue; once the push processing queue reaches this threshold,
                              new push tasks are no longer accepted and the affected Center suspends service.
             - sleepTime    : how long to wait before retrying once the push queue is full; if the queue is still full
                              on retry, the push is abandoned. (in milliseconds) -->
        <polling threadSize="10" queueMaxSize="100000" sleepTime="1000"/>
        <!-- Expiration time of service management data (300 seconds) -->
        <serviceManager dataExpireTime="300"/>
    </hasor.registry>
</config>
export * from './InstanceReader'; export * from './details'; export * from './instance.write.service'; export * from './instanceType.service'; export * from './loadBalancer/InstanceLoadBalancerHealth'; export * from './templates';
Title: AMD Releases Catalyst 9.12 for Linux
Date: 2009-12-18 11:46
Author: toy
Category: Apps
Tags: AMD, ATI, Catalyst
Slug: amd-releases-catalyst-912-for-linux

AMD has released the Catalyst 9.12 display driver for the Linux platform. The driver adds support for RedFlag DT 6.0 SP3 and SLED/SLES 10 SPS, and enhances the Catalyst Control Center with new option pages such as size/position, HDTV, and scaling. In addition, AMD Catalyst 9.12 fixes several issues. For further information, see its [Release Notes](https://a248.e.akamai.net/f/674/9206/0/www2.ati.com/drivers/linux/catalyst\_912\_linux.pdf).

![ATI](http://i.linuxtoy.org/images/2009/12/ati.jpg)

AMD Catalyst 9.12 supports the x86 and x86\_64 architectures; its [installation file](http://a248.e.akamai.net/f/674/9206/0/www2.ati.com/drivers/linux/ati-driver-installer-9-12-x86.x86\_64.run) can be downloaded here.

{ via [Phoronix](http://www.phoronix.com/scan.php?page=news\_item&px=NzgxOA) }
<?xml version="1.0" encoding="UTF-8"?>
<StructureDefinition xmlns="http://hl7.org/fhir">
  <id value="iso21090-ADXP-unitType"/>
  <url value="http://hl7.org/fhir/StructureDefinition/iso21090-ADXP-unitType"/>
  <name value="ADXP-unitType"/>
  <status value="draft"/>
  <date value="2012-06-24"/>
  <publisher value="Health Level Seven International (Modeling and Methodology)"/>
  <contact>
    <telecom>
      <system value="url"/>
      <value value="http://www.hl7.org/Special/committees/mnm"/>
    </telecom>
  </contact>
  <description value="Indicates the type of specific unit contained within a building or complex. E.g. Apartment, Floor."/>
  <fhirVersion value="1.9.0"/>
  <mapping>
    <identity value="rim"/>
    <uri value="http://hl7.org/v3"/>
    <name value="RIM Mapping"/>
  </mapping>
  <kind value="complex-type"/>
  <abstract value="false"/>
  <contextType value="datatype"/>
  <context value="Address.line"/>
  <type value="Extension"/>
  <baseDefinition value="http://hl7.org/fhir/StructureDefinition/Extension"/>
  <derivation value="constraint"/>
  <snapshot>
    <element id="Extension:adxp-unittype">
      <path value="Extension"/>
      <short value="unitType"/>
      <definition value="Indicates the type of specific unit contained within a building or complex. E.g. Apartment, Floor."/>
      <min value="0"/>
      <max value="*"/>
      <base>
        <path value="Extension"/>
        <min value="0"/>
        <max value="*"/>
      </base>
      <condition value="ele-1"/>
      <constraint>
        <key value="ele-1"/>
        <severity value="error"/>
        <human value="All FHIR elements must have a @value or children"/>
        <expression value="children().count() &gt; id.count()"/>
        <xpath value="@value|f:*|h:div"/>
        <source value="Element"/>
      </constraint>
      <constraint>
        <key value="ext-1"/>
        <severity value="error"/>
        <human value="Must have either extensions or value[x], not both"/>
        <expression value="extension.exists() != value.exists()"/>
        <xpath value="exists(f:extension)!=exists(f:*[starts-with(local-name(.), &#39;value&#39;)])"/>
        <source value="Extension"/>
      </constraint>
      <mapping>
        <identity value="rim"/>
        <map value="ADXP[partType=UNIT]"/>
      </mapping>
    </element>
    <element id="Extension:adxp-unittype.id">
      <path value="Extension.id"/>
      <representation value="xmlAttr"/>
      <short value="xml:id (or equivalent in JSON)"/>
      <definition value="unique id for the element within a resource (for internal references). This may be any string value that does not contain spaces."/>
      <min value="0"/>
      <max value="1"/>
      <base>
        <path value="Element.id"/>
        <min value="0"/>
        <max value="1"/>
      </base>
      <type>
        <code value="string"/>
      </type>
      <mapping>
        <identity value="rim"/>
        <map value="n/a"/>
      </mapping>
    </element>
    <element id="Extension:adxp-unittype.extension">
      <path value="Extension.extension"/>
      <short value="Extension"/>
      <definition value="An Extension"/>
      <min value="0"/>
      <max value="0"/>
      <base>
        <path value="Element.extension"/>
        <min value="0"/>
        <max value="*"/>
      </base>
      <type>
        <code value="Extension"/>
      </type>
    </element>
    <element id="Extension:adxp-unittype.url">
      <path value="Extension.url"/>
      <representation value="xmlAttr"/>
      <short value="identifies the meaning of the extension"/>
      <definition value="Source of the definition for the extension code - a logical name or a URL."/>
      <comment value="The definition may point directly to a computable or human-readable definition of the extensibility codes, or it may be a logical URI as declared in some other specification. The definition SHALL be a URI for the Structure Definition defining the extension."/>
      <min value="1"/>
      <max value="1"/>
      <base>
        <path value="Extension.url"/>
        <min value="1"/>
        <max value="1"/>
      </base>
      <type>
        <code value="uri"/>
      </type>
      <fixedUri value="http://hl7.org/fhir/StructureDefinition/iso21090-ADXP-unitType"/>
      <mapping>
        <identity value="rim"/>
        <map value="N/A"/>
      </mapping>
    </element>
    <element id="Extension:adxp-unittype.valueString">
      <path value="Extension.valueString"/>
      <short value="Value of extension"/>
      <definition value="Value of extension - may be a resource or one of a constrained set of the data types (see Extensibility in the spec for list)."/>
      <min value="0"/>
      <max value="1"/>
      <base>
        <path value="Extension.value[x]"/>
        <min value="0"/>
        <max value="1"/>
      </base>
      <type>
        <code value="string"/>
      </type>
      <mapping>
        <identity value="rim"/>
        <map value="N/A"/>
      </mapping>
    </element>
  </snapshot>
  <differential>
    <element id="Extension:adxp-unittype">
      <path value="Extension"/>
      <short value="unitType"/>
      <definition value="Indicates the type of specific unit contained within a building or complex. E.g. Apartment, Floor."/>
      <min value="0"/>
      <max value="*"/>
      <mapping>
        <identity value="rim"/>
        <map value="ADXP[partType=UNIT]"/>
      </mapping>
    </element>
    <element id="Extension:adxp-unittype.extension">
      <path value="Extension.extension"/>
      <max value="0"/>
    </element>
    <element id="Extension:adxp-unittype.url">
      <path value="Extension.url"/>
      <type>
        <code value="uri"/>
      </type>
      <fixedUri value="http://hl7.org/fhir/StructureDefinition/iso21090-ADXP-unitType"/>
    </element>
    <element id="Extension:adxp-unittype.valueString">
      <path value="Extension.valueString"/>
      <type>
        <code value="string"/>
      </type>
    </element>
  </differential>
</StructureDefinition>
{ "pile_set_name": "Github" }
# Amazon DynamoDB getting started examples

## Purpose

Shows how to use the AWS SDK for Python (Boto3) to create an Amazon DynamoDB table for storing movies, load movies into the table from a JSON-formatted file, and update and query movies in various ways.

## Prerequisites

- You must have an AWS account, and have your default credentials and AWS Region configured as described in the [AWS Tools and SDKs Shared Configuration and Credentials Reference Guide](https://docs.aws.amazon.com/credref/latest/refdocs/creds-config-files.html).
- Python 3.6 or later
- Boto3 1.11.10 or later
- pytest 5.3.5 or later (to run unit tests)
- Download and extract the [example movie data JSON file](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/samples/moviedata.zip).

## Cautions

- As an AWS best practice, grant this code least privilege, or only the permissions required to perform a task. For more information, see [Grant Least Privilege](https://docs.aws.amazon.com/IAM/latest/UserGuide/best-practices.html#grant-least-privilege) in the *AWS Identity and Access Management User Guide*.
- This code has not been tested in all AWS Regions. Some AWS services are available only in specific Regions. For more information, see the [AWS Region Table](https://aws.amazon.com/about-aws/global-infrastructure/regional-product-services/) on the AWS website.
- Running this code might result in charges to your AWS account.

## Running the code

The files in this example are designed to be used as part of the [Getting Started Developing with Python and DynamoDB](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/GettingStarted.Python.html) step-by-step tutorial in the *Amazon DynamoDB Developer Guide*.

The functions in this example are configured to connect to a locally installed version of DynamoDB as described in [Setting Up DynamoDB Local](https://docs.aws.amazon.com/amazondynamodb/latest/developerguide/DynamoDBLocal.html). To run the code against your AWS account instead, remove the `endpoint_url="http://localhost:8000"` parameter from the resource creation function in each file so that the code looks like the following.

```python
dynamodb = boto3.resource('dynamodb')
```

Each file can be run separately at a command prompt. For example, create the Movies table by running the following from a command prompt window.

```sh
python MoviesCreateTable.py
```

## Running the tests

The unit tests in this module use the botocore Stubber, which captures requests before they are sent to AWS and returns a mocked response. To run all of the tests, run the following in your [GitHub root]/python/example_code/dynamodb/GettingStarted folder.

```sh
python -m pytest
```

## Additional information

- [Boto3 Amazon DynamoDB examples](https://boto3.amazonaws.com/v1/documentation/api/latest/guide/dynamodb.html)
- [Boto3 Amazon DynamoDB service reference](https://boto3.amazonaws.com/v1/documentation/api/latest/reference/services/dynamodb.html)

---

Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved. SPDX-License-Identifier: Apache-2.0
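As a supplement to the steps above, here is a rough sketch of what the local-endpoint table creation could look like. The `Movies` composite key follows the tutorial this README references; the throughput numbers and the exact code shape are assumptions, not the repository's actual files.

```python
import boto3

# Sketch only: connect to DynamoDB Local and create the tutorial's Movies table.
dynamodb = boto3.resource('dynamodb', endpoint_url="http://localhost:8000")

table = dynamodb.create_table(
    TableName='Movies',
    KeySchema=[
        {'AttributeName': 'year', 'KeyType': 'HASH'},    # partition key
        {'AttributeName': 'title', 'KeyType': 'RANGE'},  # sort key
    ],
    AttributeDefinitions=[
        {'AttributeName': 'year', 'AttributeType': 'N'},
        {'AttributeName': 'title', 'AttributeType': 'S'},
    ],
    ProvisionedThroughput={'ReadCapacityUnits': 10, 'WriteCapacityUnits': 10},
)
table.wait_until_exists()
print("Table status:", table.table_status)
```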
{ "pile_set_name": "Github" }
:server: 'localhost' :username: '[email protected]' :password: 'pass' :connection_options: :port: 8993 :ssl: :verify_mode: 0
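The leading-colon keys suggest a Ruby consumer that expects symbol keys. A minimal loading sketch follows; the file name (`settings.yml`) and the consuming code are assumptions, and newer Psych versions require symbols to be explicitly permitted.

```ruby
require 'yaml'

# Sketch only: load the config with symbol keys permitted (Psych 3.1+).
config = YAML.safe_load(File.read('settings.yml'), permitted_classes: [Symbol])

puts config[:server]                     #=> "localhost"
puts config[:connection_options][:port]  #=> 8993
```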
{ "pile_set_name": "Github" }
// This file is part of Eigen, a lightweight C++ template library // for linear algebra. // // Copyright (C) 2014 Benoit Steiner <[email protected]> // // This Source Code Form is subject to the terms of the Mozilla // Public License v. 2.0. If a copy of the MPL was not distributed // with this file, You can obtain one at http://mozilla.org/MPL/2.0/. #ifndef EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H #define EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H namespace Eigen { /** \internal * * \class TensorIntDiv * \ingroup CXX11_Tensor_Module * * \brief Fast integer division by a constant. * * See the paper from Granlund and Montgomery for explanation. * (at https://doi.org/10.1145/773473.178249) * * \sa Tensor */ namespace internal { namespace { // Note: result is undefined if val == 0 template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename internal::enable_if<sizeof(T)==4,int>::type count_leading_zeros(const T val) { #ifdef EIGEN_GPU_COMPILE_PHASE return __clz(val); #elif defined(SYCL_DEVICE_ONLY) return cl::sycl::clz(val); #elif EIGEN_COMP_MSVC unsigned long index; _BitScanReverse(&index, val); return 31 - index; #else EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE); return __builtin_clz(static_cast<uint32_t>(val)); #endif } template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE typename internal::enable_if<sizeof(T)==8,int>::type count_leading_zeros(const T val) { #ifdef EIGEN_GPU_COMPILE_PHASE return __clzll(val); #elif defined(SYCL_DEVICE_ONLY) return static_cast<int>(cl::sycl::clz(val)); #elif EIGEN_COMP_MSVC && EIGEN_ARCH_x86_64 unsigned long index; _BitScanReverse64(&index, val); return 63 - index; #elif EIGEN_COMP_MSVC // MSVC's _BitScanReverse64 is not available for 32bits builds. unsigned int lo = (unsigned int)(val&0xffffffff); unsigned int hi = (unsigned int)((val>>32)&0xffffffff); int n; if(hi==0) n = 32 + count_leading_zeros<unsigned int>(lo); else n = count_leading_zeros<unsigned int>(hi); return n; #else EIGEN_STATIC_ASSERT(sizeof(unsigned long long) == 8, YOU_MADE_A_PROGRAMMING_MISTAKE); return __builtin_clzll(static_cast<uint64_t>(val)); #endif } template <typename T> struct UnsignedTraits { typedef typename conditional<sizeof(T) == 8, uint64_t, uint32_t>::type type; }; template <typename T> struct DividerTraits { typedef typename UnsignedTraits<T>::type type; static const int N = sizeof(T) * 8; }; template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t muluh(const uint32_t a, const T b) { #if defined(EIGEN_GPU_COMPILE_PHASE) return __umulhi(a, b); #elif defined(SYCL_DEVICE_ONLY) return cl::sycl::mul_hi(a, static_cast<uint32_t>(b)); #else return (static_cast<uint64_t>(a) * b) >> 32; #endif } template <typename T> EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t muluh(const uint64_t a, const T b) { #if defined(EIGEN_GPU_COMPILE_PHASE) return __umul64hi(a, b); #elif defined(SYCL_DEVICE_ONLY) return cl::sycl::mul_hi(a, static_cast<uint64_t>(b)); #elif EIGEN_HAS_BUILTIN_INT128 __uint128_t v = static_cast<__uint128_t>(a) * static_cast<__uint128_t>(b); return static_cast<uint64_t>(v >> 64); #else return (TensorUInt128<static_val<0>, uint64_t>(a) * TensorUInt128<static_val<0>, uint64_t>(b)).upper(); #endif } template <int N, typename T> struct DividerHelper { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint32_t computeMultiplier(const int log_div, const T divider) { EIGEN_STATIC_ASSERT(N == 32, YOU_MADE_A_PROGRAMMING_MISTAKE); return static_cast<uint32_t>((static_cast<uint64_t>(1) << (N+log_div)) / divider - (static_cast<uint64_t>(1) 
<< N) + 1); } }; template <typename T> struct DividerHelper<64, T> { static EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE uint64_t computeMultiplier(const int log_div, const T divider) { #if EIGEN_HAS_BUILTIN_INT128 && !defined(EIGEN_GPU_COMPILE_PHASE) && !defined(SYCL_DEVICE_ONLY) return static_cast<uint64_t>((static_cast<__uint128_t>(1) << (64+log_div)) / static_cast<__uint128_t>(divider) - (static_cast<__uint128_t>(1) << 64) + 1); #else const uint64_t shift = 1ULL << log_div; TensorUInt128<uint64_t, uint64_t> result = TensorUInt128<uint64_t, static_val<0> >(shift, 0) / TensorUInt128<static_val<0>, uint64_t>(divider) - TensorUInt128<static_val<1>, static_val<0> >(1, 0) + TensorUInt128<static_val<0>, static_val<1> >(1); return static_cast<uint64_t>(result); #endif } }; } template <typename T, bool div_gt_one = false> struct TensorIntDivisor { public: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() { multiplier = 0; shift1 = 0; shift2 = 0; } // Must have 0 < divider < 2^31. This is relaxed to // 0 < divider < 2^63 when using 64-bit indices on platforms that support // the __uint128_t type. EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor(const T divider) { const int N = DividerTraits<T>::N; eigen_assert(static_cast<typename UnsignedTraits<T>::type>(divider) < NumTraits<UnsignedType>::highest()/2); eigen_assert(divider > 0); // fast ln2 const int leading_zeros = count_leading_zeros(static_cast<UnsignedType>(divider)); int log_div = N - leading_zeros; // if divider is a power of two then log_div is 1 more than it should be. if ((static_cast<typename UnsignedTraits<T>::type>(1) << (log_div-1)) == static_cast<typename UnsignedTraits<T>::type>(divider)) log_div--; multiplier = DividerHelper<N, T>::computeMultiplier(log_div, divider); shift1 = log_div > 1 ? 1 : log_div; shift2 = log_div > 1 ? log_div-1 : 0; } // Must have 0 <= numerator. On platforms that don't support the __uint128_t // type numerator should also be less than 2^32-1. EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T divide(const T numerator) const { eigen_assert(static_cast<typename UnsignedTraits<T>::type>(numerator) < NumTraits<UnsignedType>::highest()/2); //eigen_assert(numerator >= 0); // this is implicitly asserted by the line above UnsignedType t1 = muluh(multiplier, numerator); UnsignedType t = (static_cast<UnsignedType>(numerator) - t1) >> shift1; return (t1 + t) >> shift2; } private: typedef typename DividerTraits<T>::type UnsignedType; UnsignedType multiplier; int32_t shift1; int32_t shift2; }; // Optimized version for signed 32 bit integers. // Derived from Hacker's Delight. // Only works for divisors strictly greater than one template <> class TensorIntDivisor<int32_t, true> { public: EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE TensorIntDivisor() { magic = 0; shift = 0; } // Must have 2 <= divider EIGEN_DEVICE_FUNC TensorIntDivisor(int32_t divider) { eigen_assert(divider >= 2); calcMagic(divider); } EIGEN_DEVICE_FUNC EIGEN_ALWAYS_INLINE int divide(const int32_t n) const { #ifdef EIGEN_GPU_COMPILE_PHASE return (__umulhi(magic, n) >> shift); #elif defined(SYCL_DEVICE_ONLY) return (cl::sycl::mul_hi(magic, static_cast<uint32_t>(n)) >> shift); #else uint64_t v = static_cast<uint64_t>(magic) * static_cast<uint64_t>(n); return (static_cast<uint32_t>(v >> 32) >> shift); #endif } private: // Compute the magic numbers. See Hacker's Delight section 10 for an in // depth explanation. EIGEN_DEVICE_FUNC void calcMagic(int32_t d) { const unsigned two31 = 0x80000000; // 2**31. 
unsigned ad = d; unsigned t = two31 + (ad >> 31); unsigned anc = t - 1 - t%ad; // Absolute value of nc. int p = 31; // Init. p. unsigned q1 = two31/anc; // Init. q1 = 2**p/|nc|. unsigned r1 = two31 - q1*anc; // Init. r1 = rem(2**p, |nc|). unsigned q2 = two31/ad; // Init. q2 = 2**p/|d|. unsigned r2 = two31 - q2*ad; // Init. r2 = rem(2**p, |d|). unsigned delta = 0; do { p = p + 1; q1 = 2*q1; // Update q1 = 2**p/|nc|. r1 = 2*r1; // Update r1 = rem(2**p, |nc|). if (r1 >= anc) { // (Must be an unsigned q1 = q1 + 1; // comparison here). r1 = r1 - anc;} q2 = 2*q2; // Update q2 = 2**p/|d|. r2 = 2*r2; // Update r2 = rem(2**p, |d|). if (r2 >= ad) { // (Must be an unsigned q2 = q2 + 1; // comparison here). r2 = r2 - ad;} delta = ad - r2; } while (q1 < delta || (q1 == delta && r1 == 0)); magic = (unsigned)(q2 + 1); shift = p - 32; } uint32_t magic; int32_t shift; }; template <typename T, bool div_gt_one> static EIGEN_DEVICE_FUNC EIGEN_STRONG_INLINE T operator / (const T& numerator, const TensorIntDivisor<T, div_gt_one>& divisor) { return divisor.divide(numerator); } } // end namespace internal } // end namespace Eigen #endif // EIGEN_CXX11_TENSOR_TENSOR_INTDIV_H
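A minimal usage sketch for the divisor class above. Note that `TensorIntDivisor` lives in `Eigen::internal` and is an internal utility rather than public API; the include path shown is an assumption about how the header is normally pulled in.

```cpp
#include <iostream>
#include <unsupported/Eigen/CXX11/Tensor>  // assumed to pull in TensorIntDiv.h

int main() {
  // Precompute the magic multiplier and shifts once for a runtime-constant divisor...
  Eigen::internal::TensorIntDivisor<int> div7(7);

  // ...then reuse it: the overloaded operator/ calls div7.divide(n), replacing
  // a hardware division with a multiply-high and two shifts.
  for (int n = 0; n < 30; n += 10) {
    std::cout << n << " / 7 = " << (n / div7) << "\n";  // prints 0, 1, 2
  }
  return 0;
}
```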
{ "pile_set_name": "Github" }
<Type Name="SettingsGetMapping" FullName="GLib.SettingsGetMapping"> <TypeSignature Language="C#" Value="public delegate bool SettingsGetMapping(Variant value, IntPtr result);" /> <TypeSignature Language="ILAsm" Value=".class public auto ansi sealed SettingsGetMapping extends System.MulticastDelegate" /> <AssemblyInfo> <AssemblyName>gio-sharp</AssemblyName> </AssemblyInfo> <Base> <BaseTypeName>System.Delegate</BaseTypeName> </Base> <Parameters> <Parameter Name="value" Type="GLib.Variant" /> <Parameter Name="result" Type="System.IntPtr" /> </Parameters> <ReturnValue> <ReturnType>System.Boolean</ReturnType> </ReturnValue> <Docs> <param name="value">To be added.</param> <param name="result">To be added.</param> <summary>To be added.</summary> <returns>To be added.</returns> <remarks>To be added.</remarks> <since version="Gtk# 3.0" /> </Docs> </Type>
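The stubbed docs above give little to go on, but the signature allows a sketch of declaring a callback of this delegate type. The semantics (inspect a `Variant` and report whether the mapping succeeded) are an assumption based on the corresponding GSettings C API, and writing through the `result` pointer is omitted because it would need manual marshalling.

```csharp
using System;
using GLib;

class SettingsGetMappingSketch
{
    // Hypothetical mapping callback: accept any non-null variant.
    static bool MapSetting(Variant value, IntPtr result)
    {
        return value != null;
    }

    static void Main()
    {
        SettingsGetMapping mapping = MapSetting;
        Console.WriteLine(mapping != null);  // delegate instance created from a method group
    }
}
```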
{ "pile_set_name": "Github" }
# set-value [![NPM version](https://img.shields.io/npm/v/set-value.svg?style=flat)](https://www.npmjs.com/package/set-value) [![NPM monthly downloads](https://img.shields.io/npm/dm/set-value.svg?style=flat)](https://npmjs.org/package/set-value) [![NPM total downloads](https://img.shields.io/npm/dt/set-value.svg?style=flat)](https://npmjs.org/package/set-value) [![Linux Build Status](https://img.shields.io/travis/jonschlinkert/set-value.svg?style=flat&label=Travis)](https://travis-ci.org/jonschlinkert/set-value) > Create nested values and any intermediaries using dot notation (`'a.b.c'`) paths. ## Install Install with [npm](https://www.npmjs.com/): ```sh $ npm install --save set-value ``` ## Usage ```js var set = require('set-value'); set(object, prop, value); ``` ### Params * `object` **{object}**: The object to set `value` on * `prop` **{string}**: The property to set. Dot-notation may be used. * `value` **{any}**: The value to set on `object[prop]` ## Examples Updates and returns the given object: ```js var obj = {}; set(obj, 'a.b.c', 'd'); console.log(obj); //=> { a: { b: { c: 'd' } } } ``` ### Escaping **Escaping with backslashes** Prevent set-value from splitting on a dot by prefixing it with backslashes: ```js console.log(set({}, 'a\\.b.c', 'd')); //=> { 'a.b': { c: 'd' } } console.log(set({}, 'a\\.b\\.c', 'd')); //=> { 'a.b.c': 'd' } ``` **Escaping with double-quotes or single-quotes** Wrap double or single quotes around the string, or part of the string, that should not be split by set-value: ```js console.log(set({}, '"a.b".c', 'd')); //=> { 'a.b': { c: 'd' } } console.log(set({}, "'a.b'.c", "d")); //=> { 'a.b': { c: 'd' } } console.log(set({}, '"this/is/a/.file.path"', 'd')); //=> { 'this/is/a/file.path': 'd' } ``` ### Bracket support set-value does not split inside brackets or braces: ```js console.log(set({}, '[a.b].c', 'd')); //=> { '[a.b]': { c: 'd' } } console.log(set({}, "(a.b).c", "d")); //=> { '(a.b)': { c: 'd' } } console.log(set({}, "<a.b>.c", "d")); //=> { '<a.b>': { c: 'd' } } console.log(set({}, "{a..b}.c", "d")); //=> { '{a..b}': { c: 'd' } } ``` ## History ### v2.0.0 * Adds support for escaping with double or single quotes. See [escaping](#escaping) for examples. * Will no longer split inside brackets or braces. See [bracket support](#bracket-support) for examples. If there are any regressions please create a [bug report](../../issues/new). Thanks! ## About ### Related projects * [assign-value](https://www.npmjs.com/package/assign-value): Assign a value or extend a deeply nested property of an object using object path… [more](https://github.com/jonschlinkert/assign-value) | [homepage](https://github.com/jonschlinkert/assign-value "Assign a value or extend a deeply nested property of an object using object path notation.") * [get-value](https://www.npmjs.com/package/get-value): Use property paths (`a.b.c`) to get a nested value from an object. | [homepage](https://github.com/jonschlinkert/get-value "Use property paths (`a.b.c`) to get a nested value from an object.") * [has-value](https://www.npmjs.com/package/has-value): Returns true if a value exists, false if empty. Works with deeply nested values using… [more](https://github.com/jonschlinkert/has-value) | [homepage](https://github.com/jonschlinkert/has-value "Returns true if a value exists, false if empty. 
Works with deeply nested values using object paths.")
* [merge-value](https://www.npmjs.com/package/merge-value): Similar to assign-value but deeply merges object values or nested values using object path/dot notation. | [homepage](https://github.com/jonschlinkert/merge-value "Similar to assign-value but deeply merges object values or nested values using object path/dot notation.")
* [omit-value](https://www.npmjs.com/package/omit-value): Omit properties from an object or deeply nested property of an object using object path… [more](https://github.com/jonschlinkert/omit-value) | [homepage](https://github.com/jonschlinkert/omit-value "Omit properties from an object or deeply nested property of an object using object path notation.")
* [set-value](https://www.npmjs.com/package/set-value): Create nested values and any intermediaries using dot notation (`'a.b.c'`) paths. | [homepage](https://github.com/jonschlinkert/set-value "Create nested values and any intermediaries using dot notation (`'a.b.c'`) paths.")
* [union-value](https://www.npmjs.com/package/union-value): Set an array of unique values as the property of an object. Supports setting deeply… [more](https://github.com/jonschlinkert/union-value) | [homepage](https://github.com/jonschlinkert/union-value "Set an array of unique values as the property of an object. Supports setting deeply nested properties using object-paths/dot notation.")
* [unset-value](https://www.npmjs.com/package/unset-value): Delete nested properties from an object using dot notation. | [homepage](https://github.com/jonschlinkert/unset-value "Delete nested properties from an object using dot notation.")

### Contributing

Pull requests and stars are always welcome. For bugs and feature requests, [please create an issue](../../issues/new).

### Contributors

| **Commits** | **Contributor** |
| --- | --- |
| 59 | [jonschlinkert](https://github.com/jonschlinkert) |
| 1 | [vadimdemedes](https://github.com/vadimdemedes) |
| 1 | [wtgtybhertgeghgtwtg](https://github.com/wtgtybhertgeghgtwtg) |

### Building docs

_(This project's readme.md is generated by [verb](https://github.com/verbose/verb-generate-readme), please don't edit the readme directly. Any changes to the readme must be made in the [.verb.md](.verb.md) readme template.)_

To generate the readme, run the following command:

```sh
$ npm install -g verbose/verb#dev verb-generate-readme && verb
```

### Running tests

Running and reviewing unit tests is a great way to get familiarized with a library and its API. You can install dependencies and run tests with the following command:

```sh
$ npm install && npm test
```

### Author

**Jon Schlinkert**

* [github/jonschlinkert](https://github.com/jonschlinkert)
* [twitter/jonschlinkert](https://twitter.com/jonschlinkert)

### License

Copyright © 2017, [Jon Schlinkert](https://github.com/jonschlinkert).
Released under the [MIT License](LICENSE).

***

_This file was generated by [verb-generate-readme](https://github.com/verbose/verb-generate-readme), v0.6.0, on June 21, 2017._
{ "pile_set_name": "Github" }
"use strict"; /** * @author Jonathan Casarrubias * @module BootScript Decorator * @license MIT * @description * This decorator will return boot script instances * This avoids the need of creating static bootscripts **/ function BootScript() { function f(target) { function BootScriptInstance(reference) { return new target(reference); } return BootScriptInstance; } return f; } exports.BootScript = BootScript; //# sourceMappingURL=C:/Users/a-jimenez/mean-expert/fireloop.io/core/src/decorators/boot-script/index.js.map
{ "pile_set_name": "Github" }
"""This is a sample module that doesn't really test anything all that interesting. It simply has a few tests, some of which succeed and some of which fail. It's important that the numbers remain constant as another test is testing the running of these tests. >>> 2+2 4 """ def foo(): """ >>> 2+2 5 >>> 2+2 4 """ def bar(): """ >>> 2+2 4 """ def test_silly_setup(): """ >>> import test.test_doctest >>> test.test_doctest.sillySetup True """ def w_blank(): """ >>> if 1: ... print 'a' ... print ... print 'b' a <BLANKLINE> b """ x = 1 def x_is_one(): """ >>> x 1 """ def y_is_one(): """ >>> y 1 """ __test__ = {'good': """ >>> 42 42 """, 'bad': """ >>> 42 666 """, } def test_suite(): import doctest return doctest.DocTestSuite()
{ "pile_set_name": "Github" }
/* -*- indent-tabs-mode: nil; js-indent-level: 2 -*- */ /* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //----------------------------------------------------------------------------- var BUGNUMBER = 407727; var summary = 'let Object global redeclaration'; var actual = ''; var expect = 1; printBugNumber(BUGNUMBER); printStatus (summary); let Object = 1; actual = Object; reportCompare(expect, actual, summary);
{ "pile_set_name": "Github" }
package agents

import (
	"errors"
	"github.com/TeaWeb/build/internal/teautils"
	"github.com/iwind/TeaGo/maps"
	"github.com/iwind/TeaGo/types"
	"github.com/robertkrimen/otto"
	"strings"
)

// EvalParam performs numeric evaluation on a parameter expression against a value, using JavaScript syntax.
func EvalParam(param string, value interface{}, old interface{}, varMapping maps.Map, supportsMath bool) (string, error) {
	if old == nil {
		old = value
	}

	var resultErr error = nil
	paramValue := RegexpParamNamedVariable.ReplaceAllStringFunc(param, func(s string) string {
		varName := s[2 : len(s)-1]

		funcExpr := ""
		index := strings.Index(varName, "|")
		if index > -1 {
			funcExpr = varName[index+1:]
			varName = strings.TrimSpace(varName[:index])
		}

		result, err := evalVarName(varName, value, old, varMapping, supportsMath)
		if err != nil {
			resultErr = err
		} else if len(funcExpr) > 0 {
			result, err = RunFuncExpr(result, []byte(funcExpr))
		}
		return types.String(result)
	})

	// Supports addition, subtraction, multiplication, division, and modulo
	if len(paramValue) > 0 {
		if supportsMath && strings.ContainsAny(param, "+-*/%") {
			vm := otto.New()
			v, err := vm.Run(paramValue)
			if err != nil {
				return "", errors.New("\"" + param + "\": eval \"" + paramValue + "\":" + err.Error())
			} else {
				paramValue = v.String()
			}
		}

		// Handle the javascript: prefix
		if strings.HasPrefix(paramValue, "javascript:") {
			vm := otto.New()
			v, err := vm.Run(paramValue[len("javascript:"):])
			if err != nil {
				return "", errors.New("\"" + param + "\": eval \"" + paramValue + "\":" + err.Error())
			} else {
				paramValue = v.String()
			}
		}
	}

	return paramValue, resultErr
}

func evalVarName(varName string, value interface{}, old interface{}, varMapping maps.Map, supportsMath bool) (interface{}, error) {
	// Look the variable up in varMapping first
	if varMapping != nil {
		index := strings.Index(varName, ".")
		var firstKey = varName
		if index > 0 {
			firstKey = varName[:index]
		}
		firstKey = strings.TrimSpace(firstKey)
		if varMapping.Has(firstKey) {
			keys := strings.Split(varName, ".")
			for index, key := range keys {
				keys[index] = strings.TrimSpace(key)
			}
			result := teautils.Get(varMapping, keys)
			if result == nil {
				return "", nil
			}
			return result, nil
		}
	}

	if value == nil {
		return nil, nil
	}

	// Supports ${OLD} and ${OLD.xxx}
	if varName == "OLD" {
		return evalVarName("0", old, nil, nil, supportsMath)
	} else if strings.HasPrefix(varName, "OLD.") {
		return evalVarName(varName[4:], old, nil, nil, supportsMath)
	}

	switch v := value.(type) {
	case string:
		if varName == "0" {
			return v, nil
		}
		return "", nil
	case int8, int16, int, int32, int64, uint8, uint16, uint, uint32, uint64:
		if varName == "0" {
			return v, nil
		}
		return 0, nil
	case float32, float64:
		if varName == "0" {
			return v, nil
		}
		return 0, nil
	case bool:
		if varName == "0" {
			return v, nil
		}
		return false, nil
	default:
		if types.IsSlice(value) || types.IsMap(value) {
			keys := strings.Split(varName, ".")
			for index, key := range keys {
				keys[index] = strings.TrimSpace(key)
			}
			return teautils.Get(v, keys), nil
		}
	}

	return "${" + varName + "}", nil
}
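A rough usage sketch from inside the same package (the test file name is hypothetical). It assumes, as the code above implies, that `RegexpParamNamedVariable` matches `${...}` placeholders.

```go
package agents

import "testing"

// Hypothetical test: ${0} expands to the current value, then otto evaluates
// the resulting arithmetic expression "21 * 2".
func TestEvalParam_Math(t *testing.T) {
	result, err := EvalParam("${0} * 2", 21, nil, nil, true)
	if err != nil {
		t.Fatal(err)
	}
	if result != "42" {
		t.Fatalf(`expected "42", got %q`, result)
	}
}
```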
{ "pile_set_name": "Github" }
using System.Reflection; using System.Runtime.CompilerServices; using System.Runtime.InteropServices; // General Information about an assembly is controlled through the following // set of attributes. Change these attribute values to modify the information // associated with an assembly. [assembly: AssemblyTitle("EntityFilters.MultiTenant")] [assembly: AssemblyDescription("")] [assembly: AssemblyConfiguration("")] [assembly: AssemblyCompany("")] [assembly: AssemblyProduct("EntityFilters.MultiTenant")] [assembly: AssemblyCopyright("Copyright © 2017")] [assembly: AssemblyTrademark("")] [assembly: AssemblyCulture("")] // Setting ComVisible to false makes the types in this assembly not visible // to COM components. If you need to access a type in this assembly from // COM, set the ComVisible attribute to true on that type. [assembly: ComVisible(false)] // The following GUID is for the ID of the typelib if this project is exposed to COM [assembly: Guid("46cff2ac-57cc-48bc-b8dc-b874dbfdba05")] // Version information for an assembly consists of the following four values: // // Major Version // Minor Version // Build Number // Revision // // You can specify all the values or you can default the Build and Revision Numbers // by using the '*' as shown below: // [assembly: AssemblyVersion("1.0.*")] [assembly: AssemblyVersion("1.0.0.0")] [assembly: AssemblyFileVersion("1.0.0.0")]
{ "pile_set_name": "Github" }
/* * Licensed to the Apache Software Foundation (ASF) under one * or more contributor license agreements. See the NOTICE file * distributed with this work for additional information * regarding copyright ownership. The ASF licenses this file * to you under the Apache License, Version 2.0 (the * "License"); you may not use this file except in compliance * with the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ package org.apache.flink.runtime.rest.handler; import org.apache.flink.runtime.rest.handler.util.HandlerUtils; import org.apache.flink.runtime.rest.messages.ErrorResponseBody; import org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandler; import org.apache.flink.shaded.netty4.io.netty.channel.ChannelHandlerContext; import org.apache.flink.shaded.netty4.io.netty.channel.SimpleChannelInboundHandler; import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpRequest; import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpResponseStatus; import org.slf4j.Logger; import java.util.Collections; import java.util.Map; import static java.util.Objects.requireNonNull; /** * This is the last handler in the pipeline. It logs all error messages. */ @ChannelHandler.Sharable public class PipelineErrorHandler extends SimpleChannelInboundHandler<HttpRequest> { /** The logger to which the handler writes the log statements. */ private final Logger logger; private final Map<String, String> responseHeaders; public PipelineErrorHandler(Logger logger, final Map<String, String> responseHeaders) { this.logger = requireNonNull(logger); this.responseHeaders = requireNonNull(responseHeaders); } @Override protected void channelRead0(ChannelHandlerContext ctx, HttpRequest message) { // we can't deal with this message. No one in the pipeline handled it. Log it. logger.warn("Unknown message received: {}", message); HandlerUtils.sendErrorResponse( ctx, message, new ErrorResponseBody("Bad request received."), HttpResponseStatus.BAD_REQUEST, Collections.emptyMap()); } @Override public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) { logger.warn("Unhandled exception", cause); HandlerUtils.sendErrorResponse( ctx, false, new ErrorResponseBody("Internal server error: " + cause.getMessage()), HttpResponseStatus.INTERNAL_SERVER_ERROR, responseHeaders); } }
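A hypothetical wiring sketch for the class above. The bootstrap code and the handler list are invented; the one grounded point is that this handler is meant to sit last in the pipeline, so it only sees requests and exceptions that nothing earlier consumed.

```java
import org.apache.flink.shaded.netty4.io.netty.channel.Channel;
import org.apache.flink.shaded.netty4.io.netty.channel.ChannelPipeline;
import org.apache.flink.shaded.netty4.io.netty.handler.codec.http.HttpServerCodec;
import org.slf4j.Logger;

import java.util.Map;

class PipelineWiringSketch {
    // Hypothetical setup: register the error handler after all other handlers.
    static void install(Channel channel, Logger logger, Map<String, String> responseHeaders) {
        ChannelPipeline pipeline = channel.pipeline();
        pipeline.addLast(new HttpServerCodec());
        // ... routing and REST endpoint handlers would be added here ...
        pipeline.addLast(new PipelineErrorHandler(logger, responseHeaders));
    }
}
```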
{ "pile_set_name": "Github" }
/* =========== Module Settings Styling =========== */ .dnnModuleSettings { margin: 2em auto; } .dnnModuleSettings div.msModuleSettings, .dnnModuleSettings div.msPermissions, .dnnModuleSettings div.msPageSettings, .dnnModuleSettings div.msSpecificSettings { padding: 0; } .dnnModuleSettings .dnnFormItem input[type="text"], .dnnModuleSettings .dnnFormItem .dnnFormInput, .dnnModuleSettings .dnnFormItem textarea, .dnnExport .dnnFormItem input[type="text"], .dnnExport .dnnFormItem .dnnFormInput, .dnnExport .dnnFormItem textarea { } .dnnModuleSettings .urlControlLinkType .dnnFormLabel { width: 100px; float: left; display: block; margin-top: 0; font-weight: normal; } .dnnModuleSettings .urlControlLinkType br, .dnnModuleSettings .dnnFormRadioButtons br { display: none; } .dnnModuleSettings .urlControlLinkType .ucLinkTypeRadioButtons { float: left; display: block; min-width: 350px; } .dnnModuleSettings .dnnFormRadioButtons { min-width: 350px; margin-top: 0 !important; } .dnnModuleSettings .urlControlLinkType label, .urlControlFileRow .dnnFormLabel { margin-top: 0; font-weight: normal; } .urlControl span.dnnFormLabel { width: 100px; text-align: right; display: block; float: left; margin: 8px 14px 0 0; } .dnnModuleSettings div.dnnmsStat { border-top: 1px #ccc solid; padding: 1em 0 4em; overflow: hidden; } .dnnmsStat p { float: left; padding-right: 2em; } p.dnnFormRequired span { border-left: 5px #F00 solid; padding-left: 0.5em; } .mspsContent .urlControl { overflow: hidden; } .mspsContent .urlControlLinkType { float: left; width: 100%; } .mspsContent .urlControlLinkType .dnnFormLabel { float: left; width: auto; } .mspsContent .urlControlFileRow a.dnnSecondaryAction { margin: 10px 10px 0 0; float: left; } .mspsContent .urlControlLinkType input[type="radio"] { width: auto; margin-right: 5px; } .dnnModuleSettings .dnnFormItem input[type="text"].msCacheDuration { width: 100px; min-width: 50px; } /* adjust form item padding/spacing */ .dnnFormItem label + input{ margin-left: 15px; } .dnnFormItem .dnnLeft{ width: 60%; margin: 6px 0 15px 0; } .dnnFormItem > textarea{ margin-top:5px; } span.labelCacheInherited{ display: block !important; background-color: rgba(255, 0, 0, 0.15); border-color: rgba(255, 0, 0, 0.2); padding: 8px; float: left; width: 100px !important; } .tabVisibilityRow span.dnnFormLabel { width: 20%; display: block; float: left; text-align: right; margin:0 12px 0 0 ; font-weight: normal; } .tabVisibilityRowItem{ width: 100%; clear: both; margin-bottom: 8px; } .dnnContainerPreview .dnnLabel{ float: left; } .dnnContainerPreview .selectize-control{ float: left; margin-right: 15px; } .dnnContainerPreview .dnnSecondaryAction{ float: left; } .dnnCacheSettings .dnnLabel{ float: left; } .dnnCacheSettings .RadComboBox_Default{ width: 200px !important; float: left; margin-right: 15px; } /* =========== End Module Settings Styling =========== */
{ "pile_set_name": "Github" }